[dpdk-dev] mbuf: fix for incomplete nb_segs types change
Checks
Commit Message
Update types of variables to correspond to nb_segs type change from
uint8_t to uint16_t.
Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
Cc: olivier.matz@6wind.com
Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
---
lib/librte_mbuf/rte_mbuf.c | 4 ++--
lib/librte_mbuf/rte_mbuf.h | 11 +++++++----
lib/librte_pdump/rte_pdump.c | 2 +-
3 files changed, 10 insertions(+), 7 deletions(-)
Comments
Hi Ilya,
On Fri, Nov 10, 2017 at 04:56:43PM +0300, Ilya V. Matveychikov wrote:
> Update types of variables to correspond to nb_segs type change from
> uint8_t to uint16_t.
>
> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
> Cc: olivier.matz@6wind.com
>
> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
Thanks for this fix.
I did quick search for other references:
$ git grep seg | grep -i int8
app/test-pmd/config.c: tx_pkt_nb_segs = (uint8_t) nb_segs;
app/test-pmd/csumonly.c: uint16_t seglen[], uint8_t nb_seg)
app/test-pmd/testpmd.c:uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
app/test-pmd/testpmd.h:extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
doc/guides/sample_app_ug/ipv4_multicast.rst: hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
drivers/bus/dpaa/rte_dpaa_bus.h: return (uint8_t *)(memseg[i].addr) +
drivers/net/ark/ark_ethdev_rx.c: uint8_t segments;
drivers/net/avp/rte_avp_common.h: uint8_t nb_segs; /**< Number of segments */
drivers/net/bnx2x/bnx2x.c:bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment,
drivers/net/bnx2x/bnx2x.c: uint8_t segment;
drivers/net/bnx2x/bnx2x.h:bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment,
drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
drivers/net/bnxt/hsi_struct_def_dpdk.h: uint8_t tpa_segs;
drivers/net/dpaa2/dpaa2_rxtx.c: first_seg->buf_addr = (uint8_t *)sg_addr;
drivers/net/dpaa2/dpaa2_rxtx.c: next_seg->buf_addr = (uint8_t *)sg_addr;
drivers/net/i40e/i40e_rxtx.c: * m->nb_segs is uint8_t, so nb_segs is always less than
drivers/net/ixgbe/ixgbe_rxtx.c: int8_t i, nb_segs = m->nb_segs;
drivers/net/mlx5/mlx5_rxtx_vec_neon.h:txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: const uint8x16_t dseg_shuf_m = {
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)txq->wqes;
drivers/net/qede/qede_rxtx.c: uint8_t num_segs, uint16_t pkt_len)
drivers/net/qede/qede_rxtx.c: uint8_t num_segs = 1;
drivers/net/qede/qede_rxtx.c: uint8_t nb_segs = 0;
drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
drivers/net/szedata2/rte_eth_szedata2.c: uint8_t mbuf_segs;
drivers/net/vmxnet3/base/vmxnet3_defs.h: uint8 segCnt; /* Number of aggregated packets */
examples/ipv4_multicast/main.c: hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
lib/librte_mbuf/rte_mbuf.h: uint8_t nseg;
lib/librte_mbuf/rte_mbuf.h: head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
lib/librte_pdump/rte_pdump.c: uint8_t nseg;
test/test/packet_burst_generator.c: uint8_t pkt_len, uint8_t nb_pkt_segs)
test/test/packet_burst_generator.c: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
test/test/packet_burst_generator.h: uint8_t pkt_len, uint8_t nb_pkt_segs);
test/test/packet_burst_generator.h: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
test/test/test_cryptodev.h: int nb_segs, uint8_t pattern) {
I think the ones in ixgbe, szedata and examples/ipv4_multicast/main.c could be
fixed without risk.
Some don't look critical for now (ex: testpmd), but could be integrated in this
patch too.
For others, I think we can let the maintainers have a look.
Thanks,
Olivier
> On Nov 13, 2017, at 2:10 PM, Olivier MATZ <olivier.matz@6wind.com> wrote:
>
> Hi Ilya,
>
> On Fri, Nov 10, 2017 at 04:56:43PM +0300, Ilya V. Matveychikov wrote:
>> Update types of variables to correspond to nb_segs type change from
>> uint8_t to uint16_t.
>>
>> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
>> Cc: olivier.matz@6wind.com
>>
>> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
>
> Thanks for this fix.
>
> I did quick search for other references:
>
> $ git grep seg | grep -i int8
> app/test-pmd/config.c: tx_pkt_nb_segs = (uint8_t) nb_segs;
> app/test-pmd/csumonly.c: uint16_t seglen[], uint8_t nb_seg)
> app/test-pmd/testpmd.c:uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
> app/test-pmd/testpmd.h:extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
> doc/guides/sample_app_ug/ipv4_multicast.rst: hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
> drivers/bus/dpaa/rte_dpaa_bus.h: return (uint8_t *)(memseg[i].addr) +
> drivers/net/ark/ark_ethdev_rx.c: uint8_t segments;
> drivers/net/avp/rte_avp_common.h: uint8_t nb_segs; /**< Number of segments */
> drivers/net/bnx2x/bnx2x.c:bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment,
> drivers/net/bnx2x/bnx2x.c: uint8_t segment;
> drivers/net/bnx2x/bnx2x.h:bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment,
> drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
> drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
> drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
> drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
> drivers/net/bnxt/hsi_struct_def_dpdk.h: uint8_t tpa_segs;
> drivers/net/dpaa2/dpaa2_rxtx.c: first_seg->buf_addr = (uint8_t *)sg_addr;
> drivers/net/dpaa2/dpaa2_rxtx.c: next_seg->buf_addr = (uint8_t *)sg_addr;
> drivers/net/i40e/i40e_rxtx.c: * m->nb_segs is uint8_t, so nb_segs is always less than
> drivers/net/ixgbe/ixgbe_rxtx.c: int8_t i, nb_segs = m->nb_segs;
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h:txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: const uint8x16_t dseg_shuf_m = {
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
> drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)txq->wqes;
> drivers/net/qede/qede_rxtx.c: uint8_t num_segs, uint16_t pkt_len)
> drivers/net/qede/qede_rxtx.c: uint8_t num_segs = 1;
> drivers/net/qede/qede_rxtx.c: uint8_t nb_segs = 0;
> drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> drivers/net/szedata2/rte_eth_szedata2.c: uint8_t mbuf_segs;
> drivers/net/vmxnet3/base/vmxnet3_defs.h: uint8 segCnt; /* Number of aggregated packets */
> examples/ipv4_multicast/main.c: hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
> lib/librte_mbuf/rte_mbuf.h: uint8_t nseg;
> lib/librte_mbuf/rte_mbuf.h: head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
> lib/librte_pdump/rte_pdump.c: uint8_t nseg;
> test/test/packet_burst_generator.c: uint8_t pkt_len, uint8_t nb_pkt_segs)
> test/test/packet_burst_generator.c: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
> test/test/packet_burst_generator.h: uint8_t pkt_len, uint8_t nb_pkt_segs);
> test/test/packet_burst_generator.h: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
> test/test/test_cryptodev.h: int nb_segs, uint8_t pattern) {
>
> I think the ones in ixgbe, szedata and examples/ipv4_multicast/main.c could be
> fixed without risk.
> Some don't look critical for now (ex: testpmd), but could be integrated in this
> patch too.
>
Something already fixed by:
6c293ffd63b3f61a95d8c492d007160874e7a057
4c20622a95087b42ddb403406d3877a9dc3d731b
But I didn’t try to fix it everywhere in drivers as it may depend on the particular hardware - does it support more than uint8_t queues or not. So, I agree with you that maintainers should have a look at that change.
Also, not sure that testpmd needs to have more than 255 segments of 16k possible. And my opinion is that it’s better to apply this patch separately without integration with driver fixes and stuff like that.
Ilya.
> But I didn’t try to fix it everywhere in drivers as it may depend on the particular hardware - does it support more than uint8_t queues or not. So, I agree with you that maintainers should have a look at that change.
does it support more than uint8_t *segments*, not queues :-)
On Mon, Nov 13, 2017 at 02:18:55PM +0400, Ilya Matveychikov wrote:
>
> > On Nov 13, 2017, at 2:10 PM, Olivier MATZ <olivier.matz@6wind.com> wrote:
> >
> > Hi Ilya,
> >
> > On Fri, Nov 10, 2017 at 04:56:43PM +0300, Ilya V. Matveychikov wrote:
> >> Update types of variables to correspond to nb_segs type change from
> >> uint8_t to uint16_t.
> >>
> >> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
> >> Cc: olivier.matz@6wind.com
> >>
> >> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
> >
> > Thanks for this fix.
> >
> > I did quick search for other references:
> >
> > $ git grep seg | grep -i int8
> > app/test-pmd/config.c: tx_pkt_nb_segs = (uint8_t) nb_segs;
> > app/test-pmd/csumonly.c: uint16_t seglen[], uint8_t nb_seg)
> > app/test-pmd/testpmd.c:uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
> > app/test-pmd/testpmd.h:extern uint8_t tx_pkt_nb_segs; /**< Number of segments in TX packets */
> > doc/guides/sample_app_ug/ipv4_multicast.rst: hdr->pkt.nb_segs = (uint8_t)(pkt->pkt.nb_segs + 1);
> > drivers/bus/dpaa/rte_dpaa_bus.h: return (uint8_t *)(memseg[i].addr) +
> > drivers/net/ark/ark_ethdev_rx.c: uint8_t segments;
> > drivers/net/avp/rte_avp_common.h: uint8_t nb_segs; /**< Number of segments */
> > drivers/net/bnx2x/bnx2x.c:bnx2x_igu_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id, uint8_t segment,
> > drivers/net/bnx2x/bnx2x.c: uint8_t segment;
> > drivers/net/bnx2x/bnx2x.h:bnx2x_igu_ack_sb_gen(struct bnx2x_softc *sc, uint8_t segment,
> > drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
> > drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id;
> > drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
> > drivers/net/bnx2x/ecore_hsi.h: uint8_t igu_seg_id /* segment id of the IGU */;
> > drivers/net/bnxt/hsi_struct_def_dpdk.h: uint8_t tpa_segs;
> > drivers/net/dpaa2/dpaa2_rxtx.c: first_seg->buf_addr = (uint8_t *)sg_addr;
> > drivers/net/dpaa2/dpaa2_rxtx.c: next_seg->buf_addr = (uint8_t *)sg_addr;
> > drivers/net/i40e/i40e_rxtx.c: * m->nb_segs is uint8_t, so nb_segs is always less than
> > drivers/net/ixgbe/ixgbe_rxtx.c: int8_t i, nb_segs = m->nb_segs;
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h:txq_wr_dseg_v(struct mlx5_txq_data *txq, uint8_t *dseg,
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: const uint8x16_t dseg_shuf_m = {
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: uint8_t *dseg;
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)(wqe + 1);
> > drivers/net/mlx5/mlx5_rxtx_vec_neon.h: dseg = (uint8_t *)txq->wqes;
> > drivers/net/qede/qede_rxtx.c: uint8_t num_segs, uint16_t pkt_len)
> > drivers/net/qede/qede_rxtx.c: uint8_t num_segs = 1;
> > drivers/net/qede/qede_rxtx.c: uint8_t nb_segs = 0;
> > drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> > drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> > drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> > drivers/net/sfc/base/ef10_nvram.c: if ((rc = tlv_init_cursor_from_size(&cursor, (uint8_t *)seg_data,
> > drivers/net/szedata2/rte_eth_szedata2.c: uint8_t mbuf_segs;
> > drivers/net/vmxnet3/base/vmxnet3_defs.h: uint8 segCnt; /* Number of aggregated packets */
> > examples/ipv4_multicast/main.c: hdr->nb_segs = (uint8_t)(pkt->nb_segs + 1);
> > lib/librte_mbuf/rte_mbuf.h: uint8_t nseg;
> > lib/librte_mbuf/rte_mbuf.h: head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
> > lib/librte_pdump/rte_pdump.c: uint8_t nseg;
> > test/test/packet_burst_generator.c: uint8_t pkt_len, uint8_t nb_pkt_segs)
> > test/test/packet_burst_generator.c: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs)
> > test/test/packet_burst_generator.h: uint8_t pkt_len, uint8_t nb_pkt_segs);
> > test/test/packet_burst_generator.h: int nb_pkt_per_burst, uint8_t pkt_len, uint8_t nb_pkt_segs);
> > test/test/test_cryptodev.h: int nb_segs, uint8_t pattern) {
> >
> > I think the ones in ixgbe, szedata and examples/ipv4_multicast/main.c could be
> > fixed without risk.
> > Some don't look critical for now (ex: testpmd), but could be integrated in this
> > patch too.
> >
>
> Something already fixed by:
> 6c293ffd63b3f61a95d8c492d007160874e7a057
> 4c20622a95087b42ddb403406d3877a9dc3d731b
You are right, sorry, I didn't update my workspace since rc3.
> But I didn’t try to fix it everywhere in drivers as it may depend on the particular hardware - does it support more than uint8_t queues or not. So, I agree with you that maintainers should have a look at that change.
>
> Also, not sure that testpmd needs to have more than 255 segments of 16k possible. And my opinion is that it’s better to apply this patch separately without integration with driver fixes and stuff like that.
Agree.
Acked-by: Olivier Matz <olivier.matz@6wind.com>
> > >> Update types of variables to correspond to nb_segs type change from
> > >> uint8_t to uint16_t.
> > >>
> > >> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
> > >> Cc: olivier.matz@6wind.com
> > >>
> > >> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
> Acked-by: Olivier Matz <olivier.matz@6wind.com>
Is there a real benefit to get it in 17.11?
Is there a risk?
On Tue, Nov 14, 2017 at 06:09:35AM +0100, Thomas Monjalon wrote:
> > > >> Update types of variables to correspond to nb_segs type change from
> > > >> uint8_t to uint16_t.
> > > >>
> > > >> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
> > > >> Cc: olivier.matz@6wind.com
> > > >>
> > > >> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
> > Acked-by: Olivier Matz <olivier.matz@6wind.com>
>
> Is there a real benefit to get it in 17.11?
Without this patch:
- cloning a mbuf with more than 255 segments will create an mbuf with an
invalid number of segments.
- chaining mbufs with rte_pktmbuf_chain() will return an error if the
resulting mbuf has more than 255 segments.
> Is there a risk?
It is not so critical, since manipulating mbufs with a large number of
segments is a very specific use case.
But the risk seems low, I think we can put it in 17.11.
Olivier
14/11/2017 09:24, Olivier MATZ:
> On Tue, Nov 14, 2017 at 06:09:35AM +0100, Thomas Monjalon wrote:
> > > > >> Update types of variables to correspond to nb_segs type change from
> > > > >> uint8_t to uint16_t.
> > > > >>
> > > > >> Fixes: 97cb466d65c9 ("mbuf: use 2 bytes for port and nb segments")
> > > > >> Cc: olivier.matz@6wind.com
> > > > >>
> > > > >> Signed-off-by: Ilya V. Matveychikov <matvejchikov@gmail.com>
> > > Acked-by: Olivier Matz <olivier.matz@6wind.com>
> >
> > Is there a real benefit to get it in 17.11?
>
> Without this patch:
>
> - cloning a mbuf with more than 255 segments will create an mbuf with an
> invalid number of segments.
>
> - chaining mbufs with rte_pktmbuf_chain() will return an error if the
> resulting mbuf has more than 255 segments.
>
> > Is there a risk?
>
> It is not so critical, since manipulating mbufs with a large number of
> segments is a very specific use case.
>
> But the risk seems low, I think we can put it in 17.11.
OK, applied, thanks
@@ -203,7 +203,7 @@ void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
{
const struct rte_mbuf *m_seg;
- unsigned nb_segs;
+ unsigned int nb_segs;
if (m == NULL)
rte_panic("mbuf is NULL\n");
@@ -239,7 +239,7 @@ void
rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
{
unsigned int len;
- unsigned nb_segs;
+ unsigned int nb_segs;
__rte_mbuf_sanity_check(m, 1);
@@ -584,6 +584,9 @@ struct rte_mbuf {
} __rte_cache_aligned;
+/**< Maximum number of @nb_segs allowed. */
+#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX
+
/**
* Prefetch the first part of the mbuf
*
@@ -1447,7 +1450,7 @@ static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
{
struct rte_mbuf *mc, *mi, **prev;
uint32_t pktlen;
- uint8_t nseg;
+ uint16_t nseg;
if (unlikely ((mc = rte_pktmbuf_alloc(mp)) == NULL))
return NULL;
@@ -1807,14 +1810,14 @@ static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
*
* @return
* - 0, on success.
- * - -EOVERFLOW, if the chain is full (256 entries)
+ * - -EOVERFLOW, if the chain segment limit exceeded
*/
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
struct rte_mbuf *cur_tail;
/* Check for number-of-segments-overflow */
- if (head->nb_segs + tail->nb_segs >= 1 << (sizeof(head->nb_segs) * 8))
+ if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
return -EOVERFLOW;
/* Chain 'tail' onto the old tail */
@@ -1822,7 +1825,7 @@ static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail
cur_tail->next = tail;
/* accumulate number of segments and total length. */
- head->nb_segs = (uint8_t)(head->nb_segs + tail->nb_segs);
+ head->nb_segs += tail->nb_segs;
head->pkt_len += tail->pkt_len;
/* pkt_len is only set in the head */
@@ -139,7 +139,7 @@ pdump_pktmbuf_copy(struct rte_mbuf *m, struct rte_mempool *mp)
{
struct rte_mbuf *m_dup, *seg, **prev;
uint32_t pktlen;
- uint8_t nseg;
+ uint16_t nseg;
m_dup = rte_pktmbuf_alloc(mp);
if (unlikely(m_dup == NULL))