[PATCH v2] net/ice: add diagnostic support in TX path

Mingjin Ye mingjinx.ye at intel.com
Fri Jan 5 11:11:25 CET 2024


Currently, the only way to enable diagnostics for the TX path is to
modify the application source code, which makes faults difficult to
diagnose.

This patch introduces the devarg option "mbuf_check"; its parameters
select which diagnostics to enable.

Supported cases: mbuf, size, segment, offload.
 1. mbuf: check for corrupted mbufs.
 2. size: check min/max packet length against the HW spec.
 3. segment: check that the number of mbuf segments does not exceed
    the HW limit.
 4. offload: check for any unsupported offload flags.

parameter format: mbuf_check=<case> or mbuf_check=[<case1>,<case2>...]
eg: dpdk-testpmd -a 0000:81:01.0,mbuf_check=[mbuf,size] -- -i
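
Once enabled, the error counter can be read and cleared from a testpmd
session (counter name as added by this patch; the value shown is
illustrative only):

  testpmd> show port xstats 0
  ...
  tx_mbuf_error_packets: 3
  ...
  testpmd> clear port xstats 0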

Signed-off-by: Mingjin Ye <mingjinx.ye at intel.com>
---
v2: rebase.
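
Usage sketch (not part of the patch): an application can poll the new
counter through the generic ethdev xstats API. Only the counter name
"tx_mbuf_error_packets" is assumed from this patch; everything else is
standard rte_ethdev API.

  #include <stdlib.h>
  #include <string.h>
  #include <rte_ethdev.h>

  /* Return the current tx_mbuf_error_packets value for a port,
   * or 0 if the counter is absent or a lookup step fails.
   */
  static uint64_t
  get_tx_mbuf_errors(uint16_t port_id)
  {
    struct rte_eth_xstat_name *names;
    struct rte_eth_xstat *stats;
    uint64_t value = 0;
    int n, i;

    /* A NULL array queries only the number of xstats. */
    n = rte_eth_xstats_get_names(port_id, NULL, 0);
    if (n <= 0)
      return 0;

    names = calloc(n, sizeof(*names));
    stats = calloc(n, sizeof(*stats));
    if (names == NULL || stats == NULL)
      goto out;

    if (rte_eth_xstats_get_names(port_id, names, n) != n ||
        rte_eth_xstats_get(port_id, stats, n) != n)
      goto out;

    /* When fetching the full arrays, stats[i] matches names[i]. */
    for (i = 0; i < n; i++) {
      if (strcmp(names[i].name, "tx_mbuf_error_packets") == 0) {
        value = stats[i].value;
        break;
      }
    }
  out:
    free(names);
    free(stats);
    return value;
  }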
---
 doc/guides/nics/ice.rst      |  13 +++
 drivers/net/ice/ice_ethdev.c | 104 ++++++++++++++++++++++-
 drivers/net/ice/ice_ethdev.h |  24 ++++++
 drivers/net/ice/ice_rxtx.c   | 158 ++++++++++++++++++++++++++++++++---
 drivers/net/ice/ice_rxtx.h   |  20 +++++
 5 files changed, 308 insertions(+), 11 deletions(-)

diff --git a/doc/guides/nics/ice.rst b/doc/guides/nics/ice.rst
index bafb3ba022..d1aee811b3 100644
--- a/doc/guides/nics/ice.rst
+++ b/doc/guides/nics/ice.rst
@@ -257,6 +257,19 @@ Runtime Configuration
   As a trade-off, this configuration may cause the packet processing performance
   degradation due to the PCI bandwidth limitation.
 
+- ``Tx diagnostics`` (default ``not enabled``)
+
+  Set the ``devargs`` parameter ``mbuf_check`` to enable Tx diagnostics. For example,
+  ``-a 18:01.0,mbuf_check=<case>`` or ``-a 18:01.0,mbuf_check=[<case1>,<case2>...]``.
+  The error counts are collected in the ``tx_mbuf_error_packets`` xstats counter and
+  can be retrieved with ``xstats_get``, e.g. ``testpmd> show port xstats all``.
+  Supported cases:
+
+  *   ``mbuf``: Check for corrupted mbufs.
+  *   ``size``: Check min/max packet length against the HW spec.
+  *   ``segment``: Check that the number of mbuf segments does not exceed the HW limit.
+  *   ``offload``: Check for any unsupported offload flags.
+
 Driver compilation and testing
 ------------------------------
 
diff --git a/drivers/net/ice/ice_ethdev.c b/drivers/net/ice/ice_ethdev.c
index 72e13f95f8..254993b813 100644
--- a/drivers/net/ice/ice_ethdev.c
+++ b/drivers/net/ice/ice_ethdev.c
@@ -12,6 +12,7 @@
 #include <unistd.h>
 
 #include <rte_tailq.h>
+#include <rte_os_shim.h>
 
 #include "eal_firmware.h"
 
@@ -34,6 +35,7 @@
 #define ICE_HW_DEBUG_MASK_ARG     "hw_debug_mask"
 #define ICE_ONE_PPS_OUT_ARG       "pps_out"
 #define ICE_RX_LOW_LATENCY_ARG    "rx_low_latency"
+#define ICE_MBUF_CHECK_ARG        "mbuf_check"
 
 #define ICE_CYCLECOUNTER_MASK  0xffffffffffffffffULL
 
@@ -49,6 +51,7 @@ static const char * const ice_valid_args[] = {
 	ICE_ONE_PPS_OUT_ARG,
 	ICE_RX_LOW_LATENCY_ARG,
 	ICE_DEFAULT_MAC_DISABLE,
+	ICE_MBUF_CHECK_ARG,
 	NULL
 };
 
@@ -319,6 +322,14 @@ static const struct ice_xstats_name_off ice_stats_strings[] = {
 #define ICE_NB_ETH_XSTATS (sizeof(ice_stats_strings) / \
 		sizeof(ice_stats_strings[0]))
 
+static const struct ice_xstats_name_off ice_mbuf_strings[] = {
+	{"tx_mbuf_error_packets", offsetof(struct ice_mbuf_stats,
+		tx_pkt_errors)},
+};
+
+#define ICE_NB_MBUF_XSTATS (sizeof(ice_mbuf_strings) / \
+		sizeof(ice_mbuf_strings[0]))
+
 static const struct ice_xstats_name_off ice_hw_port_strings[] = {
 	{"tx_link_down_dropped", offsetof(struct ice_hw_port_stats,
 		tx_dropped_link_down)},
@@ -2061,6 +2072,50 @@ handle_pps_out_arg(__rte_unused const char *key, const char *value,
 	return 0;
 }
 
+static int
+ice_parse_mbuf_check(__rte_unused const char *key, const char *value, void *args)
+{
+	char *cur;
+	char *tmp;
+	int str_len;
+	int valid_len;
+
+	int ret = 0;
+	uint64_t *mc_flags = args;
+	char *str2 = strdup(value);
+	if (str2 == NULL)
+		return -1;
+
+	str_len = strlen(str2);
+	if (str2[0] == '[' && str2[str_len - 1] == ']') {
+		if (str_len < 3) {
+			ret = -1;
+			goto check_end;
+		}
+		valid_len = str_len - 2;
+		memmove(str2, str2 + 1, valid_len);
+		memset(str2 + valid_len, '\0', 2);
+	}
+	cur = strtok_r(str2, ",", &tmp);
+	while (cur != NULL) {
+		if (!strcmp(cur, "mbuf"))
+			*mc_flags |= ICE_MBUF_CHECK_F_TX_MBUF;
+		else if (!strcmp(cur, "size"))
+			*mc_flags |= ICE_MBUF_CHECK_F_TX_SIZE;
+		else if (!strcmp(cur, "segment"))
+			*mc_flags |= ICE_MBUF_CHECK_F_TX_SEGMENT;
+		else if (!strcmp(cur, "offload"))
+			*mc_flags |= ICE_MBUF_CHECK_F_TX_OFFLOAD;
+		else
+			PMD_DRV_LOG(ERR, "Unsupported mbuf check type: %s", cur);
+		cur = strtok_r(NULL, ",", &tmp);
+	}
+
+check_end:
+	free(str2);
+	return ret;
+}
+
 static int ice_parse_devargs(struct rte_eth_dev *dev)
 {
 	struct ice_adapter *ad =
@@ -2117,6 +2172,14 @@ static int ice_parse_devargs(struct rte_eth_dev *dev)
 	if (ret)
 		goto bail;
 
+	ret = rte_kvargs_process(kvlist, ICE_MBUF_CHECK_ARG,
+				 &ice_parse_mbuf_check, &ad->mc_flags);
+	if (ret)
+		goto bail;
+
+	if (ad->mc_flags)
+		ad->devargs.mbuf_check = 1;
+
 	ret = rte_kvargs_process(kvlist, ICE_RX_LOW_LATENCY_ARG,
 				 &parse_bool, &ad->devargs.rx_low_latency);
 
@@ -3802,6 +3865,7 @@ ice_dev_start(struct rte_eth_dev *dev)
 	max_frame_size = pf->dev_data->mtu ?
 		pf->dev_data->mtu + ICE_ETH_OVERHEAD :
 		ICE_FRAME_SIZE_MAX;
+	ad->max_pkt_len = max_frame_size;
 
 	/* Set the max frame size to HW*/
 	ice_aq_set_mac_cfg(hw, max_frame_size, false, NULL);
@@ -6161,6 +6225,9 @@ ice_stats_reset(struct rte_eth_dev *dev)
 	/* read the stats, reading current register values into offset */
 	ice_read_stats_registers(pf, hw);
 
+	memset(&pf->mbuf_stats, 0,
+		sizeof(struct ice_mbuf_stats));
+
 	return 0;
 }
 
@@ -6169,17 +6236,33 @@ ice_xstats_calc_num(void)
 {
 	uint32_t num;
 
-	num = ICE_NB_ETH_XSTATS + ICE_NB_HW_PORT_XSTATS;
+	num = ICE_NB_ETH_XSTATS + ICE_NB_MBUF_XSTATS + ICE_NB_HW_PORT_XSTATS;
 
 	return num;
 }
 
+static void
+ice_update_mbuf_stats(struct rte_eth_dev *ethdev,
+		struct ice_mbuf_stats *mbuf_stats)
+{
+	uint16_t idx;
+	struct ice_tx_queue *txq;
+
+	for (idx = 0; idx < ethdev->data->nb_tx_queues; idx++) {
+		txq = ethdev->data->tx_queues[idx];
+		mbuf_stats->tx_pkt_errors += txq->mbuf_errors;
+	}
+}
+
 static int
 ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	       unsigned int n)
 {
 	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
 	struct ice_hw *hw = ICE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ice_adapter *adapter =
+		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct ice_mbuf_stats mbuf_stats = {0};
 	unsigned int i;
 	unsigned int count;
 	struct ice_hw_port_stats *hw_stats = &pf->stats;
@@ -6195,6 +6278,9 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 
 	count = 0;
 
+	if (adapter->devargs.mbuf_check)
+		ice_update_mbuf_stats(dev, &mbuf_stats);
+
 	/* Get stats from ice_eth_stats struct */
 	for (i = 0; i < ICE_NB_ETH_XSTATS; i++) {
 		xstats[count].value =
@@ -6204,6 +6290,15 @@ ice_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 		count++;
 	}
 
+	/* Get stats from ice_mbuf_stats struct */
+	for (i = 0; i < ICE_NB_MBUF_XSTATS; i++) {
+		xstats[count].value =
+			*(uint64_t *)((char *)&mbuf_stats +
+					ice_mbuf_strings[i].offset);
+		xstats[count].id = count;
+		count++;
+	}
+
 	/* Get individual stats from ice_hw_port struct */
 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
 		xstats[count].value =
@@ -6235,6 +6330,13 @@ static int ice_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 		count++;
 	}
 
+	/* Get stats from ice_mbuf_stats struct */
+	for (i = 0; i < ICE_NB_MBUF_XSTATS; i++) {
+		strlcpy(xstats_names[count].name, ice_mbuf_strings[i].name,
+			sizeof(xstats_names[count].name));
+		count++;
+	}
+
 	/* Get individual stats from ice_hw_port struct */
 	for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
 		strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index fa4981ed14..023578f2cf 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -507,6 +507,10 @@ struct ice_tm_conf {
 	bool clear_on_fail;
 };
 
+struct ice_mbuf_stats {
+	uint64_t tx_pkt_errors;
+};
+
 struct ice_pf {
 	struct ice_adapter *adapter; /* The adapter this PF associate to */
 	struct ice_vsi *main_vsi; /* pointer to main VSI structure */
@@ -536,6 +540,7 @@ struct ice_pf {
 	uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
 	struct ice_hw_port_stats stats_offset;
 	struct ice_hw_port_stats stats;
+	struct ice_mbuf_stats mbuf_stats;
 	/* internal packet statistics, it should be excluded from the total */
 	struct ice_eth_stats internal_stats_offset;
 	struct ice_eth_stats internal_stats;
@@ -574,6 +579,7 @@ struct ice_devargs {
 	uint8_t xtr_flag_offs[PROTO_XTR_MAX];
 	/* Name of the field. */
 	char xtr_field_name[RTE_MBUF_DYN_NAMESIZE];
+	int mbuf_check;
 };
 
 /**
@@ -592,6 +598,21 @@ struct ice_rss_prof_info {
 	bool symm;
 };
 
+#define ICE_MBUF_CHECK_F_TX_MBUF        (1ULL << 0)
+#define ICE_MBUF_CHECK_F_TX_SIZE        (1ULL << 1)
+#define ICE_MBUF_CHECK_F_TX_SEGMENT     (1ULL << 2)
+#define ICE_MBUF_CHECK_F_TX_OFFLOAD     (1ULL << 3)
+
+enum ice_tx_burst_type {
+	ICE_TX_DEFAULT,
+	ICE_TX_SIMPLE,
+	ICE_TX_VEC_SSE,
+	ICE_TX_VEC_AVX2,
+	ICE_TX_VEC_AVX2_OFFLOAD,
+	ICE_TX_VEC_AVX512,
+	ICE_TX_VEC_AVX512_OFFLOAD,
+};
+
 /**
  * Structure to store private data for each PF/VF instance.
  */
@@ -622,6 +643,9 @@ struct ice_adapter {
 	/* Set bit if the engine is disabled */
 	unsigned long disabled_engine_mask;
 	struct ice_parser *psr;
+	uint64_t mc_flags; /* mbuf check flags. */
+	uint16_t max_pkt_len; /* Maximum packet length */
+	enum ice_tx_burst_type tx_burst_type;
 #ifdef RTE_ARCH_X86
 	bool rx_use_avx2;
 	bool rx_use_avx512;
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 73e47ae92d..ac7fac20ce 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -3678,6 +3678,132 @@ ice_check_empty_mbuf(struct rte_mbuf *tx_pkt)
 	return 0;
 }
 
+static
+const eth_tx_burst_t ice_tx_pkt_burst_ops[] = {
+	[ICE_TX_DEFAULT] = ice_xmit_pkts,
+	[ICE_TX_SIMPLE] = ice_xmit_pkts_simple,
+#ifdef RTE_ARCH_X86
+	[ICE_TX_VEC_SSE] = ice_xmit_pkts_vec,
+	[ICE_TX_VEC_AVX2] = ice_xmit_pkts_vec_avx2,
+	[ICE_TX_VEC_AVX2_OFFLOAD] = ice_xmit_pkts_vec_avx2_offload,
+#ifdef CC_AVX512_SUPPORT
+	[ICE_TX_VEC_AVX512] = ice_xmit_pkts_vec_avx512,
+	[ICE_TX_VEC_AVX512_OFFLOAD] = ice_xmit_pkts_vec_avx512_offload,
+#endif
+#endif
+};
+
+/* Tx mbuf check */
+static uint16_t
+ice_xmit_pkts_check(void *tx_queue, struct rte_mbuf **tx_pkts,
+	      uint16_t nb_pkts)
+{
+	struct ice_tx_queue *txq = tx_queue;
+	uint16_t idx;
+	struct rte_mbuf *mb;
+	bool pkt_error = false;
+	uint16_t good_pkts = nb_pkts;
+	const char *reason = NULL;
+	struct ice_adapter *adapter = txq->vsi->adapter;
+	enum ice_tx_burst_type tx_burst_type =
+		adapter->tx_burst_type;
+	uint64_t ol_flags;
+
+	for (idx = 0; idx < nb_pkts; idx++) {
+		mb = tx_pkts[idx];
+		ol_flags = mb->ol_flags;
+
+		if ((adapter->mc_flags & ICE_MBUF_CHECK_F_TX_MBUF) &&
+			(rte_mbuf_check(mb, 0, &reason) != 0)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: %s\n", reason);
+			pkt_error = true;
+			break;
+		}
+
+		if ((adapter->mc_flags & ICE_MBUF_CHECK_F_TX_SIZE) &&
+			(mb->data_len > mb->pkt_len ||
+			mb->data_len < ICE_TX_MIN_PKT_LEN ||
+			mb->data_len > adapter->max_pkt_len)) {
+			PMD_TX_LOG(ERR, "INVALID mbuf: data_len (%u) is out "
+			"of range, reasonable range (%d - %u)\n", mb->data_len,
+			ICE_TX_MIN_PKT_LEN, adapter->max_pkt_len);
+			pkt_error = true;
+			break;
+		}
+
+		if (adapter->mc_flags & ICE_MBUF_CHECK_F_TX_SEGMENT) {
+			if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+				/**
+				 * No TSO case: nb_segs and pkt_len must not
+				 * exceed the HW limits.
+				 */
+				if (mb->nb_segs > ICE_TX_MTU_SEG_MAX) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs (%d) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->nb_segs,
+					ICE_TX_MTU_SEG_MAX);
+					pkt_error = true;
+					break;
+				}
+				if (mb->pkt_len > ICE_FRAME_SIZE_MAX) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: pkt_len (%u) exceeds "
+					"HW limit, maximum allowed value is %d\n", mb->pkt_len,
+					ICE_FRAME_SIZE_MAX);
+					pkt_error = true;
+					break;
+				}
+			} else {
+				/** TSO case: tso_segsz and nb_segs must not
+				 * exceed the HW limits.
+				 */
+				if (mb->tso_segsz < ICE_MIN_TSO_MSS ||
+					mb->tso_segsz > ICE_MAX_TSO_MSS) {
+					/**
+					 * MSS outside the supported range is considered malicious.
+					 */
+					PMD_TX_LOG(ERR, "INVALID mbuf: tso_segsz (%u) is out "
+					"of range, reasonable range (%d - %u)\n", mb->tso_segsz,
+					ICE_MIN_TSO_MSS, ICE_MAX_TSO_MSS);
+					pkt_error = true;
+					break;
+				}
+				if (mb->nb_segs > txq->nb_tx_desc) {
+					PMD_TX_LOG(ERR, "INVALID mbuf: nb_segs "
+					"(%d) exceeds the Tx ring length (%u)\n",
+					mb->nb_segs, txq->nb_tx_desc);
+					pkt_error = true;
+					break;
+				}
+			}
+		}
+
+		if (adapter->mc_flags & ICE_MBUF_CHECK_F_TX_OFFLOAD) {
+			if (ol_flags & ICE_TX_OFFLOAD_NOTSUP_MASK) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"is not supported\n");
+				pkt_error = true;
+				break;
+			}
+
+			if (!rte_validate_tx_offload(mb)) {
+				PMD_TX_LOG(ERR, "INVALID mbuf: TX offload "
+				"setup error\n");
+				pkt_error = true;
+				break;
+			}
+		}
+	}
+
+	if (pkt_error) {
+		txq->mbuf_errors++;
+		good_pkts = idx;
+		if (good_pkts == 0)
+			return 0;
+	}
+
+	return ice_tx_pkt_burst_ops[tx_burst_type](tx_queue,
+								tx_pkts, good_pkts);
+}
+
 uint16_t
 ice_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	      uint16_t nb_pkts)
@@ -3746,6 +3872,8 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 {
 	struct ice_adapter *ad =
 		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	enum ice_tx_burst_type tx_burst_type = ICE_TX_DEFAULT;
+	int mbuf_check = ad->devargs.mbuf_check;
 #ifdef RTE_ARCH_X86
 	struct ice_tx_queue *txq;
 	int i;
@@ -3800,14 +3928,13 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX512 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst =
-					ice_xmit_pkts_vec_avx512_offload;
+				tx_burst_type = ICE_TX_VEC_AVX512_OFFLOAD;
 				dev->tx_pkt_prepare = ice_prep_pkts;
 			} else {
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX512 Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+				tx_burst_type = ICE_TX_VEC_AVX512;
 			}
 #endif
 		} else {
@@ -3815,32 +3942,43 @@ ice_set_tx_function(struct rte_eth_dev *dev)
 				PMD_DRV_LOG(NOTICE,
 					    "Using AVX2 OFFLOAD Vector Tx (port %d).",
 					    dev->data->port_id);
-				dev->tx_pkt_burst =
-					ice_xmit_pkts_vec_avx2_offload;
+				tx_burst_type = ICE_TX_VEC_AVX2_OFFLOAD;
 				dev->tx_pkt_prepare = ice_prep_pkts;
 			} else {
 				PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
 					    ad->tx_use_avx2 ? "avx2 " : "",
 					    dev->data->port_id);
-				dev->tx_pkt_burst = ad->tx_use_avx2 ?
-						    ice_xmit_pkts_vec_avx2 :
-						    ice_xmit_pkts_vec;
+				tx_burst_type = ad->tx_use_avx2 ?
+							ICE_TX_VEC_AVX2 : ICE_TX_VEC_SSE;
 			}
 		}
 
+		if (mbuf_check) {
+			ad->tx_burst_type = tx_burst_type;
+			dev->tx_pkt_burst = ice_xmit_pkts_check;
+		} else {
+			dev->tx_pkt_burst = ice_tx_pkt_burst_ops[tx_burst_type];
+		}
 		return;
 	}
 #endif
 
 	if (ad->tx_simple_allowed) {
 		PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
-		dev->tx_pkt_burst = ice_xmit_pkts_simple;
+		tx_burst_type = ICE_TX_SIMPLE;
 		dev->tx_pkt_prepare = NULL;
 	} else {
 		PMD_INIT_LOG(DEBUG, "Normal tx finally be used.");
-		dev->tx_pkt_burst = ice_xmit_pkts;
+		tx_burst_type = ICE_TX_DEFAULT;
 		dev->tx_pkt_prepare = ice_prep_pkts;
 	}
+
+	if (mbuf_check) {
+		ad->tx_burst_type = tx_burst_type;
+		dev->tx_pkt_burst = ice_xmit_pkts_check;
+	} else {
+		dev->tx_pkt_burst = ice_tx_pkt_burst_ops[tx_burst_type];
+	}
 }
 
 static const struct {
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index bd2c4abec9..088ddba600 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -45,6 +45,25 @@
 
 #define ICE_TX_MIN_PKT_LEN 17
 
+#define ICE_TX_OFFLOAD_MASK (    \
+		RTE_MBUF_F_TX_OUTER_IPV6 |	 \
+		RTE_MBUF_F_TX_OUTER_IPV4 |	 \
+		RTE_MBUF_F_TX_OUTER_IP_CKSUM |  \
+		RTE_MBUF_F_TX_VLAN |        \
+		RTE_MBUF_F_TX_IPV6 |		 \
+		RTE_MBUF_F_TX_IPV4 |		 \
+		RTE_MBUF_F_TX_IP_CKSUM |        \
+		RTE_MBUF_F_TX_L4_MASK |         \
+		RTE_MBUF_F_TX_IEEE1588_TMST |	 \
+		RTE_MBUF_F_TX_TCP_SEG |         \
+		RTE_MBUF_F_TX_QINQ |        \
+		RTE_MBUF_F_TX_TUNNEL_MASK |	 \
+		RTE_MBUF_F_TX_UDP_SEG |	 \
+		RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
+
+#define ICE_TX_OFFLOAD_NOTSUP_MASK \
+		(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ICE_TX_OFFLOAD_MASK)
+
 extern uint64_t ice_timestamp_dynflag;
 extern int ice_timestamp_dynfield_offset;
 
@@ -164,6 +183,7 @@ struct ice_tx_queue {
 	struct ice_vsi *vsi; /* the VSI this queue belongs to */
 	uint16_t tx_next_dd;
 	uint16_t tx_next_rs;
+	uint64_t mbuf_errors;
 	bool tx_deferred_start; /* don't start this queue in dev start */
 	bool q_set; /* indicate if tx queue has been configured */
 	ice_tx_release_mbufs_t tx_rel_mbufs;
-- 
2.25.1
