[dpdk-dev] [PATCH] ixgbe: TCP/UDP segmentation offload support on 82599.

Qinglai Xiao jigsaw at gmail.com
Fri Oct 4 19:06:53 CEST 2013


Add support for TCP/UDP segmentation offload (TSO) on 82599.
The user turns TSO on by setting a nonzero MSS in the first mbuf of a frame.
The L2 and L3 lengths, together with the checksum offload flags, must also be
set in the first mbuf; otherwise the driver aborts the transmission.
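
For illustration only (not part of this patch), a caller might prepare the
first mbuf of a frame as follows. The helper name and the 1460-byte MSS
(typical for a 1500-byte MTU) are assumptions; the field layout follows the
current rte_mbuf:

    #include <rte_mbuf.h>

    /* Sketch: mark the first mbuf of a frame for TSO. */
    static void
    app_request_tso(struct rte_mbuf *m)
    {
        /* L2/L3 lengths let the PMD build the TX context descriptor. */
        m->pkt.vlan_macip.f.l2_len = 14;  /* Ethernet header */
        m->pkt.vlan_macip.f.l3_len = 20;  /* IPv4 header without options */
        /* A nonzero MSS turns TSO on for this frame. */
        m->pkt.hash.mss = 1460;
        /* Checksum offload flags must be set, or the driver aborts the send. */
        m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
    }

Note that mss lives in the hash union, which otherwise carries RX metadata,
so the field is meaningful only at transmit time.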
---
 lib/librte_mbuf/rte_mbuf.h        |    4 ++++
 lib/librte_pmd_ixgbe/ixgbe_rxtx.c |   32 +++++++++++++++++++++++++++++---
 2 files changed, 33 insertions(+), 3 deletions(-)

diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index d914562..ea4bb88 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -159,6 +159,10 @@ struct rte_pktmbuf {
 			uint16_t id;
 		} fdir;             /**< Filter identifier if FDIR enabled */
 		uint32_t sched;     /**< Hierarchical scheduler */
+		uint16_t mss;       /**< Maximum Segment Size. If nonzero,
+					 TSO is enabled. The user is responsible
+					 for setting vlan_macip and the TCP/IP
+					 checksum flags accordingly. */
 	} hash;                 /**< hash information */
 };
 
diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 5c8668e..63d7f8a 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -498,7 +498,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 static inline void
 ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
 		volatile struct ixgbe_adv_tx_context_desc *ctx_txd,
-		uint16_t ol_flags, uint32_t vlan_macip_lens)
+		uint16_t ol_flags, uint32_t vlan_macip_lens, uint16_t mss)
 {
 	uint32_t type_tucmd_mlhl;
 	uint32_t mss_l4len_idx;
@@ -520,6 +520,10 @@ ixgbe_set_xmit_ctx(struct igb_tx_queue* txq,
 
 	/* Specify which HW CTX to upload. */
 	mss_l4len_idx = (ctx_idx << IXGBE_ADVTXD_IDX_SHIFT);
+
+	/* MSS is required for TSO; the caller must set mss accordingly. */
+	mss_l4len_idx |= (uint32_t)mss << IXGBE_ADVTXD_MSS_SHIFT;
+
 	switch (ol_flags & PKT_TX_L4_MASK) {
 	case PKT_TX_UDP_CKSUM:
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_UDP |
@@ -694,6 +698,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint32_t vlan_macip_lens;
 	uint32_t ctx = 0;
 	uint32_t new_ctx;
+	uint16_t mss;
 
 	txq = tx_queue;
 	sw_ring = txq->sw_ring;
@@ -719,10 +724,25 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 * are needed for offload functionality.
 		 */
 		ol_flags = tx_pkt->ol_flags;
+
 		vlan_macip_lens = tx_pkt->pkt.vlan_macip.data;
+		mss = tx_pkt->pkt.hash.mss;
 
 		/* If hardware offload required */
 		tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
+
+		/*
+		 * If mss is set, we assume TSO is required.
+		 *
+		 * With TSO enabled, the caller must also set the offload bits
+		 * accordingly; otherwise the packet has to be dropped, because
+		 * the driver has no knowledge of the L2 and L3 lengths.
+		 */
+		if (!tx_ol_req && mss) {
+			PMD_TX_LOG(DEBUG, "MSS set without offload bits; abort sending.");
+			goto end_of_tx;
+		}
+
 		if (tx_ol_req) {
 			/* If new context need be built or reuse the exist ctx. */
 			ctx = what_advctx_update(txq, tx_ol_req,
@@ -841,6 +861,11 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		cmd_type_len = IXGBE_ADVTXD_DTYP_DATA |
 			IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+		/* Set the TSE bit to enable segmentation for this packet */
+		if (mss)
+			cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
 		olinfo_status = (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 #ifdef RTE_LIBRTE_IEEE1588
 		if (ol_flags & PKT_TX_IEEE1588_TMST)
@@ -868,7 +893,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				}
 
 				ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-				    vlan_macip_lens);
+				    vlan_macip_lens, mss);
 
 				txe->last_id = tx_last;
 				tx_id = txe->next_id;
@@ -3392,7 +3417,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev)
 
 	/* Enable TX CRC (checksum offload requirement) */
 	hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-	hlreg0 |= IXGBE_HLREG0_TXCRCEN;
+	/* IXGBE_HLREG0_TXPADEN is required for TCP segmentation offload */
+	hlreg0 |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN;
 	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
 	/* Setup the Base and Length of the Tx Descriptor Rings */
-- 
1.7.10.4


