[dpdk-dev] [PATCH 12/13] mlx5: add checksum offloading support

Adrien Mazarguil <adrien.mazarguil@6wind.com>
Mon Oct 5 19:53:08 CEST 2015


This is the same implementation as in the mlx4 PMD.

Signed-off-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
Signed-off-by: Nelio Laranjeiro <nelio.laranjeiro@6wind.com>
---
 drivers/net/mlx5/mlx5_rxq.c   | 14 +++++++
 drivers/net/mlx5/mlx5_rxtx.c  | 94 +++++++++++++++++++++++++++++++++++++++++++
 drivers/net/mlx5/mlx5_rxtx.h  |  2 +
 drivers/net/mlx5/mlx5_utils.h |  6 +++
 4 files changed, 116 insertions(+)
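
For context (not part of the patch): these offloads are consumed through the
regular ethdev/mbuf API. Below is a minimal, hypothetical application-side
sketch; the helper name, port_id and single RX/TX queue pair are assumptions,
not something this patch provides.

    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Hypothetical helper, not part of this patch: request RX checksum
     * validation at configure time. This is what toggles rxq->csum (and
     * rxq->csum_l2tun) in the changes below, provided the hardware
     * reports support. */
    static int
    configure_rx_checksum(uint8_t port_id)
    {
        struct rte_eth_conf conf = {
            .rxmode = {
                .hw_ip_checksum = 1,
            },
        };

        return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }

On the TX side, checksum insertion is requested per mbuf, e.g.
m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM; as the comment added to
mlx5_tx_burst() below explains, the hardware recognizes the packet type by
itself, so no checksum start/offset has to be supplied.
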

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d44bb10..8cfad17 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -565,6 +565,15 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
 	/* Number of descriptors and mbufs currently allocated. */
 	desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1));
 	mbuf_n = desc_n;
+	/* Toggle RX checksum offload if hardware supports it. */
+	if (priv->hw_csum) {
+		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+		rxq->csum = tmpl.csum;
+	}
+	if (priv->hw_csum_l2tun) {
+		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+		rxq->csum_l2tun = tmpl.csum_l2tun;
+	}
 	/* Enable scattered packets support for this queue if necessary. */
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -788,6 +797,11 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
 		rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
 	assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
 	rte_pktmbuf_free(buf);
+	/* Toggle RX checksum offload if hardware supports it. */
+	if (priv->hw_csum)
+		tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+	if (priv->hw_csum_l2tun)
+		tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
 	/* Enable scattered packets support for this queue if necessary. */
 	if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
 	    (dev->data->dev_conf.rxmode.max_rx_pkt_len >
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 960a3e5..668aff0 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -390,6 +390,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			++elts_comp;
 			send_flags |= IBV_EXP_QP_BURST_SIGNALED;
 		}
+		/* Should we enable HW CKSUM offload? */
+		if (buf->ol_flags &
+		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+			send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+			/* HW does not support checksum offloads at arbitrary
+			 * offsets but automatically recognizes the packet
+			 * type. For inner L3/L4 checksums, only VXLAN (UDP)
+			 * tunnels are currently supported. */
+			if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+				send_flags |= IBV_EXP_QP_BURST_TUNNEL;
+		}
 		if (likely(segs == 1)) {
 			uintptr_t addr;
 			uint32_t length;
@@ -491,6 +502,85 @@ stop:
 }
 
 /**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+	uint32_t pkt_type;
+
+	if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+		pkt_type =
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+				  RTE_PTYPE_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+				  RTE_PTYPE_L3_IPV6) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV4_PACKET,
+				  RTE_PTYPE_INNER_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV6_PACKET,
+				  RTE_PTYPE_INNER_L3_IPV6);
+	else
+		pkt_type =
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV4_PACKET,
+				  RTE_PTYPE_L3_IPV4) |
+			TRANSPOSE(flags,
+				  IBV_EXP_CQ_RX_IPV6_PACKET,
+				  RTE_PTYPE_L3_IPV6);
+	return pkt_type;
+}
+
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+{
+	uint32_t ol_flags = 0;
+
+	if (rxq->csum)
+		ol_flags |=
+			TRANSPOSE(~flags,
+				  IBV_EXP_CQ_RX_IP_CSUM_OK,
+				  PKT_RX_IP_CKSUM_BAD) |
+			TRANSPOSE(~flags,
+				  IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+				  PKT_RX_L4_CKSUM_BAD);
+	/*
+	 * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+	 * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+	 * (its value is 0).
+	 */
+	if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+		ol_flags |=
+			TRANSPOSE(~flags,
+				  IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+				  PKT_RX_IP_CKSUM_BAD) |
+			TRANSPOSE(~flags,
+				  IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+				  PKT_RX_L4_CKSUM_BAD);
+	return ol_flags;
+}
+
+/**
  * DPDK callback for RX with scattered packets support.
  *
  * @param dpdk_rxq
@@ -669,6 +759,8 @@ mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		NB_SEGS(pkt_buf) = j;
 		PORT(pkt_buf) = rxq->port_id;
 		PKT_LEN(pkt_buf) = pkt_buf_len;
+		pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
+		pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
 		/* Return packet. */
 		*(pkts++) = pkt_buf;
@@ -828,6 +920,8 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		NEXT(seg) = NULL;
 		PKT_LEN(seg) = len;
 		DATA_LEN(seg) = len;
+		seg->packet_type = rxq_cq_to_pkt_type(flags);
+		seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
 		/* Return packet. */
 		*(pkts++) = seg;
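
The two helpers above can be traced by hand. As an illustration only (the
flag combination is made up), assume a plain IPv4 packet whose IP checksum
was verified but whose L4 checksum failed, received on a queue with
rxq->csum set:

    /* Completion flags returned by poll_length_flags(). */
    uint32_t flags = IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IP_CSUM_OK;

    /* rxq_cq_to_pkt_type(flags): no tunnel bit, so the non-tunnel branch
     * runs and only the IPv4 flag transposes -> RTE_PTYPE_L3_IPV4. */

    /* rxq_cq_to_ol_flags(rxq, flags): TRANSPOSE() is applied to ~flags,
     * i.e. the absence of a "checksum OK" bit becomes a "checksum bad"
     * mbuf flag:
     *   IBV_EXP_CQ_RX_IP_CSUM_OK present in flags       -> no flag set
     *   IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK absent from flags -> PKT_RX_L4_CKSUM_BAD
     * so the mbuf ends up with ol_flags == PKT_RX_L4_CKSUM_BAD. */

The inversion is the point to note: completions report "checksum OK" bits
while the mbuf API reports "checksum bad" bits, hence the ~flags.
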
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 228dff6..0eb1e98 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -119,6 +119,8 @@ struct rxq {
 		struct rxq_elt (*no_sp)[]; /* RX elements. */
 	} elts;
 	unsigned int sp:1; /* Use scattered RX elements. */
+	unsigned int csum:1; /* Enable checksum offloading. */
+	unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
 	uint32_t mb_len; /* Length of a mp-issued mbuf. */
 	struct mlx5_rxq_stats stats; /* RX queue counters. */
 	unsigned int socket; /* CPU socket ID for allocations. */
diff --git a/drivers/net/mlx5/mlx5_utils.h b/drivers/net/mlx5/mlx5_utils.h
index e48e6b6..8ff075b 100644
--- a/drivers/net/mlx5/mlx5_utils.h
+++ b/drivers/net/mlx5/mlx5_utils.h
@@ -149,6 +149,12 @@ pmd_drv_log_basename(const char *s)
 #define NB_SEGS(m) ((m)->nb_segs)
 #define PORT(m) ((m)->port)
 
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+	(((from) >= (to)) ? \
+	 (((val) & (from)) / ((from) / (to))) : \
+	 (((val) & (from)) * ((to) / (from))))
+
 /* Allocate a buffer on the stack and fill it with a printf format string. */
 #define MKSTR(name, ...) \
 	char name[snprintf(NULL, 0, __VA_ARGS__) + 1]; \
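
TRANSPOSE() moves the bit selected by "from" to the position of "to" by
multiplying or dividing by the ratio of the two masks (both assumed to be
single bits); when both masks are constants, the selection between multiply
and divide is resolved at compile time. A small standalone check, for
illustration only (arbitrary bit values, macro copied from above):

    #include <assert.h>

    #define TRANSPOSE(val, from, to) \
        (((from) >= (to)) ? \
         (((val) & (from)) / ((from) / (to))) : \
         (((val) & (from)) * ((to) / (from))))

    int main(void)
    {
        /* Moving a set bit down divides by the mask ratio: 0x10 -> 0x02. */
        assert(TRANSPOSE(0x10, 0x10, 0x02) == 0x02);
        /* Moving a set bit up multiplies by the mask ratio: 0x02 -> 0x10. */
        assert(TRANSPOSE(0x02, 0x02, 0x10) == 0x10);
        /* A clear source bit always yields 0. */
        assert(TRANSPOSE(0x00, 0x10, 0x02) == 0);
        return 0;
    }
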
-- 
2.1.0


