[PATCH 19.11] app/testpmd: perform SW IP checksum for GRO/GSO packets

Wenwu Ma wenwux.ma at intel.com
Fri Jul 8 04:53:43 CEST 2022


upstream commit 1945c64674b2b9ad55af0ef31f8a02ae0b747400

The GRO/GSO library doesn't re-calculate checksums for
merged/fragmented packets. If users want the packets to
have correct IP checksums, they should enable HW IP
checksum calculation on the port to which the packets are
transmitted. But if the port doesn't support HW IP
checksum offload, the checksum has to be computed in
software instead, which is what this patch adds to
csumonly for GRO/GSO packets.
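
For reference, an application can query at init time whether a
port offers the HW offload and decide on the SW fallback; a
minimal sketch against the 19.11 ethdev API (the helper name and
port_id are illustrative only, not part of this patch):

#include <stdint.h>
#include <rte_ethdev.h>

/* Return non-zero when the TX port can compute IPv4 checksums in HW;
 * otherwise the application has to fall back to SW checksumming
 * (e.g. rte_ipv4_cksum()) for GRO-merged / GSO-segmented packets.
 */
static int
port_has_hw_ipv4_cksum(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0;

	return (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM) != 0;
}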

Fixes: b7091f1dcfbc ("app/testpmd: enable the heavyweight mode TCP/IPv4 GRO")
Fixes: 52f38a2055ed ("app/testpmd: enable TCP/IPv4 VxLAN and GRE GSO")

Signed-off-by: Wenwu Ma <wenwux.ma at intel.com>
Reviewed-by: Jiayu Hu <jiayu.hu at intel.com>
Tested-by: Wei Ling <weix.ling at intel.com>
Acked-by: Yuying Zhang <yuying.zhang at intel.com>
---
 app/test-pmd/csumonly.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/app/test-pmd/csumonly.c b/app/test-pmd/csumonly.c
index 25091de881..117f14451d 100644
--- a/app/test-pmd/csumonly.c
+++ b/app/test-pmd/csumonly.c
@@ -731,6 +731,26 @@ pkt_copy_split(const struct rte_mbuf *pkt)
 	return md[0];
 }
 
+/*
+ * Re-calculate IP checksum for merged/fragmented packets.
+ */
+static void
+pkts_ip_csum_recalc(struct rte_mbuf **pkts_burst, const uint16_t nb_pkts, uint64_t tx_offloads)
+{
+	int i;
+	struct rte_ipv4_hdr *ipv4_hdr;
+	for (i = 0; i < nb_pkts; i++) {
+		if ((pkts_burst[i]->ol_flags & PKT_TX_IPV4) &&
+			(tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) == 0) {
+			ipv4_hdr = rte_pktmbuf_mtod_offset(pkts_burst[i],
+						struct rte_ipv4_hdr *,
+						pkts_burst[i]->l2_len);
+			ipv4_hdr->hdr_checksum = 0;
+			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
+		}
+	}
+}
+
 /*
  * Receive a burst of packets, and for each packet:
  *  - parse packet, and try to recognize a supported packet type (1)
@@ -1038,6 +1058,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 				fs->gro_times = 0;
 			}
 		}
+
+		pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
 	}
 
 	if (gso_ports[fs->tx_port].enable == 0)
@@ -1059,6 +1081,8 @@ pkt_burst_checksum_forward(struct fwd_stream *fs)
 
 		tx_pkts_burst = gso_segments;
 		nb_rx = nb_segments;
+
+		pkts_ip_csum_recalc(tx_pkts_burst, nb_rx, tx_offloads);
 	}
 
 	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
-- 
2.25.1


