[dpdk-stable] patch 'net/virtio: fix mbuf data and packet length mismatch' has been queued to LTS release 17.11.10

luca.boccassi at gmail.com luca.boccassi at gmail.com
Thu Dec 19 15:33:02 CET 2019


Hi,

FYI, your patch has been queued to LTS release 17.11.10

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 12/21/19. So please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Thanks.

Luca Boccassi

---
>From b5f254957572c565952e534bb0e8ef024a6c98f8 Mon Sep 17 00:00:00 2001
From: Marvin Liu <yong.liu at intel.com>
Date: Mon, 23 Sep 2019 22:05:11 +0800
Subject: [PATCH] net/virtio: fix mbuf data and packet length mismatch

[ upstream commit 1ae55ad38e5e00b61704e4cb29037098b143688a ]

If reserve virtio header room by function rte_pktmbuf_prepend, both
segment data length and packet length of mbuf will be increased.
Data length will be equal to descriptor length, while packet length
should be decreased as virtio-net header won't be taken into packet.
Thus will cause mismatch in mbuf structure. Fix this issue by access
mbuf data directly and increase descriptor length if it is needed.

Fixes: 58169a9c8153 ("net/virtio: support Tx checksum offload")
Fixes: 892dc798fa9c ("net/virtio: implement Tx path for packed queues")
Fixes: 4905ed3a523f ("net/virtio: optimize Tx enqueue for packed ring")
Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")

Reported-by: Stephen Hemminger <stephen at networkplumber.org>
Signed-off-by: Marvin Liu <yong.liu at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 drivers/net/virtio/virtio_rxtx.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index b37a186b62..40255eee4e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -36,6 +36,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <errno.h>
+#include <stdbool.h>
 
 #include <rte_cycles.h>
 #include <rte_memory.h>
@@ -285,6 +286,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	uint16_t head_size = vq->hw->vtnet_hdr_size;
 	struct virtio_net_hdr *hdr;
 	int offload;
+	bool prepend_header = false;
 
 	offload = tx_offload_enabled(vq->hw);
 	head_idx = vq->vq_desc_head_idx;
@@ -297,12 +299,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	if (can_push) {
 		/* prepend cannot fail, checked by caller */
-		hdr = (struct virtio_net_hdr *)
-			rte_pktmbuf_prepend(cookie, head_size);
-		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
-		 * which is wrong. Below subtract restores correct pkt size.
-		 */
-		cookie->pkt_len -= head_size;
+		hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
+			cookie->data_off - head_size;
+		prepend_header = true;
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (offload == 0) {
 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
@@ -388,6 +387,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	do {
 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len   = cookie->data_len;
+		if (prepend_header) {
+			start_dp[idx].len += head_size;
+			prepend_header = false;
+		}
 		start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
 		idx = start_dp[idx].next;
 	} while ((cookie = cookie->next) != NULL);
-- 
2.20.1

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-12-19 14:32:27.736094674 +0000
+++ 0035-net-virtio-fix-mbuf-data-and-packet-length-mismatch.patch	2019-12-19 14:32:25.825292273 +0000
@@ -1,8 +1,10 @@
-From 1ae55ad38e5e00b61704e4cb29037098b143688a Mon Sep 17 00:00:00 2001
+From b5f254957572c565952e534bb0e8ef024a6c98f8 Mon Sep 17 00:00:00 2001
 From: Marvin Liu <yong.liu at intel.com>
 Date: Mon, 23 Sep 2019 22:05:11 +0800
 Subject: [PATCH] net/virtio: fix mbuf data and packet length mismatch
 
+[ upstream commit 1ae55ad38e5e00b61704e4cb29037098b143688a ]
+
 If reserve virtio header room by function rte_pktmbuf_prepend, both
 segment data length and packet length of mbuf will be increased.
 Data length will be equal to descriptor length, while packet length
@@ -14,109 +16,35 @@
 Fixes: 892dc798fa9c ("net/virtio: implement Tx path for packed queues")
 Fixes: 4905ed3a523f ("net/virtio: optimize Tx enqueue for packed ring")
 Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
-Cc: stable at dpdk.org
 
 Reported-by: Stephen Hemminger <stephen at networkplumber.org>
 Signed-off-by: Marvin Liu <yong.liu at intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
 ---
- drivers/net/virtio/virtio_rxtx.c | 44 +++++++++++++++++---------------
- 1 file changed, 24 insertions(+), 20 deletions(-)
+ drivers/net/virtio/virtio_rxtx.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
 
 diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
-index 42f2beb407..929aa4cbd3 100644
+index b37a186b62..40255eee4e 100644
 --- a/drivers/net/virtio/virtio_rxtx.c
 +++ b/drivers/net/virtio/virtio_rxtx.c
-@@ -640,9 +640,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
- 		dxp->ndescs = 1;
- 		virtio_update_packet_stats(&txvq->stats, cookies[i]);
- 
--		hdr = (struct virtio_net_hdr *)
--			rte_pktmbuf_prepend(cookies[i], head_size);
--		cookies[i]->pkt_len -= head_size;
-+		hdr = (struct virtio_net_hdr *)(char *)cookies[i]->buf_addr +
-+			cookies[i]->data_off - head_size;
- 
- 		/* if offload disabled, hdr is not zeroed yet, do it now */
- 		if (!vq->hw->has_tx_offload)
-@@ -651,9 +650,10 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
- 			virtqueue_xmit_offload(hdr, cookies[i], true);
- 
- 		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
--		start_dp[idx].len   = cookies[i]->data_len;
-+		start_dp[idx].len   = cookies[i]->data_len + head_size;
- 		start_dp[idx].flags = 0;
- 
-+
- 		vq_update_avail_ring(vq, idx);
- 
- 		idx++;
-@@ -687,9 +687,8 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
- 	flags = vq->vq_packed.cached_flags;
- 
- 	/* prepend cannot fail, checked by caller */
--	hdr = (struct virtio_net_hdr *)
--		rte_pktmbuf_prepend(cookie, head_size);
--	cookie->pkt_len -= head_size;
-+	hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-+		cookie->data_off - head_size;
- 
- 	/* if offload disabled, hdr is not zeroed yet, do it now */
- 	if (!vq->hw->has_tx_offload)
-@@ -698,7 +697,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
- 		virtqueue_xmit_offload(hdr, cookie, true);
- 
- 	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
--	dp->len  = cookie->data_len;
-+	dp->len  = cookie->data_len + head_size;
- 	dp->id   = id;
- 
- 	if (++vq->vq_avail_idx >= vq->vq_nentries) {
-@@ -730,6 +729,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+@@ -36,6 +36,7 @@
+ #include <stdlib.h>
+ #include <string.h>
+ #include <errno.h>
++#include <stdbool.h>
+ 
+ #include <rte_cycles.h>
+ #include <rte_memory.h>
+@@ -285,6 +286,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
  	uint16_t head_size = vq->hw->vtnet_hdr_size;
  	struct virtio_net_hdr *hdr;
- 	uint16_t prev;
-+	bool prepend_header = false;
- 
- 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
- 
-@@ -748,12 +748,9 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- 
- 	if (can_push) {
- 		/* prepend cannot fail, checked by caller */
--		hdr = (struct virtio_net_hdr *)
--			rte_pktmbuf_prepend(cookie, head_size);
--		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
--		 * which is wrong. Below subtract restores correct pkt size.
--		 */
--		cookie->pkt_len -= head_size;
-+		hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
-+			cookie->data_off - head_size;
-+		prepend_header = true;
- 
- 		/* if offload disabled, it is not zeroed below, do it now */
- 		if (!vq->hw->has_tx_offload)
-@@ -781,6 +778,11 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- 
- 		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
- 		start_dp[idx].len  = cookie->data_len;
-+		if (prepend_header) {
-+			start_dp[idx].len += head_size;
-+			prepend_header = false;
-+		}
-+
- 		if (likely(idx != head_idx)) {
- 			flags = cookie->next ? VRING_DESC_F_NEXT : 0;
- 			flags |= vq->vq_packed.cached_flags;
-@@ -822,6 +824,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
- 	uint16_t seg_num = cookie->nb_segs;
- 	uint16_t head_idx, idx;
- 	uint16_t head_size = vq->hw->vtnet_hdr_size;
+ 	int offload;
 +	bool prepend_header = false;
- 	struct virtio_net_hdr *hdr;
  
+ 	offload = tx_offload_enabled(vq->hw);
  	head_idx = vq->vq_desc_head_idx;
-@@ -837,12 +840,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+@@ -297,12 +299,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
  
  	if (can_push) {
  		/* prepend cannot fail, checked by caller */
@@ -129,10 +57,10 @@
 +		hdr = (struct virtio_net_hdr *)(char *)cookie->buf_addr +
 +			cookie->data_off - head_size;
 +		prepend_header = true;
- 
  		/* if offload disabled, it is not zeroed below, do it now */
- 		if (!vq->hw->has_tx_offload)
-@@ -881,6 +881,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
+ 		if (offload == 0) {
+ 			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+@@ -388,6 +387,10 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
  	do {
  		start_dp[idx].addr  = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
  		start_dp[idx].len   = cookie->data_len;


More information about the stable mailing list