patch 'vhost/crypto: fix build with GCC 12' has been queued to stable release 21.11.2

Kevin Traynor ktraynor at redhat.com
Fri Jun 24 17:01:46 CEST 2022


Hi,

FYI, your patch has been queued to stable release 21.11.2

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 06/27/22. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Queued patches are on a temporary branch at:
https://github.com/kevintraynor/dpdk-stable

This queued commit can be viewed at:
https://github.com/kevintraynor/dpdk-stable/commit/f69a61bde0e2d72021fd3c609fd4b62edc8f8951

Thanks.

Kevin

---
From f69a61bde0e2d72021fd3c609fd4b62edc8f8951 Mon Sep 17 00:00:00 2001
From: David Marchand <david.marchand at redhat.com>
Date: Thu, 16 Jun 2022 16:46:50 +0200
Subject: [PATCH] vhost/crypto: fix build with GCC 12
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[ upstream commit 4414bb67010dfec2559af52efe8f479b26d55447 ]

GCC 12 raises the following warning:

In file included from ../lib/mempool/rte_mempool.h:46,
                 from ../lib/mbuf/rte_mbuf.h:38,
                 from ../lib/vhost/vhost_crypto.c:7:
../lib/vhost/vhost_crypto.c: In function ‘rte_vhost_crypto_fetch_requests’:
../lib/eal/x86/include/rte_memcpy.h:371:9: warning: array subscript 1 is
     outside array bounds of ‘struct virtio_crypto_op_data_req[1]’
     [-Warray-bounds]
  371 | rte_mov32((uint8_t *)dst + 3 * 32, (const uint8_t *)src + 3 * 32);
      | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
../lib/vhost/vhost_crypto.c:1178:42: note: while referencing ‘req’
 1178 |         struct virtio_crypto_op_data_req req;
      |                                          ^~~
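
(As an aside, a stand-alone sketch of the pattern GCC 12 objects to: an
inlined, unrolled copy that always touches a fixed number of bytes, applied
to a smaller stack object whose size the compiler can see. The 72-byte size
and all names below are illustrative, not taken from DPDK, and depending on
how the copy is expressed the diagnostic may be -Wstringop-overflow rather
than -Warray-bounds.)

	#include <stdint.h>
	#include <string.h>

	struct req {
		uint8_t bytes[72];	/* stand-in for the request struct */
	};

	/* Mimics rte_memcpy()'s unrolled path: always copies 4 * 32 bytes. */
	static inline void
	copy128(uint8_t *dst, const uint8_t *src)
	{
		memcpy(dst, src, 32);
		memcpy(dst + 32, src + 32, 32);
		memcpy(dst + 64, src + 64, 32);
		memcpy(dst + 96, src + 96, 32); /* past the 72-byte object */
	}

	uint8_t
	fetch(const uint8_t *ring)
	{
		struct req req;

		copy128((uint8_t *)&req, ring); /* GCC 12 flags the +96 copy */
		return req.bytes[0];
	}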

Split this function and separate the per-descriptor copy.
This makes the code clearer and the compiler happier.

Note: error logs have been moved to the callers to avoid duplicates.

Fixes: 3c79609fda7c ("vhost/crypto: handle virtually non-contiguous buffers")

Signed-off-by: David Marchand <david.marchand at redhat.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 lib/vhost/vhost_crypto.c | 127 +++++++++++++++------------------------
 1 file changed, 48 insertions(+), 79 deletions(-)

diff --git a/lib/vhost/vhost_crypto.c b/lib/vhost/vhost_crypto.c
index 926b5c0bd9..293960d350 100644
--- a/lib/vhost/vhost_crypto.c
+++ b/lib/vhost/vhost_crypto.c
@@ -566,92 +566,56 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req,
 }
 
+static __rte_always_inline uint32_t
+copy_data_from_desc(void *dst, struct vhost_crypto_data_req *vc_req,
+	struct vhost_crypto_desc *desc, uint32_t size)
+{
+	uint64_t remain;
+	uint64_t addr;
+
+	remain = RTE_MIN(desc->len, size);
+	addr = desc->addr;
+	do {
+		uint64_t len;
+		void *src;
+
+		len = remain;
+		src = IOVA_TO_VVA(void *, vc_req, addr, &len, VHOST_ACCESS_RO);
+		if (unlikely(src == NULL || len == 0))
+			return 0;
+
+		rte_memcpy(dst, src, len);
+		remain -= len;
+		/* cast is needed for 32-bit architecture */
+		dst = RTE_PTR_ADD(dst, (size_t)len);
+		addr += len;
+	} while (unlikely(remain != 0));
+
+	return RTE_MIN(desc->len, size);
+}
+
+
 static __rte_always_inline int
-copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vhost_crypto_desc *head,
-		struct vhost_crypto_desc **cur_desc,
-		uint32_t size, uint32_t max_n_descs)
+copy_data(void *data, struct vhost_crypto_data_req *vc_req,
+	struct vhost_crypto_desc *head, struct vhost_crypto_desc **cur_desc,
+	uint32_t size, uint32_t max_n_descs)
 {
 	struct vhost_crypto_desc *desc = *cur_desc;
-	uint64_t remain, addr, dlen, len;
-	uint32_t to_copy;
-	uint8_t *data = dst_data;
-	uint8_t *src;
-	int left = size;
+	uint32_t left = size;
 
-	to_copy = RTE_MIN(desc->len, (uint32_t)left);
-	dlen = to_copy;
-	src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
-			VHOST_ACCESS_RO);
-	if (unlikely(!src || !dlen))
-		return -1;
+	do {
+		uint32_t copied;
 
-	rte_memcpy((uint8_t *)data, src, dlen);
-	data += dlen;
-
-	if (unlikely(dlen < to_copy)) {
-		remain = to_copy - dlen;
-		addr = desc->addr + dlen;
-
-		while (remain) {
-			len = remain;
-			src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
-					VHOST_ACCESS_RO);
-			if (unlikely(!src || !len)) {
-				VC_LOG_ERR("Failed to map descriptor");
-				return -1;
-			}
-
-			rte_memcpy(data, src, len);
-			addr += len;
-			remain -= len;
-			data += len;
-		}
-	}
-
-	left -= to_copy;
-
-	while (desc >= head && desc - head < (int)max_n_descs && left) {
-		desc++;
-		to_copy = RTE_MIN(desc->len, (uint32_t)left);
-		dlen = to_copy;
-		src = IOVA_TO_VVA(uint8_t *, vc_req, desc->addr, &dlen,
-				VHOST_ACCESS_RO);
-		if (unlikely(!src || !dlen)) {
-			VC_LOG_ERR("Failed to map descriptor");
+		copied = copy_data_from_desc(data, vc_req, desc, left);
+		if (copied == 0)
 			return -1;
-		}
+		left -= copied;
+		data = RTE_PTR_ADD(data, copied);
+		desc++;
+	} while (desc < head + max_n_descs && left != 0);
 
-		rte_memcpy(data, src, dlen);
-		data += dlen;
-
-		if (unlikely(dlen < to_copy)) {
-			remain = to_copy - dlen;
-			addr = desc->addr + dlen;
-
-			while (remain) {
-				len = remain;
-				src = IOVA_TO_VVA(uint8_t *, vc_req, addr, &len,
-						VHOST_ACCESS_RO);
-				if (unlikely(!src || !len)) {
-					VC_LOG_ERR("Failed to map descriptor");
-					return -1;
-				}
-
-				rte_memcpy(data, src, len);
-				addr += len;
-				remain -= len;
-				data += len;
-			}
-		}
-
-		left -= to_copy;
-	}
-
-	if (unlikely(left > 0)) {
-		VC_LOG_ERR("Incorrect virtio descriptor");
+	if (unlikely(left != 0))
 		return -1;
-	}
 
-	if (unlikely(desc - head == (int)max_n_descs))
+	if (unlikely(desc == head + max_n_descs))
 		*cur_desc = NULL;
 	else
@@ -853,4 +817,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
 			cipher->para.iv_len, max_n_descs))) {
+		VC_LOG_ERR("Incorrect virtio descriptor");
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -884,4 +849,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				vc_req, head, &desc, cipher->para.src_data_len,
 				max_n_descs) < 0)) {
+			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1007,4 +973,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	if (unlikely(copy_data(iv_data, vc_req, head, &desc,
 			chain->para.iv_len, max_n_descs) < 0)) {
+		VC_LOG_ERR("Incorrect virtio descriptor");
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -1038,4 +1005,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				vc_req, head, &desc, chain->para.src_data_len,
 				max_n_descs) < 0)) {
+			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1122,4 +1090,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				chain->para.hash_result_len,
 				max_n_descs) < 0)) {
+			VC_LOG_ERR("Incorrect virtio descriptor");
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
-- 
2.34.3
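
For reference, the shape of the refactor in plain C: one helper copies from
a single descriptor, and the caller loops over the descriptor chain. This is
a simplified, self-contained sketch with hypothetical types (no IOVA-to-VVA
mapping, no guest memory), not the DPDK API:

	#include <stdint.h>
	#include <string.h>

	struct desc {
		const uint8_t *addr;	/* simplified: already-mapped address */
		uint32_t len;
	};

	/* Copy at most 'size' bytes from one descriptor; returns the number
	 * of bytes copied, 0 being treated as an error by the caller. */
	static uint32_t
	copy_from_desc(void *dst, const struct desc *d, uint32_t size)
	{
		uint32_t n = d->len < size ? d->len : size;

		memcpy(dst, d->addr, n);
		return n;
	}

	/* Mirrors the structure of the new copy_data(): gather 'size' bytes
	 * by walking the chain until done or out of descriptors. */
	static int
	gather(void *dst, const struct desc *descs, uint32_t n_descs,
		uint32_t size)
	{
		uint32_t left = size;
		uint32_t i;

		for (i = 0; i < n_descs && left != 0; i++) {
			uint32_t copied = copy_from_desc(dst, &descs[i], left);

			if (copied == 0)
				return -1;
			left -= copied;
			dst = (uint8_t *)dst + copied;
		}

		return left == 0 ? 0 : -1;
	}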

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2022-06-24 15:51:10.050438195 +0100
+++ 0043-vhost-crypto-fix-build-with-GCC-12.patch	2022-06-24 15:51:08.933984249 +0100
@@ -1 +1 @@
-From 4414bb67010dfec2559af52efe8f479b26d55447 Mon Sep 17 00:00:00 2001
+From f69a61bde0e2d72021fd3c609fd4b62edc8f8951 Mon Sep 17 00:00:00 2001
@@ -8,0 +9,2 @@
+[ upstream commit 4414bb67010dfec2559af52efe8f479b26d55447 ]
+
@@ -30 +31,0 @@
-Cc: stable at dpdk.org
@@ -39 +40 @@
-index b1c0eb6a0f..96ffb82a5d 100644
+index 926b5c0bd9..293960d350 100644


