[dpdk-stable] patch 'vhost/crypto: fix possible dead loop' has been queued to LTS release 18.11.1

Kevin Traynor ktraynor at redhat.com
Thu Jan 31 16:48:36 CET 2019


Hi,

FYI, your patch has been queued to LTS release 18.11.1.

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/07/19. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
correctly done.

Thanks.

Kevin Traynor

---
From 25b041012ca317971230bb563daa144aef58a0ea Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang at intel.com>
Date: Fri, 4 Jan 2019 11:22:45 +0000
Subject: [PATCH] vhost/crypto: fix possible dead loop

[ upstream commit c7e7244b82ad174a8ca51a385e6ad2eb508261d8 ]

This patch fixes a possible infinite loop caused by an incorrect
descriptor chain created by the driver.

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 lib/librte_vhost/vhost_crypto.c | 121 ++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 39 deletions(-)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index dd01afc08..80b83ef77 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -467,5 +467,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 
 static __rte_always_inline struct vring_desc *
-find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	if (desc->flags & VRING_DESC_F_WRITE)
@@ -473,4 +474,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
+
 		desc = &head[desc->next];
 		if (desc->flags & VRING_DESC_F_WRITE)
@@ -482,11 +487,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
 
 static struct virtio_crypto_inhdr *
-reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+		uint32_t *nb_descs)
 {
 	uint64_t dlen;
 	struct virtio_crypto_inhdr *inhdr;
 
-	while (desc->flags & VRING_DESC_F_NEXT)
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		if (unlikely(*nb_descs == 0))
+			return NULL;
+		(*nb_descs)--;
 		desc = &vc_req->head[desc->next];
+	}
 
 	dlen = desc->len;
@@ -501,13 +511,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-		uint32_t size)
+		uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
-	int left = size;
-
-	rte_prefetch0(&head[desc->next]);
-	left -= desc->len;
+	int left = size - desc->len;
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		(*nb_descs)--;
+		if (unlikely(*nb_descs == 0))
+			return -1;
+
 		desc = &head[desc->next];
 		rte_prefetch0(&head[desc->next]);
@@ -518,5 +529,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 		return -1;
 
-	*cur_desc = &head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &head[desc->next];
 	return 0;
 }
@@ -540,5 +554,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vring_desc **cur_desc, uint32_t size)
+		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
 {
 	struct vring_desc *desc = *cur_desc;
@@ -549,5 +563,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	int left = size;
 
-	rte_prefetch0(&vc_req->head[desc->next]);
 	to_copy = RTE_MIN(desc->len, (uint32_t)left);
 	dlen = to_copy;
@@ -583,4 +596,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			return -1;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		rte_prefetch0(&vc_req->head[desc->next]);
@@ -625,5 +644,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	return 0;
@@ -685,5 +707,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint8_t *src,
 		uint32_t offset,
-		uint64_t write_back_len)
+		uint64_t write_back_len,
+		uint32_t *nb_descs)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
@@ -732,4 +755,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 
 	while (write_back_len) {
+		if (unlikely(*nb_descs == 0)) {
+			VC_LOG_ERR("Invalid descriptors");
+			goto error_exit;
+		}
+		(*nb_descs)--;
+
 		desc = &vc_req->head[desc->next];
 		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
@@ -771,5 +800,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 	}
 
-	*cur_desc = &vc_req->head[desc->next];
+	if (unlikely(*nb_descs == 0))
+		*cur_desc = NULL;
+	else
+		*cur_desc = &vc_req->head[desc->next];
 
 	*end_wb_data = wb_data;
@@ -788,5 +820,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_cipher_data_req *cipher,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc;
@@ -798,6 +831,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* prepare */
 	/* iv */
-	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			cipher->para.iv_len) < 0)) {
+	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
+			nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -819,5 +852,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.src_data_len) < 0)) {
+				cipher->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -836,6 +869,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, cipher->para.src_data_len)
-				< 0)) {
+				vc_req, &desc, cipher->para.src_data_len,
+				nb_descs) < 0)) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -848,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -867,5 +900,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.dst_data_len) < 0)) {
+				cipher->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -878,5 +911,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-				cipher->para.dst_data_len);
+				cipher->para.dst_data_len, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -920,5 +953,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct vhost_crypto_data_req *vc_req,
 		struct virtio_crypto_alg_chain_data_req *chain,
-		struct vring_desc *cur_desc)
+		struct vring_desc *cur_desc,
+		uint32_t *nb_descs)
 {
 	struct vring_desc *desc = cur_desc, *digest_desc;
@@ -933,5 +967,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			chain->para.iv_len) < 0)) {
+			chain->para.iv_len, nb_descs) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -954,5 +988,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.src_data_len) < 0)) {
+				chain->para.src_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -970,5 +1004,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		}
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
-				vc_req, &desc, chain->para.src_data_len)) < 0) {
+				vc_req, &desc, chain->para.src_data_len,
+				nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -982,5 +1017,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc);
+	desc = find_write_desc(vc_req->head, desc, nb_descs);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -1001,5 +1036,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len) < 0)) {
+				chain->para.dst_data_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1018,5 +1053,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.hash_result_len) < 0)) {
+				chain->para.hash_result_len, nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1030,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				chain->para.cipher_start_src_offset,
 				chain->para.dst_data_len -
-				chain->para.cipher_start_src_offset);
+				chain->para.cipher_start_src_offset, nb_descs);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1043,5 +1078,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		/** create a wb_data for digest */
 		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
-				digest_addr, 0, chain->para.hash_result_len);
+				digest_addr, 0, chain->para.hash_result_len,
+				nb_descs);
 		if (unlikely(ewb->next == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1050,5 +1086,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-				chain->para.hash_result_len)) < 0) {
+				chain->para.hash_result_len, nb_descs)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1109,4 +1145,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	uint64_t session_id;
 	uint64_t dlen;
+	uint32_t nb_descs = vq->size;
 	int err = 0;
 
@@ -1117,4 +1154,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
 		dlen = head->len;
+		nb_descs = dlen / sizeof(struct vring_desc);
+		/* drop invalid descriptors */
+		if (unlikely(nb_descs > vq->size))
+			return -1;
 		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
 				&dlen, VHOST_ACCESS_RO);
@@ -1139,6 +1180,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
 			req = &tmp_req;
-			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
-					< 0)) {
+			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
+					&nb_descs) < 0)) {
 				err = VIRTIO_CRYPTO_BADMSG;
 				VC_LOG_ERR("Invalid descriptor");
@@ -1153,5 +1194,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	} else {
 		if (unlikely(move_desc(vc_req->head, &desc,
-				sizeof(*req)) < 0)) {
+				sizeof(*req), &nb_descs) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			goto error_exit;
@@ -1194,9 +1235,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.cipher, desc);
+					&req->u.sym_req.u.cipher, desc,
+					&nb_descs);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
-					&req->u.sym_req.u.chain, desc);
+					&req->u.sym_req.u.chain, desc,
+					&nb_descs);
 			break;
 		}
@@ -1216,5 +1259,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, desc);
+	inhdr = reach_inhdr(vc_req, desc, &nb_descs);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;
-- 
2.19.0
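
As a side note for reviewers, the shape of the fix is the same in every
touched helper: each walk of a guest-supplied descriptor chain is now
bounded by a nb_descs budget derived from the ring (or indirect table)
size, so a circular or over-long chain makes the helper bail out instead
of spinning forever. Below is a minimal, self-contained sketch of that
pattern, modelled on the patched find_write_desc(); the struct and macro
definitions are simplified copies inlined only so the snippet compiles on
its own, and the _bounded name exists just for this sketch - neither is
part of the patch.

#include <stddef.h>
#include <stdint.h>

#define VRING_DESC_F_NEXT	1
#define VRING_DESC_F_WRITE	2

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

/*
 * Find the first device-writable descriptor in a chain. Every hop
 * consumes one unit of *nb_descs; if the budget is exhausted before
 * the chain ends, the chain must loop back on itself (it is longer
 * than the table it lives in), so report it as invalid.
 */
static struct vring_desc *
find_write_desc_bounded(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs)
{
	if (desc->flags & VRING_DESC_F_WRITE)
		return desc;

	while (desc->flags & VRING_DESC_F_NEXT) {
		if (*nb_descs == 0)
			return NULL;	/* malformed/circular chain */
		(*nb_descs)--;

		desc = &head[desc->next];
		if (desc->flags & VRING_DESC_F_WRITE)
			return desc;
	}

	return NULL;
}

The caller sets the budget up once per request, e.g. uint32_t nb_descs =
vq->size; for a direct chain, or head->len / sizeof(struct vring_desc)
for an indirect table (rejected up front when that exceeds vq->size), and
threads &nb_descs through every helper, exactly as the hunks above do.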

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-01-31 15:44:06.398900413 +0000
+++ 0028-vhost-crypto-fix-possible-dead-loop.patch	2019-01-31 15:44:05.000000000 +0000
@@ -1,13 +1,14 @@
-From c7e7244b82ad174a8ca51a385e6ad2eb508261d8 Mon Sep 17 00:00:00 2001
+From 25b041012ca317971230bb563daa144aef58a0ea Mon Sep 17 00:00:00 2001
 From: Fan Zhang <roy.fan.zhang at intel.com>
 Date: Fri, 4 Jan 2019 11:22:45 +0000
 Subject: [PATCH] vhost/crypto: fix possible dead loop
 
+[ upstream commit c7e7244b82ad174a8ca51a385e6ad2eb508261d8 ]
+
 This patch fixes a possible infinite loop caused by an incorrect
 descriptor chain created by the driver.
 
 Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
-Cc: stable at dpdk.org
 
 Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
@@ -16,10 +17,10 @@
  1 file changed, 82 insertions(+), 39 deletions(-)
 
 diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
-index 598196fb7..e12458ce0 100644
+index dd01afc08..80b83ef77 100644
 --- a/lib/librte_vhost/vhost_crypto.c
 +++ b/lib/librte_vhost/vhost_crypto.c
-@@ -468,5 +468,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
+@@ -467,5 +467,6 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
  
  static __rte_always_inline struct vring_desc *
 -find_write_desc(struct vring_desc *head, struct vring_desc *desc)
@@ -27,7 +28,7 @@
 +		uint32_t *nb_descs)
  {
  	if (desc->flags & VRING_DESC_F_WRITE)
-@@ -474,4 +475,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+@@ -473,4 +474,8 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
  
  	while (desc->flags & VRING_DESC_F_NEXT) {
 +		if (unlikely(*nb_descs == 0))
@@ -36,7 +37,7 @@
 +
  		desc = &head[desc->next];
  		if (desc->flags & VRING_DESC_F_WRITE)
-@@ -483,11 +488,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+@@ -482,11 +487,16 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc)
  
  static struct virtio_crypto_inhdr *
 -reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
@@ -55,7 +56,7 @@
 +	}
  
  	dlen = desc->len;
-@@ -502,13 +512,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
+@@ -501,13 +511,14 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc)
  static __rte_always_inline int
  move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 -		uint32_t size)
@@ -75,7 +76,7 @@
 +
  		desc = &head[desc->next];
  		rte_prefetch0(&head[desc->next]);
-@@ -519,5 +530,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -518,5 +529,8 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  		return -1;
  
 -	*cur_desc = &head[desc->next];
@@ -85,20 +86,20 @@
 +		*cur_desc = &head[desc->next];
  	return 0;
  }
-@@ -541,5 +555,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+@@ -540,5 +554,5 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
  static int
  copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 -		struct vring_desc **cur_desc, uint32_t size)
 +		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
  {
  	struct vring_desc *desc = *cur_desc;
-@@ -550,5 +564,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -549,5 +563,4 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	int left = size;
  
 -	rte_prefetch0(&vc_req->head[desc->next]);
  	to_copy = RTE_MIN(desc->len, (uint32_t)left);
  	dlen = to_copy;
-@@ -584,4 +597,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -583,4 +596,10 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  
  	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
 +		if (unlikely(*nb_descs == 0)) {
@@ -109,7 +110,7 @@
 +
  		desc = &vc_req->head[desc->next];
  		rte_prefetch0(&vc_req->head[desc->next]);
-@@ -626,5 +645,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -625,5 +644,8 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	}
  
 -	*cur_desc = &vc_req->head[desc->next];
@@ -119,7 +120,7 @@
 +		*cur_desc = &vc_req->head[desc->next];
  
  	return 0;
-@@ -686,5 +708,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -685,5 +707,6 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  		uint8_t *src,
  		uint32_t offset,
 -		uint64_t write_back_len)
@@ -127,7 +128,7 @@
 +		uint32_t *nb_descs)
  {
  	struct vhost_crypto_writeback_data *wb_data, *head;
-@@ -733,4 +756,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -732,4 +755,10 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  
  	while (write_back_len) {
 +		if (unlikely(*nb_descs == 0)) {
@@ -138,7 +139,7 @@
 +
  		desc = &vc_req->head[desc->next];
  		if (unlikely(!(desc->flags & VRING_DESC_F_WRITE))) {
-@@ -772,5 +801,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -771,5 +800,8 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  	}
  
 -	*cur_desc = &vc_req->head[desc->next];
@@ -148,7 +149,7 @@
 +		*cur_desc = &vc_req->head[desc->next];
  
  	*end_wb_data = wb_data;
-@@ -789,5 +821,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -788,5 +820,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct vhost_crypto_data_req *vc_req,
  		struct virtio_crypto_cipher_data_req *cipher,
 -		struct vring_desc *cur_desc)
@@ -156,7 +157,7 @@
 +		uint32_t *nb_descs)
  {
  	struct vring_desc *desc = cur_desc;
-@@ -799,6 +832,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -798,6 +831,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* prepare */
  	/* iv */
 -	if (unlikely(copy_data(iv_data, vc_req, &desc,
@@ -165,14 +166,14 @@
 +			nb_descs) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -820,5 +853,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -819,5 +852,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.src_data_len) < 0)) {
 +				cipher->para.src_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -837,6 +870,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -836,6 +869,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 -				vc_req, &desc, cipher->para.src_data_len)
@@ -181,28 +182,28 @@
 +				nb_descs) < 0)) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -849,5 +882,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -848,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -868,5 +901,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -867,5 +900,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.dst_data_len) < 0)) {
 +				cipher->para.dst_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -879,5 +912,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -878,5 +911,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
  				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
 -				cipher->para.dst_data_len);
 +				cipher->para.dst_data_len, nb_descs);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -921,5 +954,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -920,5 +953,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct vhost_crypto_data_req *vc_req,
  		struct virtio_crypto_alg_chain_data_req *chain,
 -		struct vring_desc *cur_desc)
@@ -210,21 +211,21 @@
 +		uint32_t *nb_descs)
  {
  	struct vring_desc *desc = cur_desc, *digest_desc;
-@@ -934,5 +968,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -933,5 +967,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* iv */
  	if (unlikely(copy_data(iv_data, vc_req, &desc,
 -			chain->para.iv_len) < 0)) {
 +			chain->para.iv_len, nb_descs) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -955,5 +989,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -954,5 +988,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.src_data_len) < 0)) {
 +				chain->para.src_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -971,5 +1005,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -970,5 +1004,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		}
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 -				vc_req, &desc, chain->para.src_data_len)) < 0) {
@@ -232,35 +233,35 @@
 +				nb_descs)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -983,5 +1018,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -982,5 +1017,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -1002,5 +1037,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1001,5 +1036,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.dst_data_len) < 0)) {
 +				chain->para.dst_data_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1019,5 +1054,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1018,5 +1053,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.hash_result_len) < 0)) {
 +				chain->para.hash_result_len, nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1031,5 +1066,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1030,5 +1065,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  				chain->para.cipher_start_src_offset,
  				chain->para.dst_data_len -
 -				chain->para.cipher_start_src_offset);
 +				chain->para.cipher_start_src_offset, nb_descs);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1044,5 +1079,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1043,5 +1078,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		/** create a wb_data for digest */
  		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
 -				digest_addr, 0, chain->para.hash_result_len);
@@ -268,20 +269,20 @@
 +				nb_descs);
  		if (unlikely(ewb->next == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1051,5 +1087,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1050,5 +1086,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
 -				chain->para.hash_result_len)) < 0) {
 +				chain->para.hash_result_len, nb_descs)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -1110,4 +1146,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1109,4 +1145,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	uint64_t session_id;
  	uint64_t dlen;
 +	uint32_t nb_descs = vq->size;
  	int err = 0;
  
-@@ -1118,4 +1155,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1117,4 +1154,8 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
  		dlen = head->len;
 +		nb_descs = dlen / sizeof(struct vring_desc);
@@ -290,7 +291,7 @@
 +			return -1;
  		desc = IOVA_TO_VVA(struct vring_desc *, vc_req, head->addr,
  				&dlen, VHOST_ACCESS_RO);
-@@ -1140,6 +1181,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1139,6 +1180,6 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  		case RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE:
  			req = &tmp_req;
 -			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req))
@@ -299,14 +300,14 @@
 +					&nb_descs) < 0)) {
  				err = VIRTIO_CRYPTO_BADMSG;
  				VC_LOG_ERR("Invalid descriptor");
-@@ -1154,5 +1195,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1153,5 +1194,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	} else {
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				sizeof(*req)) < 0)) {
 +				sizeof(*req), &nb_descs) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			goto error_exit;
-@@ -1195,9 +1236,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1194,9 +1235,11 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
  			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
 -					&req->u.sym_req.u.cipher, desc);
@@ -320,7 +321,7 @@
 +					&nb_descs);
  			break;
  		}
-@@ -1217,5 +1260,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1216,5 +1259,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  error_exit:
  
 -	inhdr = reach_inhdr(vc_req, desc);

