[dpdk-stable] patch 'vhost/crypto: fix possible out of bound access' has been queued to LTS release 18.11.1

Kevin Traynor ktraynor at redhat.com
Thu Jan 31 16:48:37 CET 2019


Hi,

FYI, your patch has been queued to LTS release 18.11.1

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 02/07/19, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate whether any rebasing was
needed to apply it to the stable branch. If there were code changes during
the rebase (i.e. not only metadata diffs), please double-check that the
rebase was done correctly.

Thanks.

Kevin Traynor

---
From fd9dd27602a6494663ace53f0d52122ed9f0de1c Mon Sep 17 00:00:00 2001
From: Fan Zhang <roy.fan.zhang at intel.com>
Date: Fri, 4 Jan 2019 11:22:46 +0000
Subject: [PATCH] vhost/crypto: fix possible out of bound access

[ upstream commit 16d2e718b8ce7b775cd9118e6256dbad081433c3 ]

This patch fixes a possible out-of-bound access in vhost
crypto. Previously, an incorrect next descriptor index could
cause the library to read invalid memory content and crash
the application.

Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
---
 lib/librte_vhost/vhost_crypto.c | 89 ++++++++++++++++++++-------------
 1 file changed, 53 insertions(+), 36 deletions(-)
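
For reviewers, a minimal standalone sketch (not the library code, and not
part of the commit) of the hardening pattern the patch applies throughout:
the guest-controlled desc->next must be range-checked against the virtqueue
size before it is used to index the descriptor table. The reduced struct
and the helper name below are illustrative only.

#include <stdint.h>
#include <stddef.h>

#define VRING_DESC_F_NEXT 1

/* Reduced split-virtqueue descriptor layout, for illustration. */
struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;	/* guest-writable: must be range-checked */
};

/*
 * Follow a descriptor chain one step, as the patched helpers do.
 * Returns NULL at the end of the chain or on a malformed chain.
 */
static struct vring_desc *
next_desc_checked(struct vring_desc *head, struct vring_desc *desc,
		uint32_t *nb_descs, uint32_t vq_size)
{
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return NULL;		/* end of chain */
	/* Both bounds matter: chain length and descriptor index. */
	if (*nb_descs == 0 || desc->next >= vq_size)
		return NULL;
	(*nb_descs)--;
	return &head[desc->next];
}

Without the desc->next >= vq_size check, head[desc->next] can point past
the end of the descriptor table, which is exactly the out-of-bound read
the patch closes.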

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 80b83ef77..0694c0a74 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -468,5 +468,5 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
 static __rte_always_inline struct vring_desc *
 find_write_desc(struct vring_desc *head, struct vring_desc *desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	if (desc->flags & VRING_DESC_F_WRITE)
@@ -474,5 +474,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return NULL;
 		(*nb_descs)--;
@@ -488,5 +488,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
 static struct virtio_crypto_inhdr *
 reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	uint64_t dlen;
@@ -494,5 +494,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
 
 	while (desc->flags & VRING_DESC_F_NEXT) {
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return NULL;
 		(*nb_descs)--;
@@ -511,5 +511,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
 static __rte_always_inline int
 move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
-		uint32_t size, uint32_t *nb_descs)
+		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = *cur_desc;
@@ -518,5 +518,5 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
 		(*nb_descs)--;
-		if (unlikely(*nb_descs == 0))
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
 			return -1;
 
@@ -531,6 +531,10 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			return -1;
 		*cur_desc = &head[desc->next];
+	}
+
 	return 0;
 }
@@ -554,5 +558,6 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
 static int
 copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
-		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
+		struct vring_desc **cur_desc, uint32_t size,
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = *cur_desc;
@@ -596,5 +601,5 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 
 	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
-		if (unlikely(*nb_descs == 0)) {
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
 			VC_LOG_ERR("Invalid descriptors");
 			return -1;
@@ -646,6 +651,9 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			return -1;
 		*cur_desc = &vc_req->head[desc->next];
+	}
 
 	return 0;
@@ -658,5 +666,4 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
 
 	while (wb_data) {
-		rte_prefetch0(wb_data->next);
 		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
 		wb_last = wb_data;
@@ -708,5 +715,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 		uint32_t offset,
 		uint64_t write_back_len,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vhost_crypto_writeback_data *wb_data, *head;
@@ -755,5 +762,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 
 	while (write_back_len) {
-		if (unlikely(*nb_descs == 0)) {
+		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
 			VC_LOG_ERR("Invalid descriptors");
 			goto error_exit;
@@ -802,6 +809,9 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
 	if (unlikely(*nb_descs == 0))
 		*cur_desc = NULL;
-	else
+	else {
+		if (unlikely(desc->next >= vq_size))
+			goto error_exit;
 		*cur_desc = &vc_req->head[desc->next];
+	}
 
 	*end_wb_data = wb_data;
@@ -821,5 +831,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_cipher_data_req *cipher,
 		struct vring_desc *cur_desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = cur_desc;
@@ -832,5 +842,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
-			nb_descs) < 0)) {
+			nb_descs, vq_size) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -852,5 +862,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.src_data_len, nb_descs) < 0)) {
+				cipher->para.src_data_len, nb_descs,
+				vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -870,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, cipher->para.src_data_len,
-				nb_descs) < 0)) {
+				nb_descs, vq_size) < 0)) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -881,5 +892,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc, nb_descs);
+	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -900,5 +911,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				cipher->para.dst_data_len, nb_descs) < 0)) {
+				cipher->para.dst_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -911,5 +923,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
 				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
-				cipher->para.dst_data_len, nb_descs);
+				cipher->para.dst_data_len, nb_descs, vq_size);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -954,5 +966,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		struct virtio_crypto_alg_chain_data_req *chain,
 		struct vring_desc *cur_desc,
-		uint32_t *nb_descs)
+		uint32_t *nb_descs, uint32_t vq_size)
 {
 	struct vring_desc *desc = cur_desc, *digest_desc;
@@ -967,5 +979,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 	/* iv */
 	if (unlikely(copy_data(iv_data, vc_req, &desc,
-			chain->para.iv_len, nb_descs) < 0)) {
+			chain->para.iv_len, nb_descs, vq_size) < 0)) {
 		ret = VIRTIO_CRYPTO_BADMSG;
 		goto error_exit;
@@ -988,5 +1000,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.src_data_len, nb_descs) < 0)) {
+				chain->para.src_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1005,5 +1018,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
 				vc_req, &desc, chain->para.src_data_len,
-				nb_descs)) < 0) {
+				nb_descs, vq_size)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1017,5 +1030,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 	/* dst */
-	desc = find_write_desc(vc_req->head, desc, nb_descs);
+	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
 	if (unlikely(!desc)) {
 		VC_LOG_ERR("Cannot find write location");
@@ -1036,5 +1049,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.dst_data_len, nb_descs) < 0)) {
+				chain->para.dst_data_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1053,5 +1067,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(move_desc(vc_req->head, &desc,
-				chain->para.hash_result_len, nb_descs) < 0)) {
+				chain->para.hash_result_len,
+				nb_descs, vq_size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1065,5 +1080,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 				chain->para.cipher_start_src_offset,
 				chain->para.dst_data_len -
-				chain->para.cipher_start_src_offset, nb_descs);
+				chain->para.cipher_start_src_offset,
+				nb_descs, vq_size);
 		if (unlikely(vc_req->wb == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1079,5 +1095,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
 				digest_addr, 0, chain->para.hash_result_len,
-				nb_descs);
+				nb_descs, vq_size);
 		if (unlikely(ewb->next == NULL)) {
 			ret = VIRTIO_CRYPTO_ERR;
@@ -1086,5 +1102,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
 
 		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
-				chain->para.hash_result_len, nb_descs)) < 0) {
+				chain->para.hash_result_len,
+				nb_descs, vq_size)) < 0) {
 			ret = VIRTIO_CRYPTO_BADMSG;
 			goto error_exit;
@@ -1181,5 +1198,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			req = &tmp_req;
 			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
-					&nb_descs) < 0)) {
+					&nb_descs, vq->size) < 0)) {
 				err = VIRTIO_CRYPTO_BADMSG;
 				VC_LOG_ERR("Invalid descriptor");
@@ -1194,5 +1211,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 	} else {
 		if (unlikely(move_desc(vc_req->head, &desc,
-				sizeof(*req), &nb_descs) < 0)) {
+				sizeof(*req), &nb_descs, vq->size) < 0)) {
 			VC_LOG_ERR("Incorrect descriptor");
 			goto error_exit;
@@ -1236,10 +1253,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
 					&req->u.sym_req.u.cipher, desc,
-					&nb_descs);
+					&nb_descs, vq->size);
 			break;
 		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
 			err = prepare_sym_chain_op(vcrypto, op, vc_req,
 					&req->u.sym_req.u.chain, desc,
-					&nb_descs);
+					&nb_descs, vq->size);
 			break;
 		}
@@ -1259,5 +1276,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
 error_exit:
 
-	inhdr = reach_inhdr(vc_req, desc, &nb_descs);
+	inhdr = reach_inhdr(vc_req, desc, &nb_descs, vq->size);
 	if (likely(inhdr != NULL))
 		inhdr->status = (uint8_t)err;
-- 
2.19.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2019-01-31 15:44:06.421740311 +0000
+++ 0029-vhost-crypto-fix-possible-out-of-bound-access.patch	2019-01-31 15:44:05.000000000 +0000
@@ -1,15 +1,16 @@
-From 16d2e718b8ce7b775cd9118e6256dbad081433c3 Mon Sep 17 00:00:00 2001
+From fd9dd27602a6494663ace53f0d52122ed9f0de1c Mon Sep 17 00:00:00 2001
 From: Fan Zhang <roy.fan.zhang at intel.com>
 Date: Fri, 4 Jan 2019 11:22:46 +0000
 Subject: [PATCH] vhost/crypto: fix possible out of bound access
 
+[ upstream commit 16d2e718b8ce7b775cd9118e6256dbad081433c3 ]
+
 This patch fixes a possible out-of-bound access in vhost
 crypto. Previously, an incorrect next descriptor index could
 cause the library to read invalid memory content and crash
 the application.
 
 Fixes: 3bb595ecd682 ("vhost/crypto: add request handler")
-Cc: stable at dpdk.org
 
 Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
 Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
@@ -18,52 +19,52 @@
  1 file changed, 53 insertions(+), 36 deletions(-)
 
 diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
-index e12458ce0..ebf26f030 100644
+index 80b83ef77..0694c0a74 100644
 --- a/lib/librte_vhost/vhost_crypto.c
 +++ b/lib/librte_vhost/vhost_crypto.c
-@@ -469,5 +469,5 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
+@@ -468,5 +468,5 @@ vhost_crypto_msg_post_handler(int vid, void *msg)
  static __rte_always_inline struct vring_desc *
  find_write_desc(struct vring_desc *head, struct vring_desc *desc,
 -		uint32_t *nb_descs)
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	if (desc->flags & VRING_DESC_F_WRITE)
-@@ -475,5 +475,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+@@ -474,5 +474,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
  
  	while (desc->flags & VRING_DESC_F_NEXT) {
 -		if (unlikely(*nb_descs == 0))
 +		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
  			return NULL;
  		(*nb_descs)--;
-@@ -489,5 +489,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
+@@ -488,5 +488,5 @@ find_write_desc(struct vring_desc *head, struct vring_desc *desc,
  static struct virtio_crypto_inhdr *
  reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
 -		uint32_t *nb_descs)
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	uint64_t dlen;
-@@ -495,5 +495,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+@@ -494,5 +494,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
  
  	while (desc->flags & VRING_DESC_F_NEXT) {
 -		if (unlikely(*nb_descs == 0))
 +		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
  			return NULL;
  		(*nb_descs)--;
-@@ -512,5 +512,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
+@@ -511,5 +511,5 @@ reach_inhdr(struct vhost_crypto_data_req *vc_req, struct vring_desc *desc,
  static __rte_always_inline int
  move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
 -		uint32_t size, uint32_t *nb_descs)
 +		uint32_t size, uint32_t *nb_descs, uint32_t vq_size)
  {
  	struct vring_desc *desc = *cur_desc;
-@@ -519,5 +519,5 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -518,5 +518,5 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
  		(*nb_descs)--;
 -		if (unlikely(*nb_descs == 0))
 +		if (unlikely(*nb_descs == 0 || desc->next >= vq_size))
  			return -1;
  
-@@ -532,6 +532,10 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
+@@ -531,6 +531,10 @@ move_desc(struct vring_desc *head, struct vring_desc **cur_desc,
  	if (unlikely(*nb_descs == 0))
  		*cur_desc = NULL;
 -	else
@@ -75,7 +76,7 @@
 +
  	return 0;
  }
-@@ -555,5 +559,6 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
+@@ -554,5 +558,6 @@ get_data_ptr(struct vhost_crypto_data_req *vc_req, struct vring_desc *cur_desc,
  static int
  copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
 -		struct vring_desc **cur_desc, uint32_t size, uint32_t *nb_descs)
@@ -83,14 +84,14 @@
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	struct vring_desc *desc = *cur_desc;
-@@ -597,5 +602,5 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -596,5 +601,5 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  
  	while ((desc->flags & VRING_DESC_F_NEXT) && left > 0) {
 -		if (unlikely(*nb_descs == 0)) {
 +		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
  			VC_LOG_ERR("Invalid descriptors");
  			return -1;
-@@ -647,6 +652,9 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
+@@ -646,6 +651,9 @@ copy_data(void *dst_data, struct vhost_crypto_data_req *vc_req,
  	if (unlikely(*nb_descs == 0))
  		*cur_desc = NULL;
 -	else
@@ -101,27 +102,27 @@
 +	}
  
  	return 0;
-@@ -659,5 +667,4 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
+@@ -658,5 +666,4 @@ write_back_data(struct vhost_crypto_data_req *vc_req)
  
  	while (wb_data) {
 -		rte_prefetch0(wb_data->next);
  		rte_memcpy(wb_data->dst, wb_data->src, wb_data->len);
  		wb_last = wb_data;
-@@ -709,5 +716,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -708,5 +715,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  		uint32_t offset,
  		uint64_t write_back_len,
 -		uint32_t *nb_descs)
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	struct vhost_crypto_writeback_data *wb_data, *head;
-@@ -756,5 +763,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -755,5 +762,5 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  
  	while (write_back_len) {
 -		if (unlikely(*nb_descs == 0)) {
 +		if (unlikely(*nb_descs == 0 || desc->next >= vq_size)) {
  			VC_LOG_ERR("Invalid descriptors");
  			goto error_exit;
-@@ -803,6 +810,9 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
+@@ -802,6 +809,9 @@ prepare_write_back_data(struct vhost_crypto_data_req *vc_req,
  	if (unlikely(*nb_descs == 0))
  		*cur_desc = NULL;
 -	else
@@ -132,21 +133,21 @@
 +	}
  
  	*end_wb_data = wb_data;
-@@ -822,5 +832,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -821,5 +831,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct virtio_crypto_cipher_data_req *cipher,
  		struct vring_desc *cur_desc,
 -		uint32_t *nb_descs)
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	struct vring_desc *desc = cur_desc;
-@@ -833,5 +843,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -832,5 +842,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* iv */
  	if (unlikely(copy_data(iv_data, vc_req, &desc, cipher->para.iv_len,
 -			nb_descs) < 0)) {
 +			nb_descs, vq_size) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -853,5 +863,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -852,5 +862,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.src_data_len, nb_descs) < 0)) {
@@ -154,21 +155,21 @@
 +				vq_size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -871,5 +882,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -870,5 +881,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
  				vc_req, &desc, cipher->para.src_data_len,
 -				nb_descs) < 0)) {
 +				nb_descs, vq_size) < 0)) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -882,5 +893,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -881,5 +892,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc, nb_descs);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -901,5 +912,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -900,5 +911,6 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				cipher->para.dst_data_len, nb_descs) < 0)) {
@@ -176,28 +177,28 @@
 +				nb_descs, vq_size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -912,5 +924,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -911,5 +923,5 @@ prepare_sym_cipher_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		vc_req->wb = prepare_write_back_data(vc_req, &desc, &ewb,
  				rte_pktmbuf_mtod(m_src, uint8_t *), 0,
 -				cipher->para.dst_data_len, nb_descs);
 +				cipher->para.dst_data_len, nb_descs, vq_size);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -955,5 +967,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -954,5 +966,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		struct virtio_crypto_alg_chain_data_req *chain,
  		struct vring_desc *cur_desc,
 -		uint32_t *nb_descs)
 +		uint32_t *nb_descs, uint32_t vq_size)
  {
  	struct vring_desc *desc = cur_desc, *digest_desc;
-@@ -968,5 +980,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -967,5 +979,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  	/* iv */
  	if (unlikely(copy_data(iv_data, vc_req, &desc,
 -			chain->para.iv_len, nb_descs) < 0)) {
 +			chain->para.iv_len, nb_descs, vq_size) < 0)) {
  		ret = VIRTIO_CRYPTO_BADMSG;
  		goto error_exit;
-@@ -989,5 +1001,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -988,5 +1000,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.src_data_len, nb_descs) < 0)) {
@@ -205,21 +206,21 @@
 +				nb_descs, vq_size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1006,5 +1019,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1005,5 +1018,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		if (unlikely(copy_data(rte_pktmbuf_mtod(m_src, uint8_t *),
  				vc_req, &desc, chain->para.src_data_len,
 -				nb_descs)) < 0) {
 +				nb_descs, vq_size)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -1018,5 +1031,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1017,5 +1030,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  	/* dst */
 -	desc = find_write_desc(vc_req->head, desc, nb_descs);
 +	desc = find_write_desc(vc_req->head, desc, nb_descs, vq_size);
  	if (unlikely(!desc)) {
  		VC_LOG_ERR("Cannot find write location");
-@@ -1037,5 +1050,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1036,5 +1049,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.dst_data_len, nb_descs) < 0)) {
@@ -227,7 +228,7 @@
 +				nb_descs, vq_size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1054,5 +1068,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1053,5 +1067,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				chain->para.hash_result_len, nb_descs) < 0)) {
@@ -235,7 +236,7 @@
 +				nb_descs, vq_size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1066,5 +1081,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1065,5 +1080,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  				chain->para.cipher_start_src_offset,
  				chain->para.dst_data_len -
 -				chain->para.cipher_start_src_offset, nb_descs);
@@ -243,14 +244,14 @@
 +				nb_descs, vq_size);
  		if (unlikely(vc_req->wb == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1080,5 +1096,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1079,5 +1095,5 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  		ewb->next = prepare_write_back_data(vc_req, &desc, &ewb2,
  				digest_addr, 0, chain->para.hash_result_len,
 -				nb_descs);
 +				nb_descs, vq_size);
  		if (unlikely(ewb->next == NULL)) {
  			ret = VIRTIO_CRYPTO_ERR;
-@@ -1087,5 +1103,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+@@ -1086,5 +1102,6 @@ prepare_sym_chain_op(struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
  
  		if (unlikely(copy_data(digest_addr, vc_req, &digest_desc,
 -				chain->para.hash_result_len, nb_descs)) < 0) {
@@ -258,21 +259,21 @@
 +				nb_descs, vq_size)) < 0) {
  			ret = VIRTIO_CRYPTO_BADMSG;
  			goto error_exit;
-@@ -1182,5 +1199,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1181,5 +1198,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  			req = &tmp_req;
  			if (unlikely(copy_data(req, vc_req, &desc, sizeof(*req),
 -					&nb_descs) < 0)) {
 +					&nb_descs, vq->size) < 0)) {
  				err = VIRTIO_CRYPTO_BADMSG;
  				VC_LOG_ERR("Invalid descriptor");
-@@ -1195,5 +1212,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1194,5 +1211,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  	} else {
  		if (unlikely(move_desc(vc_req->head, &desc,
 -				sizeof(*req), &nb_descs) < 0)) {
 +				sizeof(*req), &nb_descs, vq->size) < 0)) {
  			VC_LOG_ERR("Incorrect descriptor");
  			goto error_exit;
-@@ -1237,10 +1254,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1236,10 +1253,10 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  			err = prepare_sym_cipher_op(vcrypto, op, vc_req,
  					&req->u.sym_req.u.cipher, desc,
 -					&nb_descs);
@@ -285,7 +286,7 @@
 +					&nb_descs, vq->size);
  			break;
  		}
-@@ -1260,5 +1277,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
+@@ -1259,5 +1276,5 @@ vhost_crypto_process_one_req(struct vhost_crypto *vcrypto,
  error_exit:
  
 -	inhdr = reach_inhdr(vc_req, desc, &nb_descs);

