[dpdk-dev] [PATCH 05/12] lib/librte_vhost: add request handler

Fan Zhang roy.fan.zhang at intel.com
Mon Nov 27 21:01:08 CET 2017


This patch adds the implementation that parses virtio crypto requests
into DPDK crypto operations.

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
---
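
For reference, a minimal self-contained sketch (not part of the patch) of
the scatter-gather copy that copy_data() below performs over a vring
descriptor chain; the toy_* names and struct layout are simplified
stand-ins, not the real vring or virtio-crypto definitions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustration only: simplified stand-in for struct vring_desc. */
struct toy_desc {
	uint64_t addr;	/* guest address; here simply a host pointer */
	uint32_t len;
	uint16_t flags;	/* bit 0: chain continues in 'next' */
	uint16_t next;
};

#define TOY_F_NEXT 0x1

/* Gather 'size' bytes scattered over a descriptor chain into 'dst'. */
static int
toy_copy_from_chain(void *dst, struct toy_desc *table, uint16_t head,
		uint32_t size)
{
	struct toy_desc *desc = &table[head];
	uint32_t left = size;

	for (;;) {
		uint32_t n = desc->len < left ? desc->len : left;

		memcpy((uint8_t *)dst + (size - left),
				(void *)(uintptr_t)desc->addr, n);
		left -= n;
		if (left == 0)
			return 0;
		if (!(desc->flags & TOY_F_NEXT))
			return -1;	/* chain shorter than 'size' */
		desc = &table[desc->next];
	}
}

int
main(void)
{
	uint8_t part1[4] = {1, 2, 3, 4}, part2[4] = {5, 6, 7, 8};
	struct toy_desc table[2] = {
		{(uintptr_t)part1, 4, TOY_F_NEXT, 1},
		{(uintptr_t)part2, 4, 0, 0},
	};
	uint8_t out[8];

	if (toy_copy_from_chain(out, table, 0, sizeof(out)) == 0)
		printf("gathered %zu bytes, last byte %d\n",
				sizeof(out), out[7]);
	return 0;
}
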
 lib/librte_vhost/vhost_crypto.c | 688 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 688 insertions(+)

diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 271083a..d12a449 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -218,6 +218,28 @@ struct vhost_crypto {
 	int socket_id;
 };
 
+struct vhost_crypto_data_req {
+	struct rte_mbuf m_src;
+	struct rte_mbuf m_dst;
+
+	struct vring_desc *descs;
+	struct vring_desc *wb_desc;
+
+	struct rte_vhost_memory *mem;
+
+	uint8_t *src_data;
+	uint8_t *dst_data;
+	uint8_t *hash_result;
+
+	struct virtio_crypto_inhdr *inhdr;
+
+	uint16_t desc_idx;
+	uint32_t len;
+	struct vhost_virtqueue *vq;
+
+	int vid;
+};
+
 static int
 transform_cipher_param(struct rte_crypto_sym_xform *xform,
 		VhostUserCryptoSessionParam *param) {
@@ -445,3 +467,669 @@ vhost_crypto_msg_handler(struct virtio_net *dev, struct VhostUserMsg *msg,
 
 	return ret;
 }
+
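+/*
+ * Locate the descriptor chain pointed to by avail->ring[used_idx]. For an
+ * indirect descriptor the translated indirect table is returned in
+ * *head_desc together with index 0; otherwise *head_desc points at the
+ * descriptor itself and used_idx is returned.
+ */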
+static __rte_always_inline uint32_t
+get_head_desc(struct vring_desc **head_desc, struct rte_vhost_vring *vr,
+		struct rte_vhost_memory *mem, uint32_t used_idx)
+{
+	uint16_t idx = vr->avail->ring[used_idx];
+	struct vring_desc *desc = &vr->desc[idx];
+
+	if (desc->flags & VRING_DESC_F_INDIRECT) {
+		*head_desc = GPA_TO_VVA(struct vring_desc *,
+				mem, desc->addr);
+		return 0;
+	}
+
+	*head_desc = desc;
+	return used_idx;
+}
+
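+/*
+ * Walk the chain starting at desc and return the first device-writable
+ * descriptor (VRING_DESC_F_WRITE), or NULL if the chain has none.
+ */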
+static __rte_always_inline struct vring_desc *
+find_write_desc(struct vring_desc *head, struct vring_desc *desc)
+{
+	if (desc->flags & VRING_DESC_F_WRITE)
+		return desc;
+
+	while (desc->flags & VRING_DESC_F_NEXT) {
+		desc = &head[desc->next];
+		if (desc->flags & VRING_DESC_F_WRITE)
+			return desc;
+	}
+
+	return NULL;
+}
+
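+/*
+ * Return a host pointer to the virtio_crypto_inhdr stored in the last
+ * INHDR_LEN bytes of the final descriptor in the chain.
+ */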
+static struct virtio_crypto_inhdr *
+reach_inhdr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc *desc, uint32_t off)
+{
+	if (!(desc->flags & VRING_DESC_F_NEXT) && desc->len - off >= INHDR_LEN)
+		return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem,
+				desc->addr + desc->len - INHDR_LEN);
+
+	while (desc->flags & VRING_DESC_F_NEXT)
+		desc = &head[desc->next];
+
+	return GPA_TO_VVA(struct virtio_crypto_inhdr *, mem,
+				desc->addr + desc->len - INHDR_LEN);
+}
+
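+/*
+ * Advance the chain cursor (*cur_desc, *off) by 'size' bytes without
+ * copying any data. Returns -1 if the chain is shorter than 'size'.
+ */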
+static __rte_always_inline int
+move_desc(struct vring_desc *head, struct vring_desc **cur_desc, uint32_t *off,
+		uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint32_t offset = *off;
+	uint32_t left = size;
+	uint32_t to_move;
+
+	rte_prefetch0(&head[desc->next]);
+	to_move = RTE_MIN(desc->len - offset, left);
+	left -= to_move;
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_move = RTE_MIN(desc->len, left);
+		left -= to_move;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small");
+		return -1;
+	}
+
+	if (likely(to_move == desc->len && (desc->flags & VRING_DESC_F_NEXT))) {
+		*cur_desc = &head[desc->next];
+		*off = 0;
+	} else {
+		*cur_desc = desc;
+		*off = to_move;
+	}
+
+	return 0;
+}
+
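+/*
+ * Gather 'size' bytes scattered over the descriptor chain, starting at
+ * (*cur_desc, *off), into the contiguous buffer dst_data and advance the
+ * cursor past the copied region. Returns -1 if the chain is too short.
+ */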
+static int
+copy_data(void *dst_data, struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint32_t left = size;
+	uint32_t to_copy;
+	uint8_t *data = dst_data;
+	uint8_t *src;
+
+	rte_prefetch0(&head[desc->next]);
+	to_copy = RTE_MIN(desc->len - *off, left);
+	src = GPA_TO_VVA(uint8_t *, mem, desc->addr + *off);
+	rte_memcpy((uint8_t *)data, src, to_copy);
+	left -= to_copy;
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &head[desc->next];
+		rte_prefetch0(&head[desc->next]);
+		to_copy = RTE_MIN(desc->len, left);
+		src = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(data + size - left, src, to_copy);
+		left -= to_copy;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small, expect %uB, short %uB", size,
+				left);
+		return -1;
+	}
+
+	if (likely(to_copy == desc->len && (desc->flags & VRING_DESC_F_NEXT))) {
+		*cur_desc = &head[desc->next];
+		*off = 0;
+	} else {
+		*cur_desc = desc;
+		*off = to_copy;
+	}
+
+	return 0;
+}
+
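+/*
+ * Translate the current chain position into a host virtual address and
+ * advance the cursor by 'size' bytes; the caller expects those bytes to
+ * be contiguous in guest memory.
+ */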
+static __rte_always_inline void *
+get_data_ptr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	struct vring_desc *desc = *cur_desc;
+	uint8_t *data;
+
+	data = GPA_TO_VVA(void *, mem, desc->addr + *off);
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Failed to get object");
+		return NULL;
+	}
+
+	if (unlikely(move_desc(head, cur_desc, off, size) < 0))
+		return NULL;
+
+	return data;
+}
+
+#ifdef RTE_LIBRTE_VHOST_CRYPTO_DATA_QUEUE_NO_COPY
+
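+/*
+ * Zero-copy variants: the guest buffers are used in place, so reads and
+ * write-backs need no bounce buffers and write_data()/free_data() become
+ * no-ops.
+ */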
+static __rte_always_inline int
+write_data(__rte_unused struct vhost_crypto_data_req *vc_req)
+{
+	return 0;
+}
+
+static __rte_always_inline void *
+get_rd_data(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	return get_data_ptr(head, mem, cur_desc, off, size);
+}
+
+static __rte_always_inline void *
+get_wb_ptr(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	return get_data_ptr(head, mem, cur_desc, off, size);
+}
+
+static __rte_always_inline void *
+free_data(__rte_unused void *data)
+{
+	return NULL;
+}
+
+#else
+
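+/*
+ * Copy mode: write the crypto output held in the local m_dst buffer back
+ * into the guest's write descriptors.
+ */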
+static int
+write_data(struct vhost_crypto_data_req *vc_req)
+{
+	struct vring_desc *descs = vc_req->descs;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	struct vring_desc *desc = vc_req->wb_desc;
+	uint32_t left = vc_req->m_dst.data_len;
+	uint32_t to_write;
+	uint8_t *src_data = vc_req->m_dst.buf_addr;
+	uint8_t *dst;
+
+	rte_prefetch0(&descs[desc->next]);
+	to_write = RTE_MIN(desc->len, left);
+	dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+	rte_memcpy(dst, src_data, to_write);
+	left -= to_write;
+	src_data += to_write;
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	printf("desc addr %llu len %u:", (unsigned long long)desc->addr,
+			desc->len);
+	rte_hexdump(stdout, "", dst, to_write);
+#endif
+
+	while ((desc->flags & VRING_DESC_F_NEXT) && left) {
+		desc = &descs[desc->next];
+		rte_prefetch0(&descs[desc->next]);
+		to_write = RTE_MIN(desc->len, left);
+		dst = GPA_TO_VVA(uint8_t *, mem, desc->addr);
+		rte_memcpy(dst, src_data, to_write);
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+		printf("desc addr %llu len %u:",
+				(unsigned long long)desc->addr, desc->len);
+		rte_hexdump(stdout, "", dst, to_write);
+#endif
+		left -= to_write;
+		src_data += to_write;
+	}
+
+	if (unlikely(left)) {
+		VC_LOG_ERR("Virtq is too small, expect %uB, short %uB",
+				vc_req->m_dst.data_len, left);
+		return -1;
+	}
+
+	return 0;
+}
+
+static __rte_always_inline void *
+get_rd_data(struct vring_desc *head, struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	void *data = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE,
+			rte_socket_id());
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Insufficient memory");
+		return NULL;
+	}
+
+	if (unlikely(copy_data(data, head, mem, cur_desc, off, size) < 0)) {
+		rte_free(data);
+		return NULL;
+	}
+
+	return (void *)data;
+}
+
+static __rte_always_inline void *
+get_wb_ptr(struct vring_desc *head,
+		__rte_unused struct rte_vhost_memory *mem,
+		struct vring_desc **cur_desc, uint32_t *off, uint32_t size)
+{
+	uint8_t *data;
+
+	if (unlikely(move_desc(head, cur_desc, off, size) < 0))
+		return NULL;
+
+	data = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE,
+			rte_socket_id());
+	if (unlikely(!data)) {
+		VC_LOG_ERR("Insufficient memory");
+		return NULL;
+	}
+
+	return data;
+}
+
+static __rte_always_inline void *
+free_data(void *data)
+{
+	rte_free(data);
+	return NULL;
+}
+
+#endif /* RTE_LIBRTE_VHOST_CRYPTO_DATA_QUEUE_NO_COPY */
+
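+/*
+ * Per-op-type handler that translates one virtio-crypto symmetric request
+ * into a DPDK crypto op. Returns the number of bytes that will be written
+ * back to the guest and stores the virtio status in the last argument.
+ */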
+typedef uint32_t (*prepare_sym_op_t)(struct rte_crypto_op *,
+		struct vhost_crypto_data_req *,
+		struct virtio_crypto_sym_data_req *,
+		struct vring_desc *, uint32_t, uint8_t *);
+
+static uint32_t
+prepare_not_support_op(__rte_unused struct rte_crypto_op *op,
+		__rte_unused struct vhost_crypto_data_req *vc_req,
+		__rte_unused struct virtio_crypto_sym_data_req *sym_req,
+		__rte_unused struct vring_desc *rd_desc,
+		__rte_unused uint32_t rd_offset,
+		uint8_t *retval)
+{
+	*retval = VIRTIO_CRYPTO_NOTSUPP;
+	return INHDR_LEN;
+}
+
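+/*
+ * Build a plain cipher operation: copy the IV into the op, map or copy
+ * the source data, locate the guest write descriptors for the destination
+ * data and the inhdr, and attach m_src/m_dst to the op.
+ */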
+static uint32_t
+prepare_sym_cipher_op(struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_sym_data_req *sym_req,
+		struct vring_desc *cur_desc, uint32_t cur_offset,
+		uint8_t *retval)
+{
+	struct virtio_crypto_cipher_data_req *cipher = &sym_req->u.cipher;
+	struct vring_desc *descs = vc_req->descs;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	uint32_t offset = cur_offset;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint8_t ret = 0;
+
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+	op->sym->cipher.data.length = cipher->para.src_data_len;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, descs, mem, &desc, &offset,
+			cipher->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "IV:", iv_data, cipher->para.iv_len);
+#endif
+
+	/* src */
+	vc_req->src_data = get_rd_data(descs, mem, &desc, &offset,
+			cipher->para.src_data_len);
+	if (unlikely(!vc_req->src_data)) {
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+#ifdef RTE_LIBRTE_VHOST_DEBUG
+	rte_hexdump(stdout, "SRC:", vc_req->src_data, cipher->para.src_data_len);
+#endif
+	/* dst */
+	desc = find_write_desc(descs, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->wb_desc = desc;
+	offset = 0;
+
+	vc_req->dst_data = get_wb_ptr(descs, mem, &desc, &offset,
+			cipher->para.dst_data_len);
+	if (unlikely(!vc_req->dst_data)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* record inhdr */
+	vc_req->inhdr = get_data_ptr(descs, mem, &desc, &offset, INHDR_LEN);
+	if (unlikely(!vc_req->inhdr)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	/* src data */
+	vc_req->m_src.buf_addr = (void *)vc_req->src_data;
+	vc_req->m_src.buf_physaddr = rte_mem_virt2phy(vc_req->src_data);
+	vc_req->m_src.data_off = 0;
+	vc_req->m_src.data_len = cipher->para.src_data_len;
+	op->sym->m_src = &vc_req->m_src;
+	op->sym->cipher.data.offset = 0;
+
+	/* dst data */
+	vc_req->m_dst.buf_addr = (void *)(vc_req->dst_data);
+	vc_req->m_dst.buf_physaddr = rte_mem_virt2phy(vc_req->m_dst.buf_addr);
+	vc_req->m_dst.data_off = 0;
+	vc_req->m_dst.data_len = cipher->para.dst_data_len;
+	op->sym->m_dst = &vc_req->m_dst;
+
+	*retval = 0;
+	return cipher->para.dst_data_len + INHDR_LEN;
+
+error_exit:
+	*retval = ret;
+	return INHDR_LEN;
+}
+
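+/*
+ * Build a cipher + hash chained operation: in addition to the cipher
+ * setup above, reserve the guest buffer for the hash result and fill in
+ * the auth offsets and digest location.
+ */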
+static uint32_t
+prepare_sym_chain_op(struct rte_crypto_op *op,
+		struct vhost_crypto_data_req *vc_req,
+		struct virtio_crypto_sym_data_req *sym_req,
+		struct vring_desc *cur_desc, uint32_t cur_offset,
+		uint8_t *retval)
+{
+	struct virtio_crypto_alg_chain_data_req *chain = &sym_req->u.chain;
+	struct vring_desc *descs = vc_req->descs;
+	struct vring_desc *desc = cur_desc;
+	struct rte_vhost_memory *mem = vc_req->mem;
+	uint32_t offset = cur_offset;
+	uint8_t *iv_data = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+	uint8_t ret;
+
+	/* prepare */
+	/* iv */
+	if (unlikely(copy_data(iv_data, descs, mem, &desc, &offset,
+			chain->para.iv_len) < 0)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	/* src */
+	vc_req->src_data = get_rd_data(descs, mem, &desc, &offset,
+			chain->para.src_data_len);
+	if (unlikely(!vc_req->src_data)) {
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* dst */
+	desc = find_write_desc(descs, desc);
+	if (unlikely(!desc)) {
+		VC_LOG_ERR("Cannot find write location");
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	vc_req->wb_desc = desc;
+	offset = 0;
+
+	vc_req->dst_data = get_wb_ptr(descs, mem, &desc, &offset,
+			chain->para.dst_data_len);
+	if (unlikely(!vc_req->dst_data)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* hash result */
+	vc_req->hash_result = get_wb_ptr(descs, mem, &desc, &offset,
+			chain->para.hash_result_len);
+	if (unlikely(!vc_req->hash_result)) {
+		VC_LOG_ERR("Insufficient memory");
+		ret = VIRTIO_CRYPTO_ERR;
+		goto error_exit;
+	}
+
+	/* record inhdr */
+	vc_req->inhdr = get_data_ptr(descs, mem, &desc, &offset, INHDR_LEN);
+	if (unlikely(!vc_req->inhdr)) {
+		ret = VIRTIO_CRYPTO_BADMSG;
+		goto error_exit;
+	}
+
+	op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+	op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+
+	vc_req->m_src.buf_addr = (void *)vc_req->src_data;
+	vc_req->m_src.buf_physaddr = rte_mem_virt2phy(vc_req->m_src.buf_addr);
+	vc_req->m_src.data_off = 0;
+	vc_req->m_src.data_len = chain->para.src_data_len;
+	op->sym->m_src = &vc_req->m_src;
+	op->sym->cipher.data.offset = chain->para.cipher_start_src_offset;
+	op->sym->cipher.data.length = chain->para.src_data_len -
+			chain->para.cipher_start_src_offset;
+	/* dst data */
+	vc_req->m_dst.buf_addr = (void *)vc_req->dst_data;
+	vc_req->m_dst.buf_physaddr = rte_mem_virt2phy(vc_req->m_dst.buf_addr);
+	vc_req->m_dst.data_off = 0;
+	vc_req->m_dst.data_len = chain->para.dst_data_len +
+			chain->para.hash_result_len;
+
+	/* auth */
+	op->sym->auth.data.offset = chain->para.hash_start_src_offset;
+	op->sym->auth.data.length = chain->para.len_to_hash;
+	op->sym->auth.digest.data = (void *)vc_req->hash_result;
+	op->sym->auth.digest.phys_addr = rte_mem_virt2phy(vc_req->hash_result);
+
+	*retval = 0;
+
+	return vc_req->m_dst.data_len + INHDR_LEN;
+
+error_exit:
+	*retval = ret;
+	return INHDR_LEN;
+}
+
+static const prepare_sym_op_t prepare_sym_ops[] = {
+		prepare_not_support_op, /* VIRTIO_CRYPTO_SYM_OP_NONE */
+		prepare_sym_cipher_op,
+		prepare_sym_chain_op,
+};
+
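+/*
+ * Parse one descriptor chain into the crypto op 'op': read the
+ * virtio_crypto_op_data_req header, look up the session (with a one-entry
+ * cache in front of the hash table), attach it to the op and dispatch to
+ * the matching prepare_sym_*_op handler. Returns the number of bytes the
+ * request will write back to the guest, or 0 on failure.
+ */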
+static __rte_always_inline uint32_t
+vhost_crypto_process_cop(struct vhost_virtqueue *vq,
+		struct vhost_crypto *vcrypto, struct rte_crypto_op *op,
+		struct vring_desc *head, uint16_t desc_idx,
+		struct rte_vhost_memory *mem, int vid)
+{
+	struct vhost_crypto_data_req *vc_req = rte_crypto_op_ctod_offset(op,
+			struct vhost_crypto_data_req *, REQ_OP_OFFSET);
+	struct rte_cryptodev_sym_session *session;
+	struct virtio_crypto_op_data_req *req;
+	struct virtio_crypto_inhdr *inhdr;
+	struct vring_desc *desc;
+	uint64_t session_id;
+	uint32_t offset = 0;
+	uint32_t len = INHDR_LEN;
+	int ret = 0;
+
+	if (unlikely(!vc_req))
+		return 0;
+
+	vc_req->desc_idx = desc_idx;
+	vc_req->vq = vq;
+
+	if (likely(head->flags & VRING_DESC_F_INDIRECT)) {
+		head = GPA_TO_VVA(struct vring_desc *, mem, head->addr);
+		if (unlikely(!head))
+			return 0;
+		desc_idx = 0;
+	}
+
+	vc_req->mem = mem;
+	vc_req->descs = head;
+
+	desc = &head[desc_idx];
+
+	req = get_rd_data(head, mem, &desc, &offset, sizeof(*req));
+	if (unlikely(!req)) {
+		VC_LOG_ERR("Failed to retrieve req");
+		return 0;
+	}
+
+	if (unlikely(req->header.opcode != VIRTIO_CRYPTO_CIPHER_ENCRYPT &&
+			req->header.opcode != VIRTIO_CRYPTO_CIPHER_DECRYPT)) {
+		VC_LOG_ERR("Req %u not yet supported", req->header.opcode);
+		ret = -1;
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	session_id = req->header.session_id;
+
+	/* one branch to avoid unnecessary table lookup */
+	if (vcrypto->cache_session_id != session_id) {
+		ret = rte_hash_lookup_data(vcrypto->session_map, &session_id,
+				(void **)&session);
+		if (unlikely(ret < 0)) {
+			VC_LOG_DBG("Failed to retrieve session id %lu",
+					session_id);
+			ret = -1;
+			inhdr = reach_inhdr(head, mem, desc, 0);
+			if (likely(inhdr != NULL))
+				inhdr->status = VIRTIO_CRYPTO_ERR;
+			else
+				len = 0;
+			goto finalize;
+		}
+
+		vcrypto->cache_session = session;
+		vcrypto->cache_session_id = session_id;
+	}
+
+	session = vcrypto->cache_session;
+
+	ret = rte_crypto_op_attach_sym_session(op, session);
+	if (unlikely(ret < 0)) {
+		VC_LOG_ERR("Failed to attach session to op");
+		ret = -1;
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	len = (*prepare_sym_ops[req->u.sym_req.op_type])(op, vc_req,
+			&req->u.sym_req, desc, offset, (uint8_t *)&ret);
+	if (unlikely(ret)) {
+		inhdr = reach_inhdr(head, mem, desc, 0);
+		if (likely(inhdr != NULL))
+			inhdr->status = VIRTIO_CRYPTO_ERR;
+		else
+			len = 0;
+		goto finalize;
+	}
+
+	vc_req->len = len;
+
+	inhdr = reach_inhdr(head, mem, desc, 0);
+	if (unlikely(!inhdr)) {
+		ret = -1;
+		len = 0;
+		goto finalize;
+	}
+	vc_req->inhdr = inhdr;
+
+	vc_req->vid = vid;
+
+finalize:
+	free_data(req);
+	if (unlikely(ret)) {
+		free_data(vc_req->src_data);
+		free_data(vc_req->dst_data);
+		free_data(vc_req->hash_result);
+	}
+
+	return len;
+}
+
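+/*
+ * Post-process one completed crypto op: write the result back to the
+ * guest (copy mode), set the inhdr status, release any bounce buffers and
+ * fill the used ring entry. An op belonging to a different virtqueue than
+ * old_vq is left untouched and its vq is returned so the caller can stop
+ * the batch.
+ */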
+static __rte_always_inline struct vhost_virtqueue *
+vhost_crypto_finalize_one_request(struct rte_crypto_op *op,
+		struct vhost_virtqueue *old_vq)
+{
+	struct vhost_crypto_data_req *vc_req = rte_crypto_op_ctod_offset(op,
+			struct vhost_crypto_data_req *, REQ_OP_OFFSET);
+	uint16_t desc_idx;
+
+	if (unlikely(!vc_req)) {
+		VC_LOG_ERR("Failed to retrieve vc_req");
+		return NULL;
+	}
+
+	if (old_vq && (vc_req->vq != old_vq))
+		return vc_req->vq;
+
+	vc_req->inhdr->status = VIRTIO_CRYPTO_ERR;
+	desc_idx = vc_req->desc_idx;
+
+	if (likely(op->status == RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+		if (likely(write_data(vc_req) == 0))
+			vc_req->inhdr->status = VIRTIO_CRYPTO_OK;
+	}
+
+	vc_req->src_data = free_data(vc_req->src_data);
+	vc_req->dst_data = free_data(vc_req->dst_data);
+	vc_req->hash_result = free_data(vc_req->hash_result);
+
+	vc_req->vq->used->ring[desc_idx].id = desc_idx;
+	vc_req->vq->used->ring[desc_idx].len = vc_req->len;
+
+	return vc_req->vq;
+}
+
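+/*
+ * Finalize a burst of completed ops that belong to a single virtqueue,
+ * then publish the new used index and kick the guest through callfd.
+ * Stops early when an op from a different virtqueue is met.
+ */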
+static __rte_always_inline uint16_t
+vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	uint16_t processed = 1;
+	struct vhost_virtqueue *vq, *tmp_vq;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	vq = vhost_crypto_finalize_one_request(ops[0], NULL);
+	if (unlikely(vq == NULL))
+		return 0;
+	tmp_vq = vq;
+
+	while (processed < nb_ops) {
+		if (likely(processed + 1 < nb_ops))
+			rte_prefetch0(rte_crypto_op_ctod_offset(
+					ops[processed + 1], void *,
+					REQ_OP_OFFSET));
+
+		tmp_vq = vhost_crypto_finalize_one_request(ops[processed],
+				tmp_vq);
+
+		if (unlikely(vq != tmp_vq))
+			break;
+
+		processed++;
+	}
+
+	*(volatile uint16_t *)&vq->used->idx += processed;
+	eventfd_write(vq->callfd, (eventfd_t)1);
+
+	return processed;
+}
-- 
2.9.5