[dpdk-dev] [PATCH v4 5/8] lib/librte_vhost: add public function implementation

Fan Zhang roy.fan.zhang at intel.com
Thu Mar 29 14:52:52 CEST 2018


This patch adds the public API implementation for vhost crypto.
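
To illustrate the intended usage, a minimal, untested sketch of a worker loop
built on the new API follows. The helper name vhost_crypto_worker, the
cid/qid/op_pool/running parameters and the use of cryptodev queue pair 0 are
assumptions of this sketch, not part of the patch; error handling and the
draining of ops still pending in the cryptodev at exit are omitted.

#include <sys/eventfd.h>

#include <rte_cryptodev.h>
#include <rte_lcore.h>
#include <rte_vhost_crypto.h>

/*
 * Illustrative worker loop. vid comes from the vhost new_device callback;
 * cid (cryptodev id), qid (guest data queue index), sess_pool, op_pool and
 * running are provided by the application.
 */
static void
vhost_crypto_worker(int vid, uint8_t cid, uint32_t qid,
		struct rte_mempool *sess_pool, struct rte_mempool *op_pool,
		const volatile int *running)
{
	struct rte_crypto_op *ops[VHOST_CRYPTO_MAX_BURST_SIZE];
	struct rte_crypto_op *ops_deq[VHOST_CRYPTO_MAX_BURST_SIZE];
	int callfds[VIRTIO_CRYPTO_MAX_NUM_BURST_VQS];
	uint16_t nb_callfds, fetched, deq, i;

	rte_vhost_crypto_create(vid, cid, sess_pool, rte_socket_id());

	while (*running) {
		/* Allocate empty ops and translate guest requests into them. */
		if (rte_crypto_op_bulk_alloc(op_pool,
				RTE_CRYPTO_OP_TYPE_SYMMETRIC, ops,
				VHOST_CRYPTO_MAX_BURST_SIZE) == 0)
			continue;
		fetched = rte_vhost_crypto_fetch_requests(vid, qid, ops,
				VHOST_CRYPTO_MAX_BURST_SIZE);
		for (i = fetched; i < VHOST_CRYPTO_MAX_BURST_SIZE; i++)
			rte_crypto_op_free(ops[i]);

		/* Hand the translated ops to the cryptodev and reap results. */
		rte_cryptodev_enqueue_burst(cid, 0, ops, fetched);
		deq = rte_cryptodev_dequeue_burst(cid, 0, ops_deq,
				VHOST_CRYPTO_MAX_BURST_SIZE);

		/* Write the results back to the vrings and kick the guest. */
		rte_vhost_crypto_finalize_requests(ops_deq, deq, callfds,
				&nb_callfds);
		for (i = 0; i < nb_callfds; i++)
			eventfd_write(callfds[i], (eventfd_t)1);
		for (i = 0; i < deq; i++)
			rte_crypto_op_free(ops_deq[i]);
	}

	rte_vhost_crypto_free(vid);
}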

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
---
 lib/librte_vhost/rte_vhost_crypto.h    | 125 +++++++++++++++++
 lib/librte_vhost/rte_vhost_version.map |   5 +
 lib/librte_vhost/vhost_crypto.c        | 248 +++++++++++++++++++++++++++++++++
 3 files changed, 378 insertions(+)
 create mode 100644 lib/librte_vhost/rte_vhost_crypto.h

diff --git a/lib/librte_vhost/rte_vhost_crypto.h b/lib/librte_vhost/rte_vhost_crypto.h
new file mode 100644
index 0000000..339a939
--- /dev/null
+++ b/lib/librte_vhost/rte_vhost_crypto.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017-2018 Intel Corporation
+ */
+
+#ifndef _VHOST_CRYPTO_H_
+#define _VHOST_CRYPTO_H_
+
+#include <linux/virtio_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_hash.h>
+#include <rte_pause.h>
+#include "rte_vhost.h"
+
+#ifndef MAX_DATA_QUEUES
+#define MAX_DATA_QUEUES	(1)
+#endif
+
+#define VIRTIO_CRYPTO_CTRL_QUEUE	(0)
+#define VIRTIO_CRYPTO_MAX_NUM_DEVS	(64)
+#define VIRTIO_CRYPTO_MAX_NUM_BURST_VQS	(64)
+
+/** Feature bits */
+#define VIRTIO_CRYPTO_F_CIPHER_SESSION_MODE	(1)
+#define VIRTIO_CRYPTO_F_HASH_SESSION_MODE	(2)
+#define VIRTIO_CRYPTO_F_MAC_SESSION_MODE	(3)
+#define VIRTIO_CRYPTO_F_AEAD_SESSION_MODE	(4)
+
+#define VHOST_CRYPTO_MBUF_POOL_SIZE		(8192)
+#define VHOST_CRYPTO_MAX_BURST_SIZE		(64)
+
+enum rte_vhost_crypto_zero_copy {
+	RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE = 0,
+	RTE_VHOST_CRYPTO_ZERO_COPY_ENABLE,
+	RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS
+};
+
+/**
+ *  Create Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param cryptodev_id
+ *  The identifier of DPDK Cryptodev, the same cryptodev_id can be assigned to
+ *  multiple Vhost-crypto devices.
+ * @param sess_pool
+ *  The pointer to the created cryptodev session pool, with a private data
+ *  size that matches the target DPDK Cryptodev.
+ * @param socket_id
+ *  NUMA Socket ID to allocate resources on.
+ * @return
+ *  0 if the Vhost Crypto Instance is created successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id);
+
+/**
+ *  Free the Vhost-crypto instance
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @return
+ *  0 if the Vhost Crypto Instance is freed successfully.
+ *  Negative integer otherwise.
+ */
+int
+rte_vhost_crypto_free(int vid);
+
+/**
+ *  Enable or disable zero copy feature
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param option
+ *  Flag of zero copy feature.
+ * @return
+ *  0 if completed successfully.
+ *  Negative integer otherwise.
+ */
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option);
+
+/**
+ * Fetch a number of vring descriptors from the virtqueue and translate them
+ * into DPDK crypto operations. After this function returns, the user can
+ * enqueue the translated ops to the target cryptodev.
+ *
+ * @param vid
+ *  The identifier of the vhost device.
+ * @param qid
+ *  Virtio queue index.
+ * @param ops
+ *  The address of an array of pointers to *rte_crypto_op* structures that must
+ *  be large enough to store *nb_ops* pointers in it.
+ * @param nb_ops
+ *  The maximum number of operations to be fetched and translated.
+ * @return
+ *  The number of fetched and processed vhost crypto request operations.
+ */
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+/**
+ * Finalize the dequeued crypto ops. After the translated crypto ops are
+ * dequeued from the cryptodev, this function shall be called to write the
+ * processed data back to the vring descriptor (if zero copy is turned off).
+ *
+ * @param ops
+ *  The address of an array of *rte_crypto_op* structure that was dequeued
+ *  from cryptodev.
+ * @param nb_ops
+ *  The number of operations contained in the array.
+ * @param callfds
+ *  The callfd number(s) contained in this burst.
+ * @param nb_callfds
+ *  The number of callfd numbers contained in callfds.
+ * @return
+ *  The number of ops processed.
+ */
+uint16_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds);
+
+#endif /* _VHOST_CRYPTO_H_ */
diff --git a/lib/librte_vhost/rte_vhost_version.map b/lib/librte_vhost/rte_vhost_version.map
index 91bf9f0..5bec669 100644
--- a/lib/librte_vhost/rte_vhost_version.map
+++ b/lib/librte_vhost/rte_vhost_version.map
@@ -64,4 +64,9 @@ DPDK_18.05 {
 	global:
 
 	rte_vhost_user_register_extern_ops;
+	rte_vhost_crypto_create;
+	rte_vhost_crypto_free;
+	rte_vhost_crypto_fetch_requests;
+	rte_vhost_crypto_finalize_requests;
+
 } DPDK_18.02;
diff --git a/lib/librte_vhost/vhost_crypto.c b/lib/librte_vhost/vhost_crypto.c
index 8fdb33f..59c6ad0 100644
--- a/lib/librte_vhost/vhost_crypto.c
+++ b/lib/librte_vhost/vhost_crypto.c
@@ -1035,3 +1035,251 @@ vhost_crypto_complete_one_vm_requests(struct rte_crypto_op **ops,
 
 	return processed;
 }
+
+int
+rte_vhost_crypto_create(int vid, uint8_t cryptodev_id,
+		struct rte_mempool *sess_pool, int socket_id)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct rte_hash_parameters params = {0};
+	struct vhost_user_extern_ops ops;
+	struct vhost_crypto *vcrypto;
+	char name[128];
+	int ret;
+
+	if (vid >= VIRTIO_CRYPTO_MAX_NUM_DEVS || !dev) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	ret = rte_vhost_driver_set_features(dev->ifname,
+			VIRTIO_CRYPTO_FEATURES);
+	if (ret < 0) {
+		VC_LOG_ERR("Error setting features");
+		return -1;
+	}
+
+	vcrypto = rte_zmalloc_socket(NULL, sizeof(*vcrypto),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (!vcrypto) {
+		VC_LOG_ERR("Insufficient memory");
+		return -ENOMEM;
+	}
+
+	vcrypto->sess_pool = sess_pool;
+	vcrypto->cid = cryptodev_id;
+	vcrypto->cache_session_id = UINT64_MAX;
+	vcrypto->last_session_id = 1;
+	vcrypto->dev = dev;
+	vcrypto->option = RTE_VHOST_CRYPTO_ZERO_COPY_DISABLE;
+
+	snprintf(name, 127, "HASH_VHOST_CRYPT_%u", (uint32_t)vid);
+	params.name = name;
+	params.entries = SESSION_MAP_ENTRIES;
+	params.hash_func = rte_jhash;
+	params.key_len = sizeof(uint64_t);
+	params.socket_id = socket_id;
+	vcrypto->session_map = rte_hash_create(&params);
+	if (!vcrypto->session_map) {
+		VC_LOG_ERR("Failed to create session map");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	snprintf(name, 127, "MBUF_POOL_VM_%u", (uint32_t)vid);
+	vcrypto->mbuf_pool = rte_pktmbuf_pool_create(name,
+			VHOST_CRYPTO_MBUF_POOL_SIZE, 512,
+			sizeof(struct vhost_crypto_data_req),
+			RTE_MBUF_DEFAULT_DATAROOM * 2 + RTE_PKTMBUF_HEADROOM,
+			rte_socket_id());
+	if (!vcrypto->mbuf_pool) {
+		VC_LOG_ERR("Failed to create mbuf pool");
+		ret = -ENOMEM;
+		goto error_exit;
+	}
+
+	ops.pre_msg_handle = NULL;
+	ops.post_msg_handle = vhost_crypto_msg_post_handler;
+
+	dev->extern_data = vcrypto;
+	if (rte_vhost_user_register_extern_ops(dev->vid, &ops) < 0) {
+		VC_LOG_ERR("Failed to register device");
+		goto error_exit;
+	}
+
+	return 0;
+
+error_exit:
+	if (vcrypto->session_map)
+		rte_hash_free(vcrypto->session_map);
+	if (vcrypto->mbuf_pool)
+		rte_mempool_free(vcrypto->mbuf_pool);
+
+	rte_free(vcrypto);
+
+	return ret;
+}
+
+int
+rte_vhost_crypto_free(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_crypto *vcrypto;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	vcrypto = dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	rte_hash_free(vcrypto->session_map);
+	rte_mempool_free(vcrypto->mbuf_pool);
+	rte_free(vcrypto);
+
+	dev->extern_data = NULL;
+	dev->extern_ops.pre_msg_handle = NULL;
+	dev->extern_ops.post_msg_handle = NULL;
+
+	return 0;
+}
+
+int __rte_experimental
+rte_vhost_crypto_set_zero_copy(int vid, enum rte_vhost_crypto_zero_copy option)
+{
+	struct virtio_net *dev = get_device(vid);
+	struct vhost_crypto *vcrypto;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	if (unlikely(option < 0 || option >=
+			RTE_VHOST_CRYPTO_MAX_ZERO_COPY_OPTIONS)) {
+		VC_LOG_ERR("Invalid option %i", option);
+		return -EINVAL;
+	}
+
+	vcrypto = (struct vhost_crypto *)dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	if (vcrypto->option == (uint8_t)option)
+		return 0;
+
+	if (!(rte_mempool_full(vcrypto->mbuf_pool))) {
+		VC_LOG_ERR("Cannot update zero copy as mempool is not full");
+		return -EINVAL;
+	}
+
+	vcrypto->option = (uint8_t)option;
+
+	return 0;
+}
+
+uint16_t
+rte_vhost_crypto_fetch_requests(int vid, uint32_t qid,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_mbuf *mbufs[VHOST_CRYPTO_MAX_BURST_SIZE * 2];
+	struct virtio_net *dev = get_device(vid);
+	struct rte_vhost_memory *mem;
+	struct vhost_crypto *vcrypto;
+	struct vhost_virtqueue *vq;
+	uint16_t avail_idx;
+	uint16_t start_idx;
+	uint16_t required;
+	uint16_t count;
+	uint16_t i;
+
+	if (unlikely(dev == NULL)) {
+		VC_LOG_ERR("Invalid vid %i", vid);
+		return -EINVAL;
+	}
+
+	vcrypto = (struct vhost_crypto *)dev->extern_data;
+	if (unlikely(vcrypto == NULL)) {
+		VC_LOG_ERR("Cannot find required data, is it initialized?");
+		return -ENOENT;
+	}
+
+	vq = dev->virtqueue[qid];
+	mem = dev->mem;
+
+	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+	start_idx = vq->last_used_idx;
+	count = avail_idx - start_idx;
+	count = RTE_MIN(count, VHOST_CRYPTO_MAX_BURST_SIZE);
+	count = RTE_MIN(count, nb_ops);
+
+	if (unlikely(count == 0))
+		return 0;
+
+	/* for zero copy, we need 2 empty mbufs for src and dst, otherwise
+	 * we need only 1 mbuf as src and dst
+	 */
+	required = count * 2;
+	if (unlikely(rte_mempool_get_bulk(vcrypto->mbuf_pool, (void **)mbufs,
+			required) < 0)) {
+		VC_LOG_ERR("Insufficient memory");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < count; i++) {
+		uint16_t used_idx = (start_idx + i) & (vq->size - 1);
+		uint16_t desc_idx = vq->avail->ring[used_idx];
+		struct vring_desc *head = &vq->desc[desc_idx];
+		struct rte_crypto_op *op = ops[i];
+
+		op->sym->m_src = mbufs[i * 2];
+		op->sym->m_dst = mbufs[i * 2 + 1];
+		op->sym->m_src->data_off = 0;
+		op->sym->m_dst->data_off = 0;
+
+		if (unlikely(vhost_crypto_process_one_req(vcrypto, vq, op, head,
+				desc_idx, mem) < 0))
+			break;
+	}
+
+	vq->last_used_idx += i;
+
+	return i;
+}
+
+uint16_t
+rte_vhost_crypto_finalize_requests(struct rte_crypto_op **ops,
+		uint16_t nb_ops, int *callfds, uint16_t *nb_callfds)
+{
+	struct rte_crypto_op **tmp_ops = ops;
+	uint16_t count = 0, left = nb_ops;
+	int callfd;
+	uint16_t idx = 0;
+
+	while (left) {
+		count = vhost_crypto_complete_one_vm_requests(tmp_ops, left,
+				&callfd);
+		if (unlikely(count == 0))
+			break;
+
+		tmp_ops = &tmp_ops[count];
+		left -= count;
+
+		callfds[idx++] = callfd;
+
+		if (unlikely(idx >= VIRTIO_CRYPTO_MAX_NUM_BURST_VQS)) {
+			VC_LOG_ERR("Too many vqs");
+			break;
+		}
+	}
+
+	*nb_callfds = idx;
+
+	return nb_ops - left;
+}
-- 
2.7.4


