[dpdk-dev,v2,3/7] cryptodev/virtio: core code of crypto devices

Message ID 02e4c16f6c0f330f8c7941160a69e8d20510713f.1519477564.git.jianjay.zhou@huawei.com (mailing list archive)
State Superseded, archived
Delegated to: Pablo de Lara Guarch

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Zhoujian (jay) Feb. 24, 2018, 1:14 p.m. UTC
  The idea comes from QAT and virtio-net devices.

Signed-off-by: Jay Zhou <jianjay.zhou@huawei.com>
---
 drivers/crypto/virtio/virtio_cryptodev.c | 1544 ++++++++++++++++++++++++++++++
 drivers/crypto/virtio/virtio_cryptodev.h |   66 ++
 drivers/crypto/virtio/virtio_rxtx.c      |  533 +++++++++++
 3 files changed, 2143 insertions(+)
 create mode 100644 drivers/crypto/virtio/virtio_cryptodev.c
 create mode 100644 drivers/crypto/virtio/virtio_cryptodev.h
 create mode 100644 drivers/crypto/virtio/virtio_rxtx.c
  

Comments

Fan Zhang March 21, 2018, 4:08 a.m. UTC | #1
Hi Jay,

Excellent work! The patch, although it needs some minor rework, has improved the performance.
Some comments:

1. You need to set up capabilities for the virtio_crypto PMD. Since the QEMU vhost crypto proxy backend only supports AES-CBC and SHA1 (in the cryptodev_vhost_user_init() definition), I believe these two algorithms shall be enough for this version. For the same reason, I suggest you remove all AES_CTR test cases from the virtio_crypto PMD functional test, as they will fail when the vhost_user crypto backend is used.

You may use drivers/crypto/qat/qat_crypto_capabilities.h as an example (a minimal sketch follows below). The const capabilities array shall be returned to the application when virtio_crypto_dev_info_get() is called.
2. There is a bug in virtio_crypto_queue_setup(): you declared "uint32_t i, j;" while i may be used uninitialized later.
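
For illustration, a minimal sketch of such a capabilities array covering only AES-CBC and SHA1-HMAC, written in the style of qat_crypto_capabilities.h (the array name and the key/digest size ranges here are illustrative assumptions, not part of this patch):

static const struct rte_cryptodev_capabilities virtio_crypto_capabilities[] = {
	{	/* SHA1 HMAC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
			{.auth = {
				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
				.block_size = 64,
				.key_size = {.min = 1, .max = 64, .increment = 1},
				.digest_size = {.min = 1, .max = 20, .increment = 1},
				.iv_size = { 0 }
			}, }
		}, }
	},
	{	/* AES CBC */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
				.block_size = 16,
				.key_size = {.min = 16, .max = 32, .increment = 8},
				.iv_size = {.min = 16, .max = 16, .increment = 0}
			}, }
		}, }
	},
	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};

virtio_crypto_dev_info_get() would then expose the array to applications via info->capabilities.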

Regards,
Fan

> diff --git a/drivers/crypto/virtio/virtio_rxtx.c
> b/drivers/crypto/virtio/virtio_rxtx.c
> new file mode 100644
> index 0000000..b7d8066
> --- /dev/null
> +++ b/drivers/crypto/virtio/virtio_rxtx.c
> @@ -0,0 +1,533 @@
> +/* SPDX-License-Identifier: BSD-3-Clause
> + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
> + */
> +
  
Zhoujian (jay) March 21, 2018, 4:58 a.m. UTC | #2
Hi Fan,

Thank you very much for your response and testing!

> -----Original Message-----
> From: Zhang, Roy Fan [mailto:roy.fan.zhang@intel.com]
> Sent: Wednesday, March 21, 2018 12:09 PM
> To: Zhoujian (jay) <jianjay.zhou@huawei.com>; dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>;
> thomas@monjalon.net; Gonglei (Arei) <arei.gonglei@huawei.com>; Zeng, Xin
> <xin.zeng@intel.com>; Huangweidong (C) <weidong.huang@huawei.com>; wangxin (U)
> <wangxinxin.wang@huawei.com>; longpeng <longpeng2@huawei.com>; Zhang, Roy Fan
> <roy.fan.zhang@intel.com>
> Subject: RE: [PATCH v2 3/7] cryptodev/virtio: core code of crypto devices
> 
> Hi Jay,
> 
> Excellent work! The patch, although it needs some minor rework, has improved
> the performance.
> Some comments:
> 
> 1. You need to set up capabilities for the virtio_crypto PMD. Since the QEMU
> vhost crypto proxy backend only supports AES-CBC and SHA1 (in the
> cryptodev_vhost_user_init() definition), I believe these two algorithms shall
> be enough for this version. For the same reason, I suggest you remove all
> AES_CTR test cases from the virtio_crypto PMD functional test, as they will
> fail when the vhost_user crypto backend is used.
> 
> You may use drivers/crypto/qat/qat_crypto_capabilities.h as an example. The
> const capabilities array shall be returned to the application when
> virtio_crypto_dev_info_get() is called.

Okay. Thanks for your suggestion.

> 2. There is a bug in virtio_crypto_queue_setup(): you declared "uint32_t i,
> j;" while i may be used uninitialized later.

Maybe the compilation result differs with different GCC versions; I will fix it.
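
For reference, the smallest change that makes the error path well-defined regardless of compiler is to give i an initial value, so the cookie cleanup loop only walks entries that were actually allocated. A sketch against the declaration in virtio_crypto_queue_setup() (illustrative only, not necessarily the fix that will land in the next revision):

-	uint32_t i, j;
+	uint32_t i = 0, j;	/* i is only advanced in the VTCRYPTO_DATAQ branch */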

Regards,
Jay

> 
> Regards,
> Fan
> 
> > diff --git a/drivers/crypto/virtio/virtio_rxtx.c
> > b/drivers/crypto/virtio/virtio_rxtx.c
> > new file mode 100644
> > index 0000000..b7d8066
> > --- /dev/null
> > +++ b/drivers/crypto/virtio/virtio_rxtx.c
> > @@ -0,0 +1,533 @@
> > +/* SPDX-License-Identifier: BSD-3-Clause
> > + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
> > + */
> > +
  

Patch

diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 0000000..8d84526
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1544 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <unistd.h>
+#ifdef RTE_EXEC_ENV_LINUXAPP
+#include <dirent.h>
+#include <fcntl.h>
+#endif
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_pci.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+
+#include <rte_memory.h>
+#include <rte_eal.h>
+#include <rte_dev.h>
+#include <rte_log.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+		struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+		uint16_t queue_pair_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id,
+		struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+		uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+		struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *session,
+		struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+	{ RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+						VIRTIO_CRYPTO_PCI_DEVICEID) },
+	{ .vendor_id = 0, /* sentinel */ },
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+		struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+		uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+	uint8_t idx = 0;
+	uint8_t needed = 1;
+	uint32_t head = 0;
+	uint32_t len_cipher_key = 0;
+	uint32_t len_auth_key = 0;
+	uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+	uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+	uint32_t len_total = 0;
+	uint32_t input_offset = 0;
+	void *virt_addr_started = NULL;
+	phys_addr_t phys_addr_started;
+	struct vring_desc *desc;
+	uint32_t desc_offset;
+	struct virtio_crypto_session_input *input;
+	int ret;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (session == NULL) {
+		PMD_SESSION_LOG(ERR, "session is NULL.");
+		return -EINVAL;
+	}
+	/* cipher only is supported, it is available if auth_key is NULL */
+	if (!cipher_key) {
+		PMD_SESSION_LOG(ERR, "cipher key is NULL.");
+		return -EINVAL;
+	}
+
+	head = vq->vq_desc_head_idx;
+	PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, vq = %p", head, vq);
+
+	if (vq->vq_free_cnt < needed) {
+		PMD_SESSION_LOG(ERR, "Not enough entry");
+		return -ENOSPC;
+	}
+
+	/* calculate the length of cipher key */
+	if (cipher_key) {
+		switch (ctrl->u.sym_create_session.op_type) {
+		case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+			len_cipher_key
+				= ctrl->u.sym_create_session.u.cipher
+							.para.keylen;
+			break;
+		case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+			len_cipher_key
+				= ctrl->u.sym_create_session.u.chain
+					.para.cipher_param.keylen;
+			break;
+		default:
+			PMD_SESSION_LOG(ERR, "invalid op type");
+			return -EINVAL;
+		}
+	}
+
+	/* calculate the length of auth key */
+	if (auth_key) {
+		len_auth_key =
+			ctrl->u.sym_create_session.u.chain.para.u.mac_param
+				.auth_key_len;
+	}
+
+	/*
+	 * malloc memory to store indirect vring_desc entries, including
+	 * ctrl request, cipher key, auth key, session input and desc vring
+	 */
+	desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+		+ len_session_input;
+	virt_addr_started = rte_malloc(NULL,
+		desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+			* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+	if (virt_addr_started == NULL) {
+		PMD_SESSION_LOG(ERR, "not enough heap memory");
+		return -ENOSPC;
+	}
+	phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+	/* address to store indirect vring desc entries */
+	desc = (struct vring_desc *)
+		((uint8_t *)virt_addr_started + desc_offset);
+
+	/*  ctrl req part */
+	memcpy(virt_addr_started, ctrl, len_ctrl_req);
+	desc[idx].addr = phys_addr_started;
+	desc[idx].len = len_ctrl_req;
+	desc[idx].flags = VRING_DESC_F_NEXT;
+	desc[idx].next = idx + 1;
+	idx++;
+	len_total += len_ctrl_req;
+	input_offset += len_ctrl_req;
+
+	/* cipher key part */
+	if (len_cipher_key > 0) {
+		memcpy((uint8_t *)virt_addr_started + len_total,
+			cipher_key, len_cipher_key);
+
+		desc[idx].addr = phys_addr_started + len_total;
+		desc[idx].len = len_cipher_key;
+		desc[idx].flags = VRING_DESC_F_NEXT;
+		desc[idx].next = idx + 1;
+		idx++;
+		len_total += len_cipher_key;
+		input_offset += len_cipher_key;
+	}
+
+	/* auth key part */
+	if (len_auth_key > 0) {
+		memcpy((uint8_t *)virt_addr_started + len_total,
+			auth_key, len_auth_key);
+
+		desc[idx].addr = phys_addr_started + len_total;
+		desc[idx].len = len_auth_key;
+		desc[idx].flags = VRING_DESC_F_NEXT;
+		desc[idx].next = idx + 1;
+		idx++;
+		len_total += len_auth_key;
+		input_offset += len_auth_key;
+	}
+
+	/* input part */
+	input = (struct virtio_crypto_session_input *)
+		((uint8_t *)virt_addr_started + input_offset);
+	input->status = VIRTIO_CRYPTO_ERR;
+	input->session_id = ~0ULL;
+	desc[idx].addr = phys_addr_started + len_total;
+	desc[idx].len = len_session_input;
+	desc[idx].flags = VRING_DESC_F_WRITE;
+	idx++;
+
+	/* use a single desc entry */
+	vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+	vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+	vq->vq_free_cnt--;
+
+	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+	vq_update_avail_ring(vq, head);
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+	virtqueue_notify(vq);
+
+	rte_rmb();
+	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+		rte_rmb();
+		usleep(100);
+	}
+
+	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+		uint32_t idx, desc_idx, used_idx;
+		struct vring_used_elem *uep;
+
+		used_idx = (uint32_t)(vq->vq_used_cons_idx
+				& (vq->vq_nentries - 1));
+		uep = &vq->vq_ring.used->ring[used_idx];
+		idx = (uint32_t) uep->id;
+		desc_idx = idx;
+
+		while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_ring.desc[desc_idx].next;
+			vq->vq_free_cnt++;
+		}
+
+		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_desc_head_idx = idx;
+
+		vq->vq_used_cons_idx++;
+		vq->vq_free_cnt++;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+			vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+	/* get the result */
+	if (input->status != VIRTIO_CRYPTO_OK) {
+		PMD_SESSION_LOG(ERR, "Something wrong on backend! "
+				"status=%"PRIu32", session_id=%"PRIu64"",
+				input->status, input->session_id);
+		rte_free(virt_addr_started);
+		ret = -1;
+	} else {
+		session->session_id = input->session_id;
+
+		PMD_SESSION_LOG(INFO, "Create session successfully, "
+				"session_id=%"PRIu64"", input->session_id);
+		rte_free(virt_addr_started);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+void virtio_crypto_queue_release(struct virtqueue *vq)
+{
+	struct virtio_crypto_hw *hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (vq) {
+		hw = vq->hw;
+		/* Select and deactivate the queue */
+		VTPCI_OPS(hw)->del_queue(hw, vq);
+
+		rte_memzone_free(vq->mz);
+		rte_mempool_free(vq->mpool);
+		rte_free(vq);
+	}
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+		int queue_type,
+		uint16_t vtpci_queue_idx,
+		uint16_t nb_desc,
+		int socket_id,
+		struct virtqueue **pvq)
+{
+	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+	char mpool_name[MPOOL_MAX_NAME_SZ];
+	const struct rte_memzone *mz;
+	unsigned int vq_size, size;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = NULL;
+	uint32_t i, j;
+
+	PMD_INIT_FUNC_TRACE();
+
+	PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+
+	/*
+	 * Read the virtqueue size from the Queue Size field
+	 * Always power of 2 and if 0 virtqueue does not exist
+	 */
+	vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+	if (vq_size == 0) {
+		PMD_INIT_LOG(ERR, "virtqueue does not exist");
+		return -EINVAL;
+	}
+	PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
+
+	if (!rte_is_power_of_2(vq_size)) {
+		PMD_INIT_LOG(ERR, "virtqueue size is not power of 2");
+		return -EINVAL;
+	}
+
+	if (queue_type == VTCRYPTO_DATAQ) {
+		snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+				dev->data->dev_id, vtpci_queue_idx);
+		snprintf(mpool_name, sizeof(mpool_name), "dev%d_dataqueue%d_mpool",
+				dev->data->dev_id, vtpci_queue_idx);
+	} else if (queue_type == VTCRYPTO_CTRLQ) {
+		snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+				dev->data->dev_id);
+		snprintf(mpool_name, sizeof(mpool_name), "dev%d_controlqueue_mpool",
+				dev->data->dev_id);
+	}
+	size = RTE_ALIGN_CEIL(sizeof(*vq) +
+				vq_size * sizeof(struct vq_desc_extra),
+				RTE_CACHE_LINE_SIZE);
+	vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+				socket_id);
+	if (vq == NULL) {
+		PMD_INIT_LOG(ERR, "Can not allocate virtqueue");
+		return -ENOMEM;
+	}
+
+	if (queue_type == VTCRYPTO_DATAQ) {
+		/* pre-allocate a mempool and use it in the data plane to
+		 * improve performance
+		 */
+		vq->mpool = rte_mempool_lookup(mpool_name);
+		if (vq->mpool == NULL)
+			vq->mpool = rte_mempool_create(mpool_name,
+					vq_size,
+					sizeof(struct virtio_crypto_op_cookie),
+					RTE_CACHE_LINE_SIZE, 0,
+					NULL, NULL, NULL, NULL, socket_id,
+					0);
+		if (!vq->mpool) {
+			PMD_DRV_LOG(ERR, "Virtio Crypto PMD Cannot create"
+					" mempool");
+			goto mpool_create_err;
+		}
+		for (i = 0; i < vq_size; i++) {
+			vq->vq_descx[i].cookie =
+				rte_zmalloc("crypto PMD op cookie pointer",
+					sizeof(struct virtio_crypto_op_cookie),
+					RTE_CACHE_LINE_SIZE);
+			if (vq->vq_descx[i].cookie == NULL) {
+				PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
+				goto cookie_alloc_err;
+			}
+		}
+	}
+
+	vq->hw = hw;
+	vq->dev_id = dev->data->dev_id;
+	vq->vq_queue_index = vtpci_queue_idx;
+	vq->vq_nentries = vq_size;
+
+	/*
+	 * Using part of the vring entries is permitted, but the maximum
+	 * is vq_size
+	 */
+	if (nb_desc == 0 || nb_desc > vq_size)
+		nb_desc = vq_size;
+	vq->vq_free_cnt = nb_desc;
+
+	/*
+	 * Reserve a memzone for vring elements
+	 */
+	size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+	vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+	PMD_INIT_LOG(DEBUG, "%s vring_size: %d, rounded_vring_size: %d",
+			(queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+			size, vq->vq_ring_size);
+
+	mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+			socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+	if (mz == NULL) {
+		if (rte_errno == EEXIST)
+			mz = rte_memzone_lookup(vq_name);
+		if (mz == NULL) {
+			PMD_INIT_LOG(ERR, "not enough memory");
+			goto mz_reserve_err;
+		}
+	}
+
+	/*
+	 * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit,
+	 * and only accepts 32 bit page frame number.
+	 * Check if the allocated physical memory exceeds 16TB.
+	 */
+	if ((mz->phys_addr + vq->vq_ring_size - 1)
+				>> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
+		goto vring_addr_err;
+	}
+
+	memset(mz->addr, 0, mz->len);
+	vq->mz = mz;
+	vq->vq_ring_mem = mz->phys_addr;
+	vq->vq_ring_virt_mem = mz->addr;
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem(physical): 0x%"PRIx64,
+					(uint64_t)mz->phys_addr);
+	PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64,
+					(uint64_t)(uintptr_t)mz->addr);
+
+	*pvq = vq;
+
+	return 0;
+
+vring_addr_err:
+	rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+	rte_mempool_free(vq->mpool);
+	if (i != 0) {
+		for (j = 0; j < i; j++)
+			rte_free(vq->vq_descx[j].cookie);
+	}
+mpool_create_err:
+	rte_free(vq);
+	return -ENOMEM;
+}
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+	int ret;
+	struct virtqueue *vq;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	/* if virtio device has started, do not touch the virtqueues */
+	if (dev->data->dev_started)
+		return 0;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+			0, SOCKET_ID_ANY, &vq);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR, "control vq initialization failed");
+		return ret;
+	}
+
+	hw->cvq = vq;
+
+	return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+	unsigned int i;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* control queue release */
+	virtio_crypto_queue_release(hw->cvq);
+
+	/* data queue release */
+	for (i = 0; i < hw->max_dataqueues; i++)
+		virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+	return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+	/* Device related operations */
+	.dev_configure			 = virtio_crypto_dev_configure,
+	.dev_start			     = virtio_crypto_dev_start,
+	.dev_stop			     = virtio_crypto_dev_stop,
+	.dev_close			     = virtio_crypto_dev_close,
+	.dev_infos_get			 = virtio_crypto_dev_info_get,
+
+	.stats_get			     = virtio_crypto_dev_stats_get,
+	.stats_reset			 = virtio_crypto_dev_stats_reset,
+
+	.queue_pair_setup                = virtio_crypto_qp_setup,
+	.queue_pair_release              = virtio_crypto_qp_release,
+	.queue_pair_start                = NULL,
+	.queue_pair_stop                 = NULL,
+	.queue_pair_count                = NULL,
+
+	/* Crypto related operations */
+	.session_get_size	= virtio_crypto_sym_get_session_private_size,
+	.session_configure	= virtio_crypto_sym_configure_session,
+	.session_clear		= virtio_crypto_sym_clear_session,
+	.qp_attach_session = NULL,
+	.qp_detach_session = NULL
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	unsigned int i;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (stats == NULL) {
+		PMD_DRV_LOG(ERR, "invalid pointer");
+		return;
+	}
+
+	for (i = 0; i < hw->max_dataqueues; i++) {
+		const struct virtqueue *data_queue
+			= dev->data->queue_pairs[i];
+		if (data_queue == NULL)
+			continue;
+
+		stats->enqueued_count += data_queue->packets_sent_total;
+		stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+		stats->dequeued_count += data_queue->packets_received_total;
+		stats->dequeue_err_count
+			+= data_queue->packets_received_failed;
+	}
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+	unsigned int i;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	for (i = 0; i < hw->max_dataqueues; i++) {
+		struct virtqueue *data_queue = dev->data->queue_pairs[i];
+		if (data_queue == NULL)
+			continue;
+
+		data_queue->packets_sent_total = 0;
+		data_queue->packets_sent_failed = 0;
+
+		data_queue->packets_received_total = 0;
+		data_queue->packets_received_failed = 0;
+	}
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id,
+		struct rte_mempool *session_pool __rte_unused)
+{
+	int ret;
+	struct virtqueue *vq;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* if virtio dev is started, do not touch the virtqueues */
+	if (dev->data->dev_started)
+		return 0;
+
+	ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+			qp_conf->nb_descriptors, socket_id, &vq);
+	if (ret < 0) {
+		PMD_INIT_LOG(ERR,
+			"virtio crypto data queue initialization failed\n");
+		return ret;
+	}
+
+	dev->data->queue_pairs[queue_pair_id] = vq;
+
+	return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	struct virtqueue *vq
+		= (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (vq == NULL) {
+		PMD_DRV_LOG(DEBUG, "vq already freed");
+		return 0;
+	}
+
+	virtio_crypto_queue_release(vq);
+	return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+	uint64_t host_features;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Prepare guest_features: feature that driver wants to support */
+	PMD_INIT_LOG(DEBUG, "guest_features before negotiate = %" PRIx64,
+		req_features);
+
+	/* Read device(host) feature bits */
+	host_features = VTPCI_OPS(hw)->get_features(hw);
+	PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64,
+		host_features);
+
+	/*
+	 * Negotiate features: Subset of device feature bits are written back
+	 * guest feature bits.
+	 */
+	hw->guest_features = req_features;
+	hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+							host_features);
+	PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
+		hw->guest_features);
+
+	if (hw->modern) {
+		if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+			PMD_INIT_LOG(ERR,
+				"VIRTIO_F_VERSION_1 features is not enabled.");
+			return -1;
+		}
+		vtpci_cryptodev_set_status(hw,
+			VIRTIO_CONFIG_STATUS_FEATURES_OK);
+		if (!(vtpci_cryptodev_get_status(hw) &
+			VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+			PMD_INIT_LOG(ERR, "failed to set FEATURES_OK status!");
+			return -1;
+		}
+	}
+
+	hw->req_guest_features = req_features;
+
+	return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+	uint64_t req_features)
+{
+	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+	struct virtio_crypto_config local_config;
+	struct virtio_crypto_config *config = &local_config;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Reset the device although not necessary at startup */
+	vtpci_cryptodev_reset(hw);
+
+	/* Tell the host we've noticed this device. */
+	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+	/* Tell the host we've known how to drive the device. */
+	vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+	if (virtio_negotiate_features(hw, req_features) < 0)
+		return -1;
+
+	/* Get status of the device */
+	vtpci_read_cryptodev_config(hw,
+		offsetof(struct virtio_crypto_config, status),
+		&config->status, sizeof(config->status));
+	if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+		PMD_DRV_LOG(ERR, "accelerator hardware is "
+				"not ready");
+		return -1;
+	}
+
+	/* Get number of data queues */
+	vtpci_read_cryptodev_config(hw,
+		offsetof(struct virtio_crypto_config, max_dataqueues),
+		&config->max_dataqueues,
+		sizeof(config->max_dataqueues));
+	hw->max_dataqueues = config->max_dataqueues;
+
+	PMD_INIT_LOG(DEBUG, "hw->max_dataqueues=%d",
+		hw->max_dataqueues);
+
+	return 0;
+}
+
+/*
+ * This function is based on probe() function
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+		struct rte_cryptodev_pmd_init_params *init_params)
+{
+	struct rte_cryptodev *cryptodev;
+	struct virtio_crypto_hw *hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+					init_params);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	cryptodev->driver_id = cryptodev_virtio_driver_id;
+	cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+	cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+	cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+		RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+	hw = cryptodev->data->dev_private;
+	hw->dev_id = cryptodev->data->dev_id;
+
+	PMD_INIT_LOG(DEBUG, "dev %d vendorID=0x%x deviceID=0x%x",
+		cryptodev->data->dev_id, pci_dev->id.vendor_id,
+		pci_dev->id.device_id);
+
+	/* pci device init */
+	if (vtpci_cryptodev_init(pci_dev, hw))
+		return -1;
+
+	if (virtio_crypto_init_device(cryptodev,
+			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+		return -EPERM;
+
+	if (cryptodev->data->dev_started) {
+		virtio_crypto_dev_stop(cryptodev);
+		virtio_crypto_dev_close(cryptodev);
+	}
+
+	cryptodev->dev_ops = NULL;
+	cryptodev->enqueue_burst = NULL;
+	cryptodev->dequeue_burst = NULL;
+
+	/* release control queue */
+	virtio_crypto_queue_release(hw->cvq);
+
+	rte_free(cryptodev->data);
+	cryptodev->data = NULL;
+
+	PMD_DRV_LOG(INFO, "dev_uninit completed");
+
+	return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+	struct rte_cryptodev_config *config __rte_unused)
+{
+	struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (virtio_crypto_init_device(cryptodev,
+			VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+		return -1;
+
+	/* setup control queue
+	 * [0, 1, ... ,(config->max_dataqueues - 1)] are data queues
+	 * config->max_dataqueues is the control queue
+	 */
+	if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+		PMD_INIT_LOG(ERR, "control queue setup error");
+		return -1;
+	}
+	virtio_crypto_ctrlq_start(cryptodev);
+
+	return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+	PMD_DRV_LOG(DEBUG, "virtio_dev_stop");
+
+	vtpci_cryptodev_reset(hw);
+
+	virtio_crypto_dev_free_mbufs(dev);
+	virtio_crypto_free_queues(dev);
+
+	dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	if (dev->data->dev_started)
+		return 0;
+
+	/* Do final configuration before queue engine starts */
+	virtio_crypto_dataq_start(dev);
+	vtpci_cryptodev_reinit_complete(hw);
+
+	dev->data->dev_started = 1;
+
+	return 0;
+}
+
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+	uint32_t i;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	for (i = 0; i < hw->max_dataqueues; i++) {
+		PMD_INIT_LOG(DEBUG, "Before freeing dataq[%d] used "
+			"and unused buf", i);
+		VIRTQUEUE_DUMP((struct virtqueue *)
+			dev->data->queue_pairs[i]);
+
+		PMD_INIT_LOG(DEBUG, "queue_pairs[%d]=%p",
+				i, dev->data->queue_pairs[i]);
+
+		virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+		PMD_INIT_LOG(DEBUG, "After freeing dataq[%d] used and "
+					"unused buf", i);
+		VIRTQUEUE_DUMP(
+			(struct virtqueue *)dev->data->queue_pairs[i]);
+	}
+}
+
+static unsigned int virtio_crypto_sym_get_session_private_size(
+		struct rte_cryptodev *dev __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int virtio_crypto_check_sym_session_paras(
+		struct rte_cryptodev *dev)
+{
+	struct virtio_crypto_hw *hw;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (unlikely(dev == NULL)) {
+		PMD_SESSION_LOG(ERR, "dev is NULL");
+		return -1;
+	}
+	if (unlikely(dev->data == NULL)) {
+		PMD_SESSION_LOG(ERR, "dev->data is NULL");
+		return -1;
+	}
+	hw = dev->data->dev_private;
+	if (unlikely(hw == NULL)) {
+		PMD_SESSION_LOG(ERR, "hw is NULL");
+		return -1;
+	}
+	if (unlikely(hw->cvq == NULL)) {
+		PMD_SESSION_LOG(ERR, "vq is NULL");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int virtio_crypto_check_sym_clear_session_paras(
+		struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	PMD_INIT_FUNC_TRACE();
+
+	if (sess == NULL) {
+		PMD_SESSION_LOG(ERR, "sym_session is NULL");
+		return -1;
+	}
+
+	return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void  virtio_crypto_sym_clear_session(
+		struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	struct virtio_crypto_hw *hw;
+	struct virtqueue *vq;
+	struct virtio_crypto_session *session;
+	struct virtio_crypto_op_ctrl_req *ctrl;
+	struct vring_desc *desc;
+	uint8_t *status;
+	uint8_t needed = 1;
+	uint32_t head;
+	uint8_t *malloc_virt_addr;
+	uint64_t malloc_phys_addr;
+	uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+	uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+	uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+		return;
+
+	hw = dev->data->dev_private;
+	vq = hw->cvq;
+	session = (struct virtio_crypto_session *)get_session_private_data(
+		sess, cryptodev_virtio_driver_id);
+	if (session == NULL) {
+		PMD_SESSION_LOG(ERR, "Invalid session parameter");
+		return;
+	}
+
+	PMD_SESSION_LOG(INFO, "vq->vq_desc_head_idx = %d, "
+			"vq = %p", vq->vq_desc_head_idx, vq);
+
+	if (vq->vq_free_cnt < needed) {
+		PMD_SESSION_LOG(ERR,
+				"vq->vq_free_cnt = %d is less than %d, "
+				"not enough", vq->vq_free_cnt, needed);
+		return;
+	}
+
+	/*
+	 * malloc memory to store information of ctrl request op,
+	 * returned status and desc vring
+	 */
+	malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+		+ NUM_ENTRY_SYM_CLEAR_SESSION
+		* sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+	if (malloc_virt_addr == NULL) {
+		PMD_SESSION_LOG(ERR, "not enough heap room");
+		return;
+	}
+	malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+	/* assign ctrl request op part */
+	ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+	ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+	/* default data virtqueue is 0 */
+	ctrl->header.queue_id = 0;
+	ctrl->u.destroy_session.session_id = session->session_id;
+
+	/* status part */
+	status = &(((struct virtio_crypto_inhdr *)
+		((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+	*status = VIRTIO_CRYPTO_ERR;
+
+	/* indirect desc vring part */
+	desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+		+ desc_offset);
+
+	/* ctrl request part */
+	desc[0].addr = malloc_phys_addr;
+	desc[0].len = len_op_ctrl_req;
+	desc[0].flags = VRING_DESC_F_NEXT;
+	desc[0].next = 1;
+
+	/* status part */
+	desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+	desc[1].len = len_inhdr;
+	desc[1].flags = VRING_DESC_F_WRITE;
+
+	/* use only a single desc entry */
+	head = vq->vq_desc_head_idx;
+	vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+	vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+	vq->vq_ring.desc[head].len
+		= NUM_ENTRY_SYM_CLEAR_SESSION
+		* sizeof(struct vring_desc);
+
+	vq->vq_free_cnt -= needed;
+
+	vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+	vq_update_avail_ring(vq, head);
+	vq_update_avail_idx(vq);
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);
+
+	virtqueue_notify(vq);
+
+	rte_rmb();
+	while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+		rte_rmb();
+		usleep(100);
+	}
+
+	while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+		uint32_t idx, desc_idx, used_idx;
+		struct vring_used_elem *uep;
+
+		used_idx = (uint32_t)(vq->vq_used_cons_idx
+				& (vq->vq_nentries - 1));
+		uep = &vq->vq_ring.used->ring[used_idx];
+		idx = (uint32_t) uep->id;
+		desc_idx = idx;
+		while (vq->vq_ring.desc[desc_idx].flags
+				& VRING_DESC_F_NEXT) {
+			desc_idx = vq->vq_ring.desc[desc_idx].next;
+			vq->vq_free_cnt++;
+		}
+
+		vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+		vq->vq_desc_head_idx = idx;
+		vq->vq_used_cons_idx++;
+		vq->vq_free_cnt++;
+	}
+
+	if (*status != VIRTIO_CRYPTO_OK) {
+		PMD_SESSION_LOG(ERR, "Close session failed "
+				"status=%"PRIu32", session_id=%"PRIu64"",
+				*status, session->session_id);
+		rte_free(malloc_virt_addr);
+		return;
+	}
+
+	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
+			vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+	PMD_SESSION_LOG(INFO, "Close session %"PRIu64" successfully ",
+			session->session_id);
+
+	memset(sess, 0, sizeof(struct virtio_crypto_session));
+	rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return &xform->cipher;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return &xform->auth;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+	if (xform == NULL)
+		return -1;
+
+	/* Cipher Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next == NULL)
+		return VIRTIO_CRYPTO_CMD_CIPHER;
+
+	/* Authentication Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next == NULL)
+		return VIRTIO_CRYPTO_CMD_AUTH;
+
+	/* Authenticate then Cipher */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+		return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+	/* Cipher then Authenticate */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+		return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+	return -1;
+}
+
+static int virtio_crypto_sym_pad_cipher_param(
+		struct virtio_crypto_cipher_session_para *para,
+		struct rte_crypto_cipher_xform *cipher_xform)
+{
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_NULL:
+		para->algo = VIRTIO_CRYPTO_NO_CIPHER;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CBC:
+		para->algo = VIRTIO_CRYPTO_CIPHER_3DES_CBC;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_CTR:
+		para->algo = VIRTIO_CRYPTO_CIPHER_3DES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_3DES_ECB:
+		para->algo = VIRTIO_CRYPTO_CIPHER_3DES_ECB;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+		break;
+	case  RTE_CRYPTO_CIPHER_AES_CTR:
+		para->algo = VIRTIO_CRYPTO_CIPHER_AES_CTR;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_ECB:
+		para->algo = VIRTIO_CRYPTO_CIPHER_AES_ECB;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_F8:
+		para->algo = VIRTIO_CRYPTO_CIPHER_AES_F8;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_XTS:
+		para->algo = VIRTIO_CRYPTO_CIPHER_AES_XTS;
+		break;
+	case RTE_CRYPTO_CIPHER_ARC4:
+		para->algo = VIRTIO_CRYPTO_CIPHER_ARC4;
+		break;
+	case RTE_CRYPTO_CIPHER_KASUMI_F8:
+		para->algo = VIRTIO_CRYPTO_CIPHER_KASUMI_F8;
+		break;
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		para->algo = VIRTIO_CRYPTO_CIPHER_SNOW3G_UEA2;
+		break;
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+		para->algo = VIRTIO_CRYPTO_CIPHER_ZUC_EEA3;
+		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		para->algo = VIRTIO_CRYPTO_CIPHER_DES_CBC;
+		break;
+	default:
+		PMD_SESSION_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
+				cipher_xform->algo);
+		return -1;
+	}
+
+	para->keylen = cipher_xform->key.length;
+	switch (cipher_xform->op) {
+	case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+		para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+		break;
+	case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+		para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+		break;
+	default:
+		PMD_SESSION_LOG(ERR, "Unsupported cipher operation parameter");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int virtio_crypto_sym_pad_auth_param(
+		struct virtio_crypto_op_ctrl_req *ctrl,
+		struct rte_crypto_auth_xform *auth_xform)
+{
+	uint32_t *algo;
+	struct virtio_crypto_alg_chain_session_para *para =
+		&(ctrl->u.sym_create_session.u.chain.para);
+
+	switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+	case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+		algo = &(para->u.hash_param.algo);
+		break;
+	case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+		algo = &(para->u.mac_param.algo);
+		break;
+	default:
+		PMD_SESSION_LOG(ERR, "Unsupported hash mode %u specified",
+			ctrl->u.sym_create_session.u.chain.para.hash_mode);
+		return -1;
+	}
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_NULL:
+		*algo = VIRTIO_CRYPTO_NO_MAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		*algo = VIRTIO_CRYPTO_MAC_CBCMAC_AES;
+		break;
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+		*algo = VIRTIO_CRYPTO_MAC_CMAC_AES;
+		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		*algo = VIRTIO_CRYPTO_MAC_GMAC_AES;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		*algo = VIRTIO_CRYPTO_MAC_XCBC_AES;
+		break;
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+		*algo = VIRTIO_CRYPTO_MAC_KASUMI_F9;
+		break;
+	case RTE_CRYPTO_AUTH_MD5:
+		*algo = VIRTIO_CRYPTO_HASH_MD5;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_MD5;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1:
+		*algo = VIRTIO_CRYPTO_HASH_SHA1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224:
+		*algo = VIRTIO_CRYPTO_HASH_SHA_224;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_224;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256:
+		*algo = VIRTIO_CRYPTO_HASH_SHA_256;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_256;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384:
+		*algo = VIRTIO_CRYPTO_HASH_SHA_384;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_384;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512:
+		*algo = VIRTIO_CRYPTO_HASH_SHA_512;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		*algo = VIRTIO_CRYPTO_MAC_HMAC_SHA_512;
+		break;
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+		*algo = VIRTIO_CRYPTO_MAC_SNOW3G_UIA2;
+		break;
+	default:
+		PMD_SESSION_LOG(ERR,
+			"Crypto: Undefined Hash algo %u specified",
+			auth_xform->algo);
+		return -1;
+	}
+
+	return 0;
+}
+
+static int virtio_crypto_sym_pad_op_ctrl_req(
+		struct virtio_crypto_op_ctrl_req *ctrl,
+		struct rte_crypto_sym_xform *xform, bool is_chainned,
+		uint8_t **cipher_key_data, uint8_t **auth_key_data,
+		struct virtio_crypto_session *session)
+{
+	int ret;
+	struct rte_crypto_auth_xform *auth_xform = NULL;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+	/* Get cipher xform from crypto xform chain */
+	cipher_xform = virtio_crypto_get_cipher_xform(xform);
+	if (cipher_xform) {
+		if (is_chainned)
+			ret = virtio_crypto_sym_pad_cipher_param(
+				&ctrl->u.sym_create_session.u.chain.para
+						.cipher_param, cipher_xform);
+		else
+			ret = virtio_crypto_sym_pad_cipher_param(
+				&ctrl->u.sym_create_session.u.cipher.para,
+				cipher_xform);
+
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR,
+				"pad cipher parameter failed");
+			return -1;
+		}
+
+		*cipher_key_data = cipher_xform->key.data;
+
+		session->iv.offset = cipher_xform->iv.offset;
+		session->iv.length = cipher_xform->iv.length;
+	}
+
+	/* Get auth xform from crypto xform chain */
+	auth_xform = virtio_crypto_get_auth_xform(xform);
+	if (auth_xform) {
+		/* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+		struct virtio_crypto_alg_chain_session_para *para =
+			&(ctrl->u.sym_create_session.u.chain.para);
+		if (auth_xform->key.length) {
+			para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+			para->u.mac_param.auth_key_len =
+				(uint32_t)auth_xform->key.length;
+			para->u.mac_param.hash_result_len =
+				auth_xform->digest_length;
+
+			*auth_key_data = auth_xform->key.data;
+		} else {
+			para->hash_mode	= VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+			para->u.hash_param.hash_result_len =
+				auth_xform->digest_length;
+		}
+
+		ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR, "pad auth parameter failed");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int virtio_crypto_check_sym_configure_session_paras(
+		struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sym_sess,
+		struct rte_mempool *mempool)
+{
+	if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+		unlikely(mempool == NULL)) {
+		PMD_SESSION_LOG(ERR, "NULL pointer");
+		return -1;
+	}
+
+	if (virtio_crypto_check_sym_session_paras(dev) < 0)
+		return -1;
+
+	return 0;
+}
+
+static int virtio_crypto_sym_configure_session(
+		struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
+{
+	int ret;
+	struct virtio_crypto_session crypto_sess;
+	void *session_private = &crypto_sess;
+	struct virtio_crypto_session *session;
+	struct virtio_crypto_op_ctrl_req *ctrl_req;
+	enum virtio_crypto_cmd_id cmd_id;
+	uint8_t *cipher_key_data = NULL;
+	uint8_t *auth_key_data = NULL;
+	struct virtio_crypto_hw *hw;
+	struct virtqueue *control_vq;
+
+	PMD_INIT_FUNC_TRACE();
+
+	ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+			sess, mempool);
+	if (ret < 0) {
+		PMD_SESSION_LOG(ERR, "Invalid parameters");
+		return ret;
+	}
+
+	if (rte_mempool_get(mempool, &session_private)) {
+		PMD_SESSION_LOG(ERR,
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	session = (struct virtio_crypto_session *)session_private;
+	memset(session, 0, sizeof(struct virtio_crypto_session));
+	ctrl_req = &session->ctrl;
+	ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+	/* FIXME: support multiqueue */
+	ctrl_req->header.queue_id = 0;
+
+	hw = dev->data->dev_private;
+	control_vq = hw->cvq;
+
+	cmd_id = virtio_crypto_get_chain_order(xform);
+	if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+	if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+		ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+			= VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+	switch (cmd_id) {
+	case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+	case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+		ctrl_req->u.sym_create_session.op_type
+			= VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+			xform, true, &cipher_key_data, &auth_key_data, session);
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR,
+				"padding sym op ctrl req failed");
+			goto error_out;
+		}
+		ret = virtio_crypto_send_command(control_vq, ctrl_req,
+			cipher_key_data, auth_key_data, session);
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR,
+				"create session failed: %d", ret);
+			goto error_out;
+		}
+		break;
+	case VIRTIO_CRYPTO_CMD_CIPHER:
+		ctrl_req->u.sym_create_session.op_type
+			= VIRTIO_CRYPTO_SYM_OP_CIPHER;
+		ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+			false, &cipher_key_data, &auth_key_data, session);
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR,
+				"padding sym op ctrl req failed");
+			goto error_out;
+		}
+		ret = virtio_crypto_send_command(control_vq, ctrl_req,
+			cipher_key_data, NULL, session);
+		if (ret < 0) {
+			PMD_SESSION_LOG(ERR,
+				"create session failed: %d", ret);
+			goto error_out;
+		}
+		break;
+	default:
+		PMD_SESSION_LOG(ERR,
+			"Unsupported operation chain order parameter");
+		goto error_out;
+	}
+
+	set_session_private_data(sess, dev->driver_id,
+		session_private);
+
+	return 0;
+
+error_out:
+	return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *info)
+{
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	if (info != NULL) {
+		info->driver_id = cryptodev_virtio_driver_id;
+		info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+		info->feature_flags = dev->feature_flags;
+		info->max_nb_queue_pairs = hw->max_dataqueues;
+		info->sym.max_nb_sessions =
+			RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
+	}
+}
+
+static int crypto_virtio_pci_probe(
+	struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+		.name = "",
+		.socket_id = rte_socket_id(),
+		.private_data_size = sizeof(struct virtio_crypto_hw),
+		.max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	PMD_DRV_LOG(DEBUG, "Found Crypto device at %02x:%02x.%x",
+			pci_dev->addr.bus,
+			pci_dev->addr.devid,
+			pci_dev->addr.function);
+
+	rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+	return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int crypto_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+			sizeof(cryptodev_name));
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+	.id_table = pci_id_virtio_crypto_map,
+	.drv_flags = 0,
+	.probe = crypto_virtio_pci_probe,
+	.remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv, rte_virtio_crypto_driver,
+		cryptodev_virtio_driver_id);
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 0000000..875400c
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,66 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include <linux/virtio_crypto.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+	VIRTIO_CRYPTO_CMD_CIPHER = 0,
+	VIRTIO_CRYPTO_CMD_AUTH = 1,
+	VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+	VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+	struct virtio_crypto_op_data_req data_req;
+	struct virtio_crypto_inhdr inhdr;
+	struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+		int queue_type,
+		uint16_t vtpci_queue_idx,
+		uint16_t nb_desc,
+		int socket_id,
+		struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+		struct rte_crypto_op **tx_pkts,
+		uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue,
+		struct rte_crypto_op **tx_pkts,
+		uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 0000000..b7d8066
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,533 @@ 
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <rte_cycles.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_branch_prediction.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_cryptodev.h>
+#include <rte_prefetch.h>
+#include <rte_string_fns.h>
+#include <rte_errno.h>
+#include <rte_byteorder.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+#ifdef RTE_LIBRTE_PMD_VIRTIO_CRYPTO_DEBUG_DUMP
+#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
+#else
+#define  VIRTIO_DUMP_PACKET(m, len) do { } while (0)
+#endif
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+	struct vring_desc *dp, *dp_tail;
+	struct vq_desc_extra *dxp;
+	uint16_t desc_idx_last = desc_idx;
+
+	dp = &vq->vq_ring.desc[desc_idx];
+	dxp = &vq->vq_descx[desc_idx];
+	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+		while (dp->flags & VRING_DESC_F_NEXT) {
+			desc_idx_last = dp->next;
+			dp = &vq->vq_ring.desc[dp->next];
+		}
+	}
+	dxp->ndescs = 0;
+
+	/*
+	 * We must append the existing free chain, if any, to the end of
+	 * newly freed chain. If the virtqueue was completely used, then
+	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
+	 */
+	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+		vq->vq_desc_head_idx = desc_idx;
+	} else {
+		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+		dp_tail->next = desc_idx;
+	}
+
+	vq->vq_desc_tail_idx = desc_idx_last;
+	dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+		struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+	struct vring_used_elem *uep;
+	struct rte_crypto_op *cop;
+	uint16_t used_idx, desc_idx;
+	uint16_t i;
+	struct virtio_crypto_inhdr *inhdr;
+	struct virtio_crypto_op_cookie *op_cookie;
+
+	/* Caller does the check */
+	for (i = 0; i < num ; i++) {
+		used_idx = (uint16_t)(vq->vq_used_cons_idx
+				& (vq->vq_nentries - 1));
+		uep = &vq->vq_ring.used->ring[used_idx];
+		desc_idx = (uint16_t)uep->id;
+		cop = (struct rte_crypto_op *)
+				vq->vq_descx[desc_idx].crypto_op;
+		op_cookie = (struct virtio_crypto_op_cookie *)
+						vq->vq_descx[desc_idx].cookie;
+		inhdr = &(op_cookie->inhdr);
+		switch (inhdr->status) {
+		case VIRTIO_CRYPTO_OK:
+			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+			break;
+		case VIRTIO_CRYPTO_ERR:
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			vq->packets_received_failed++;
+			break;
+		case VIRTIO_CRYPTO_BADMSG:
+			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			vq->packets_received_failed++;
+			break;
+		case VIRTIO_CRYPTO_NOTSUPP:
+			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			vq->packets_received_failed++;
+			break;
+		case VIRTIO_CRYPTO_INVSESS:
+			cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+			vq->packets_received_failed++;
+			break;
+		default:
+			break;
+		}
+
+		vq->packets_received_total++;
+
+		rx_pkts[i] = cop;
+		rte_mempool_put(vq->mpool, op_cookie);
+
+		vq->vq_used_cons_idx++;
+		vq_ring_free_chain(vq, desc_idx);
+		vq->vq_descx[desc_idx].crypto_op = NULL;
+	}
+
+	return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+		struct rte_crypto_op *cop,
+		struct virtio_crypto_op_data_req *data,
+		struct virtio_crypto_session *session)
+{
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct virtio_crypto_op_data_req *req_data = data;
+	struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+	struct virtio_crypto_sym_create_session_req *sym_sess_req =
+		&ctrl->u.sym_create_session;
+	struct virtio_crypto_alg_chain_session_para *chain_para =
+		&sym_sess_req->u.chain.para;
+	struct virtio_crypto_cipher_session_para *cipher_para;
+
+	req_data->header.session_id = session->session_id;
+
+	switch (sym_sess_req->op_type) {
+	case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+		req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+		cipher_para = &sym_sess_req->u.cipher.para;
+		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+		else
+			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+		req_data->u.sym_req.u.cipher.para.iv_len
+			= session->iv.length;
+
+		req_data->u.sym_req.u.cipher.para.src_data_len =
+			(sym_op->cipher.data.length +
+				sym_op->cipher.data.offset);
+		req_data->u.sym_req.u.cipher.para.dst_data_len =
+			req_data->u.sym_req.u.cipher.para.src_data_len;
+		break;
+	case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+		req_data->u.sym_req.op_type =
+			VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+		cipher_para = &chain_para->cipher_param;
+		if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+		else
+			req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+		req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+		req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+		req_data->u.sym_req.u.chain.para.src_data_len =
+			(sym_op->cipher.data.length +
+				sym_op->cipher.data.offset);
+		req_data->u.sym_req.u.chain.para.dst_data_len =
+			req_data->u.sym_req.u.chain.para.src_data_len;
+		req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+			sym_op->cipher.data.offset;
+		req_data->u.sym_req.u.chain.para.len_to_cipher =
+			sym_op->cipher.data.length;
+		req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+			sym_op->auth.data.offset;
+		req_data->u.sym_req.u.chain.para.len_to_hash =
+			sym_op->auth.data.length;
+		req_data->u.sym_req.u.chain.para.aad_len =
+			chain_para->aad_len;
+
+		if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+			req_data->u.sym_req.u.chain.para.hash_result_len =
+				chain_para->u.hash_param.hash_result_len;
+		if (chain_para->hash_mode ==
+			VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+			req_data->u.sym_req.u.chain.para.hash_result_len =
+				chain_para->u.mac_param.hash_result_len;
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+		struct virtqueue *txvq,
+		struct rte_crypto_op *cop)
+{
+	uint16_t idx = 0;
+	uint16_t num_entry;
+	uint16_t needed = 1;
+	uint16_t head_idx;
+	struct vq_desc_extra *dxp;
+	struct vring_desc *start_dp;
+	struct vring_desc *desc;
+	uint64_t indirect_op_data_req_phys_addr;
+	uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+	uint32_t indirect_vring_addr_offset = req_data_len +
+		sizeof(struct virtio_crypto_inhdr);
+	struct rte_crypto_sym_op *sym_op = cop->sym;
+	struct virtio_crypto_session *session =
+		(struct virtio_crypto_session *)get_session_private_data(
+		cop->sym->session, cryptodev_virtio_driver_id);
+	struct virtio_crypto_op_data_req *op_data_req;
+	uint32_t hash_result_len = 0;
+	struct virtio_crypto_op_cookie *crypto_op_cookie;
+	struct virtio_crypto_alg_chain_session_para *para;
+
+	if (unlikely(sym_op->m_src->nb_segs != 1))
+		return -EMSGSIZE;
+	if (unlikely(txvq->vq_free_cnt == 0))
+		return -ENOSPC;
+	if (unlikely(txvq->vq_free_cnt < needed))
+		return -EMSGSIZE;
+	head_idx = txvq->vq_desc_head_idx;
+	if (unlikely(head_idx >= txvq->vq_nentries))
+		return -EFAULT;
+	if (unlikely(session == NULL))
+		return -EFAULT;
+
+	dxp = &txvq->vq_descx[head_idx];
+
+	if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+		PMD_TX_LOG(ERR, "can not get cookie");
+		return -EFAULT;
+	}
+	crypto_op_cookie = dxp->cookie;
+	indirect_op_data_req_phys_addr =
+		rte_mempool_virt2iova(crypto_op_cookie);
+	op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+	if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session))
+		return -EFAULT;
+
+	/* status is initialized to VIRTIO_CRYPTO_ERR */
+	((struct virtio_crypto_inhdr *)
+		((uint8_t *)op_data_req + req_data_len))->status =
+		VIRTIO_CRYPTO_ERR;
+
+	/* point to indirect vring entry */
+	desc = (struct vring_desc *)
+		((uint8_t *)op_data_req + indirect_vring_addr_offset);
+	for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+		desc[idx].next = idx + 1;
+	desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+	idx = 0;
+
+	/* indirect vring: first part, virtio_crypto_op_data_req */
+	desc[idx].addr = indirect_op_data_req_phys_addr;
+	desc[idx].len = req_data_len;
+	desc[idx++].flags = VRING_DESC_F_NEXT;
+
+	/* indirect vring: iv of cipher */
+	if (session->iv.length) {
+		desc[idx].addr = cop->phys_addr + session->iv.offset;
+		desc[idx].len = session->iv.length;
+		desc[idx++].flags = VRING_DESC_F_NEXT;
+	}
+
+	/* indirect vring: additional auth data */
+	if (session->aad.length) {
+		desc[idx].addr = session->aad.phys_addr;
+		desc[idx].len = session->aad.length;
+		desc[idx++].flags = VRING_DESC_F_NEXT;
+	}
+
+	/* indirect vring: src data */
+	desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+	desc[idx].len = (sym_op->cipher.data.offset
+		+ sym_op->cipher.data.length);
+	desc[idx++].flags = VRING_DESC_F_NEXT;
+
+	/* indirect vring: dst data */
+	if (sym_op->m_dst) {
+		desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+		desc[idx].len = (sym_op->cipher.data.offset
+			+ sym_op->cipher.data.length);
+	} else {
+		desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+		desc[idx].len = (sym_op->cipher.data.offset
+			+ sym_op->cipher.data.length);
+	}
+	desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+	/* indirect vring: digest result */
+	para = &(session->ctrl.u.sym_create_session.u.chain.para);
+	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+		hash_result_len = para->u.hash_param.hash_result_len;
+	if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+		hash_result_len = para->u.mac_param.hash_result_len;
+	if (hash_result_len > 0) {
+		desc[idx].addr = sym_op->auth.digest.phys_addr;
+		desc[idx].len = hash_result_len;
+		desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+	}
+
+	/* indirect vring: last part, status returned */
+	desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+	desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+	desc[idx++].flags = VRING_DESC_F_WRITE;
+
+	num_entry = idx;
+
+	/* save the info to use when receiving packets */
+	dxp->crypto_op = (void *)cop;
+	dxp->ndescs = needed;
+
+	/* use a single ring descriptor that points to the indirect table */
+	start_dp = txvq->vq_ring.desc;
+	start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+		indirect_vring_addr_offset;
+	start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+	start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+	idx = start_dp[head_idx].next;
+	txvq->vq_desc_head_idx = idx;
+	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+		txvq->vq_desc_tail_idx = idx;
+	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+	vq_update_avail_ring(txvq, head_idx);
+
+	return 0;
+}
+
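+/* Dispatch the crypto op to the enqueue routine matching its type. */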
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+		struct rte_crypto_op *cop)
+{
+	int ret;
+
+	switch (cop->type) {
+	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+		ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+		break;
+	default:
+		PMD_TX_LOG(ERR, "invalid crypto op type %u", cop->type);
+		ret = -EFAULT;
+		break;
+	}
+
+	return ret;
+}
+
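+/*
+ * Initialize the vring memory of a queue and pass its guest physical
+ * address to the device through the setup_queue callback.
+ */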
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+	struct virtio_crypto_hw *hw = vq->hw;
+	int i, size = vq->vq_nentries;
+	struct vring *vr = &vq->vq_ring;
+	uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+	PMD_INIT_FUNC_TRACE();
+
+	vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+	vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+	vq->vq_free_cnt = vq->vq_nentries;
+
+	/* Chain all the descriptors in the ring with an END */
+	for (i = 0; i < size - 1; i++)
+		vr->desc[i].next = (uint16_t)(i + 1);
+	vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+	/*
+	 * Disable the device (host) from interrupting the guest
+	 */
+	virtqueue_disable_intr(vq);
+
+	/*
+	 * Set the guest physical address of the virtqueue in the
+	 * VIRTIO_PCI_QUEUE_PFN config register of the device
+	 * so it is shared with the backend
+	 */
+	if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+		PMD_INIT_LOG(ERR, "setup_queue failed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
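+/* Start the control virtqueue, if one has been set up. */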
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	if (hw->cvq) {
+		virtio_crypto_vring_start(hw->cvq);
+		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+	}
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+	/*
+	 * Start data vrings
+	 * -	Set up the vring structure for each data queue
+	 */
+	uint16_t i;
+	struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_FUNC_TRACE();
+
+	/* Start data vring. */
+	for (i = 0; i < hw->max_dataqueues; i++) {
+		virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+		VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+	}
+}
+
+/* vring size of data queue is 1024 */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
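+/*
+ * Dequeue completed crypto operations from the data virtqueue; the
+ * number dequeued is bounded by nb_pkts and VIRTIO_MBUF_BURST_SZ.
+ */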
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+		uint16_t nb_pkts)
+{
+	struct virtqueue *txvq = tx_queue;
+	uint16_t nb_used, num, nb_rx;
+
+	nb_used = VIRTQUEUE_NUSED(txvq);
+
+	virtio_rmb();
+
+	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+		? num : VIRTIO_MBUF_BURST_SZ);
+
+	if (num == 0)
+		return 0;
+
+	nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
+
+	return nb_rx;
+}
+
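+/*
+ * Enqueue a burst of crypto operations to the data virtqueue and kick
+ * the backend once at the end if notification is required.
+ */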
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+		uint16_t nb_pkts)
+{
+	struct virtqueue *txvq;
+	uint16_t nb_tx;
+	int error;
+
+	if (unlikely(nb_pkts < 1))
+		return nb_pkts;
+	if (unlikely(tx_queue == NULL)) {
+		PMD_TX_LOG(ERR, "tx_queue is NULL");
+		return 0;
+	}
+	txvq = tx_queue;
+
+	PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+		/* nb_segs is always 1 in the virtio crypto case */
+		int need = txm->nb_segs - txvq->vq_free_cnt;
+
+		/*
+		 * A positive value indicates there is not enough room left
+		 * in the vring descriptors
+		 */
+		if (unlikely(need > 0)) {
+			/*
+			 * check again because the receive path may have
+			 * freed some space in the meantime
+			 */
+			need = txm->nb_segs - txvq->vq_free_cnt;
+			if (unlikely(need > 0)) {
+				PMD_TX_LOG(ERR, "No free tx descriptors "
+						"to transmit");
+				break;
+			}
+		}
+
+		txvq->packets_sent_total++;
+
+		/* Enqueue Packet buffers */
+		error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+		if (unlikely(error)) {
+			if (error == -ENOSPC)
+				PMD_TX_LOG(ERR,
+					"virtqueue_enqueue Free count = 0");
+			else if (error == -EMSGSIZE)
+				PMD_TX_LOG(ERR,
+					"virtqueue_enqueue Free count < 1");
+			else
+				PMD_TX_LOG(ERR,
+					"virtqueue_enqueue error: %d", error);
+			txvq->packets_sent_failed++;
+			break;
+		}
+	}
+
+	if (likely(nb_tx)) {
+		vq_update_avail_idx(txvq);
+
+		if (unlikely(virtqueue_kick_prepare(txvq))) {
+			virtqueue_notify(txvq);
+			PMD_TX_LOG(DEBUG, "Notified backend after xmit");
+		}
+	}
+
+	return nb_tx;
+}