[dpdk-dev] [PATCH v3] Scheduler: add driver for scheduler crypto pmd

Fan Zhang roy.fan.zhang at intel.com
Tue Jan 3 18:16:40 CET 2017


This patch provides the initial implementation of the scheduler poll mode
driver using the DPDK cryptodev framework.

The scheduler PMD schedules and enqueues crypto ops to the hardware and/or
software crypto devices attached to it (its slaves). Dequeueing from the
slave(s), and the optional reordering of the dequeued crypto ops, are
likewise carried out by the scheduler.

In this initial version, the scheduler PMD supports only the round-robin
mode, which distributes each enqueued burst of crypto ops among its slaves
in a round-robin manner. This mode can help fill the throughput gap between
the physical core and the attached cryptodevs and thus increase the overall
performance. Moreover, the scheduler PMD provides APIs for users to create
their own schedulers; a skeleton is sketched below.
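
A user-defined scheduler is a set of operation callbacks registered on a
stopped scheduler device via rte_cryptodev_scheduler_load_user_scheduler().
The skeleton below is an illustration only; the my_* callbacks are
hypothetical and their implementations are omitted:

    /* callback implementations (my_slave_attach, etc.) and error
     * handling are omitted */
    static struct rte_cryptodev_scheduler_ops my_sched_ops = {
            .slave_attach = my_slave_attach,
            .slave_detach = my_slave_detach,
            .scheduler_start = my_scheduler_start,
            .scheduler_stop = my_scheduler_stop,
            .config_queue_pair = my_config_queue_pair,
            .create_private_ctx = my_create_private_ctx,
    };

    static struct rte_cryptodev_scheduler my_scheduler = {
            .name = "my_scheduler",
            .description = "an example user-defined scheduler",
            .ops = &my_sched_ops,
    };

    /* in the application's init path, with the scheduler device
     * (scheduler_id) stopped: */
    rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, &my_scheduler);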

Build instructions:
To build DPDK with the CRYPTO_SCHEDULER_PMD, set
CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=y in config/common_base.

Notice:
- The scheduler PMD accepts the same EAL command-line options as other
  cryptodevs. However, apart from socket_id, the remaining cryptodev
  options are ignored. The scheduler PMD's max_nb_queue_pairs and
  max_nb_sessions options are set to the minimum of the attached slaves'
  values. For example, if a scheduler cryptodev has two attached
  cryptodevs with max_nb_queue_pairs of 2 and 8 respectively, the
  scheduler cryptodev's max_nb_queue_pairs is automatically updated to 2.

- The scheduler cryptodev cannot be started unless the scheduling mode is
  set and at least one slave is attached. Also, to configure the scheduler
  at run time (attach/detach slave(s), change the scheduling mode, or
  enable/disable crypto op reordering), one should stop the scheduler
  first, otherwise an error will be returned. A typical configuration flow
  is sketched below.
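
The snippet below illustrates the intended configuration flow using the
APIs added by this patch. The device name "crypto_scheduler_0" and the
slave device IDs 0 and 1 are placeholders; the slave cryptodevs are
assumed to have been created and configured already.

    #include <rte_cryptodev.h>
    #include <rte_cryptodev_scheduler.h>

    static int
    configure_scheduler(void)
    {
            int scheduler_id = rte_cryptodev_get_dev_id("crypto_scheduler_0");

            if (scheduler_id < 0)
                    return -1;

            /* slaves are attached and the mode is set while stopped */
            if (rte_cryptodev_scheduler_slave_attach(scheduler_id, 0) < 0 ||
                rte_cryptodev_scheduler_slave_attach(scheduler_id, 1) < 0)
                    return -1;

            if (rte_cryptodev_scheduler_mode_set(scheduler_id,
                            CDEV_SCHED_MODE_ROUNDROBIN) < 0)
                    return -1;

            /* optionally restore enqueue order on dequeued crypto ops */
            if (rte_cryptodev_scheduler_ordering_set(scheduler_id, 1) < 0)
                    return -1;

            /* the usual rte_cryptodev_configure() / queue pair setup /
             * rte_cryptodev_start() sequence follows */
            return 0;
    }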

Changes in v3:
Fixed config/common_base.

Changes in v2:
Reworked the API to accommodate future scheduling modes.

Signed-off-by: Fan Zhang <roy.fan.zhang at intel.com>
Signed-off-by: Declan Doherty <declan.doherty at intel.com>
---
 config/common_base                                 |   6 +
 drivers/crypto/Makefile                            |   1 +
 drivers/crypto/scheduler/Makefile                  |  67 +++
 drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 598 +++++++++++++++++++++
 drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 183 +++++++
 .../scheduler/rte_cryptodev_scheduler_ioctls.h     |  92 ++++
 .../scheduler/rte_cryptodev_scheduler_operations.h |  71 +++
 .../scheduler/rte_pmd_crypto_scheduler_version.map |  12 +
 drivers/crypto/scheduler/scheduler_pmd.c           | 168 ++++++
 drivers/crypto/scheduler/scheduler_pmd_ops.c       | 495 +++++++++++++++++
 drivers/crypto/scheduler/scheduler_pmd_private.h   | 122 +++++
 drivers/crypto/scheduler/scheduler_roundrobin.c    | 419 +++++++++++++++
 lib/librte_cryptodev/rte_cryptodev.h               |   4 +
 mk/rte.app.mk                                      |   3 +-
 14 files changed, 2240 insertions(+), 1 deletion(-)
 create mode 100644 drivers/crypto/scheduler/Makefile
 create mode 100644 drivers/crypto/scheduler/rte_cryptodev_scheduler.c
 create mode 100644 drivers/crypto/scheduler/rte_cryptodev_scheduler.h
 create mode 100644 drivers/crypto/scheduler/rte_cryptodev_scheduler_ioctls.h
 create mode 100644 drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
 create mode 100644 drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
 create mode 100644 drivers/crypto/scheduler/scheduler_pmd.c
 create mode 100644 drivers/crypto/scheduler/scheduler_pmd_ops.c
 create mode 100644 drivers/crypto/scheduler/scheduler_pmd_private.h
 create mode 100644 drivers/crypto/scheduler/scheduler_roundrobin.c

diff --git a/config/common_base b/config/common_base
index 4bff83a..79d120d 100644
--- a/config/common_base
+++ b/config/common_base
@@ -400,6 +400,12 @@ CONFIG_RTE_LIBRTE_PMD_KASUMI=n
 CONFIG_RTE_LIBRTE_PMD_KASUMI_DEBUG=n
 
 #
+# Compile PMD for Crypto Scheduler device
+#
+CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER=n
+CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
+
+#
 # Compile PMD for ZUC device
 #
 CONFIG_RTE_LIBRTE_PMD_ZUC=n
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 745c614..cdd3c94 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -38,6 +38,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
 DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
 
 include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
new file mode 100644
index 0000000..976a565
--- /dev/null
+++ b/drivers/crypto/scheduler/Makefile
@@ -0,0 +1,67 @@
+#   BSD LICENSE
+#
+#   Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_crypto_scheduler.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_crypto_scheduler_version.map
+
+#
+# Export include files
+#
+SYMLINK-y-include += rte_cryptodev_scheduler_ioctls.h
+SYMLINK-y-include += rte_cryptodev_scheduler_operations.h
+SYMLINK-y-include += rte_cryptodev_scheduler.h
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_reorder
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
new file mode 100644
index 0000000..d2d068c
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -0,0 +1,598 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_jhash.h>
+#include <rte_reorder.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_scheduler.h>
+#include <rte_malloc.h>
+
+#include "scheduler_pmd_private.h"
+
+/** Update the scheduler PMD's capabilities with an attached device's
+ *  capabilities. For each device attached, the scheduler's capabilities
+ *  become the common capability set of all its slaves.
+ */
+static uint32_t
+sync_caps(struct rte_cryptodev_capabilities *caps,
+		uint32_t nb_caps,
+		const struct rte_cryptodev_capabilities *slave_caps)
+{
+	uint32_t sync_nb_caps = nb_caps, nb_slave_caps = 0;
+	uint32_t i;
+
+	while (slave_caps[nb_slave_caps].op != RTE_CRYPTO_OP_TYPE_UNDEFINED)
+		nb_slave_caps++;
+
+	if (nb_caps == 0) {
+		rte_memcpy(caps, slave_caps, sizeof(*caps) * nb_slave_caps);
+		return nb_slave_caps;
+	}
+
+	for (i = 0; i < sync_nb_caps; i++) {
+		struct rte_cryptodev_capabilities *cap = &caps[i];
+		uint32_t j;
+
+		for (j = 0; j < nb_slave_caps; j++) {
+			const struct rte_cryptodev_capabilities *s_cap =
+					&slave_caps[j];
+
+			if (s_cap->op != cap->op || s_cap->sym.xform_type !=
+					cap->sym.xform_type)
+				continue;
+
+			if (s_cap->sym.xform_type ==
+					RTE_CRYPTO_SYM_XFORM_AUTH) {
+				if (s_cap->sym.auth.algo !=
+						cap->sym.auth.algo)
+					continue;
+
+				cap->sym.auth.digest_size.min =
+					s_cap->sym.auth.digest_size.min <
+					cap->sym.auth.digest_size.min ?
+					s_cap->sym.auth.digest_size.min :
+					cap->sym.auth.digest_size.min;
+				cap->sym.auth.digest_size.max =
+					s_cap->sym.auth.digest_size.max <
+					cap->sym.auth.digest_size.max ?
+					s_cap->sym.auth.digest_size.max :
+					cap->sym.auth.digest_size.max;
+
+			}
+
+			if (s_cap->sym.xform_type ==
+					RTE_CRYPTO_SYM_XFORM_CIPHER)
+				if (s_cap->sym.cipher.algo !=
+						cap->sym.cipher.algo)
+					continue;
+
+			/* common cap found; stop searching */
+			break;
+		}
+
+		if (j < nb_slave_caps)
+			continue;
+
+		/* remove an uncommon cap from the array */
+		for (j = i; j < sync_nb_caps - 1; j++)
+			rte_memcpy(&caps[j], &caps[j+1], sizeof(*cap));
+
+		memset(&caps[sync_nb_caps - 1], 0, sizeof(*cap));
+		sync_nb_caps--;
+	}
+
+	return sync_nb_caps;
+}
+
+static int
+update_scheduler_capability(struct scheduler_ctx *sched_ctx)
+{
+	struct rte_cryptodev_capabilities tmp_caps[256] = {0};
+	uint32_t nb_caps = 0, i;
+
+	/* rte_free() tolerates NULL; clear to avoid a dangling pointer */
+	rte_free(sched_ctx->capabilities);
+	sched_ctx->capabilities = NULL;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		struct rte_cryptodev_info dev_info;
+
+		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+		nb_caps = sync_caps(tmp_caps, nb_caps, dev_info.capabilities);
+		if (nb_caps == 0)
+			return -1;
+	}
+
+	sched_ctx->capabilities = rte_zmalloc_socket(NULL,
+			sizeof(struct rte_cryptodev_capabilities) *
+			(nb_caps + 1), 0, SOCKET_ID_ANY);
+	if (!sched_ctx->capabilities)
+		return -ENOMEM;
+
+	rte_memcpy(sched_ctx->capabilities, tmp_caps,
+			sizeof(struct rte_cryptodev_capabilities) * nb_caps);
+
+	return 0;
+}
+
+static void
+update_scheduler_feature_flag(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	uint32_t i;
+
+	dev->feature_flags = 0;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		struct rte_cryptodev_info dev_info;
+
+		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+
+		dev->feature_flags |= dev_info.feature_flags;
+	}
+}
+
+static void
+update_max_nb_qp(struct scheduler_ctx *sched_ctx)
+{
+	uint32_t i;
+	uint32_t max_nb_qp;
+
+	if (!sched_ctx->nb_slaves)
+		return;
+
+	/* nb_slaves is non-zero here, checked above */
+	max_nb_qp = UINT32_MAX;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		struct rte_cryptodev_info dev_info;
+
+		rte_cryptodev_info_get(sched_ctx->slaves[i].dev_id, &dev_info);
+		max_nb_qp = dev_info.max_nb_queue_pairs < max_nb_qp ?
+				dev_info.max_nb_queue_pairs : max_nb_qp;
+	}
+
+	sched_ctx->max_nb_queue_pairs = max_nb_qp;
+}
+
+/** Attach a device to the scheduler. */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+	struct scheduler_slave *slave;
+	struct rte_cryptodev_info dev_info;
+	uint32_t i;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->data->dev_started) {
+		CS_LOG_ERR("Illegal operation");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+	if (sched_ctx->nb_slaves >= MAX_SLAVES_NUM) {
+		CS_LOG_ERR("Too many slaves attached");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++)
+		if (sched_ctx->slaves[i].dev_id == slave_id) {
+			CS_LOG_ERR("Slave already added");
+			return -ENOTSUP;
+		}
+
+	slave = &sched_ctx->slaves[sched_ctx->nb_slaves];
+
+	rte_cryptodev_info_get(slave_id, &dev_info);
+
+	slave->dev_id = slave_id;
+	slave->dev_type = dev_info.dev_type;
+	sched_ctx->nb_slaves++;
+
+	if (update_scheduler_capability(sched_ctx) < 0) {
+		slave->dev_id = 0;
+		slave->dev_type = 0;
+		sched_ctx->nb_slaves--;
+
+		CS_LOG_ERR("capabilities update failed");
+		return -ENOTSUP;
+	}
+
+	update_scheduler_feature_flag(dev);
+
+	update_max_nb_qp(sched_ctx);
+
+	return 0;
+}
+
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+	uint32_t i, slave_pos;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->data->dev_started) {
+		CS_LOG_ERR("Illegal operation");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	for (slave_pos = 0; slave_pos < sched_ctx->nb_slaves; slave_pos++)
+		if (sched_ctx->slaves[slave_pos].dev_id == slave_id)
+			break;
+	if (slave_pos == sched_ctx->nb_slaves) {
+		CS_LOG_ERR("Cannot find slave");
+		return -ENOTSUP;
+	}
+
+	if (sched_ctx->ops.slave_detach &&
+			(*sched_ctx->ops.slave_detach)(dev, slave_id) < 0) {
+		CS_LOG_ERR("Failed to detach slave");
+		return -ENOTSUP;
+	}
+
+	for (i = slave_pos; i < sched_ctx->nb_slaves - 1; i++) {
+		memcpy(&sched_ctx->slaves[i], &sched_ctx->slaves[i+1],
+				sizeof(struct scheduler_slave));
+	}
+	memset(&sched_ctx->slaves[sched_ctx->nb_slaves - 1], 0,
+			sizeof(struct scheduler_slave));
+	sched_ctx->nb_slaves--;
+
+	if (update_scheduler_capability(sched_ctx) < 0) {
+		CS_LOG_ERR("capabilities update failed");
+		return -ENOTSUP;
+	}
+
+	update_scheduler_feature_flag(dev);
+
+	update_max_nb_qp(sched_ctx);
+
+	return 0;
+}
+
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+		enum rte_cryptodev_scheduler_mode mode)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+	int ret;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->data->dev_started) {
+		CS_LOG_ERR("Illegal operation");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	if (mode == sched_ctx->mode && mode != CDEV_SCHED_MODE_USERDEFINED)
+		return 0;
+
+	switch (mode) {
+	case CDEV_SCHED_MODE_ROUNDROBIN:
+		if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+				roundrobin_scheduler) < 0) {
+			CS_LOG_ERR("Failed to load scheduler");
+			return -1;
+		}
+		break;
+	case CDEV_SCHED_MODE_MIGRATION:
+	case CDEV_SCHED_MODE_FALLBACK:
+	default:
+		CS_LOG_ERR("Not yet supported");
+		return -ENOTSUP;
+	}
+
+	rte_free(sched_ctx->private_ctx);
+	sched_ctx->private_ctx = NULL;
+
+	if (*sched_ctx->ops.create_private_ctx) {
+		ret = (*sched_ctx->ops.create_private_ctx)(dev);
+		if (ret < 0) {
+			CS_LOG_ERR("Unable to create scheduler private context");
+			return ret;
+		}
+	}
+
+	sched_ctx->mode = mode;
+
+	return 0;
+}
+
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	return sched_ctx->mode;
+}
+
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+		uint32_t enable_reorder)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->data->dev_started) {
+		CS_LOG_ERR("Illegal operation");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	sched_ctx->reordering_enabled = enable_reorder;
+
+	return 0;
+}
+
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	return (int)sched_ctx->reordering_enabled;
+}
+
+int
+rte_cryptodev_scheduler_ioctl(uint8_t scheduler_id, uint16_t ioctl_id,
+		void *ioctl_param)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	if (ioctl_id >= sched_ctx->ioctl_count) {
+		CS_LOG_ERR("Invalid IOCTL ID");
+		return -EINVAL;
+	}
+
+	return (*sched_ctx->ioctls[ioctl_id].ioctl)(ioctl_param);
+}
+
+int
+rte_cryptodev_scheduler_ioctl_count(uint8_t scheduler_id)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	return sched_ctx->ioctl_count;
+}
+
+int
+rte_cryptodev_scheduler_ioctl_list(uint8_t scheduler_id,
+		struct rte_cryptodev_scheduler_ioctl_description **ioctls_desc,
+		uint16_t nb_ioctls)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+	uint32_t i;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	if (nb_ioctls > sched_ctx->ioctl_count) {
+		CS_LOG_ERR("Invalid IOCTL number");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < nb_ioctls; i++) {
+		ioctls_desc[i]->request_id = sched_ctx->ioctls[i].id;
+		ioctls_desc[i]->name = sched_ctx->ioctls[i].name;
+		ioctls_desc[i]->description = sched_ctx->ioctls[i].description;
+	}
+
+	return 0;
+}
+
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+		struct rte_cryptodev_scheduler *scheduler)
+{
+	struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+	struct scheduler_ctx *sched_ctx;
+	uint32_t i;
+	size_t size;
+
+	if (!dev) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+		CS_LOG_ERR("Operation not supported");
+		return -ENOTSUP;
+	}
+
+	/* the device must be stopped before loading a scheduler */
+	if (dev->data->dev_started) {
+		CS_LOG_ERR("Device should be stopped before loading scheduler");
+		return -EBUSY;
+	}
+
+	sched_ctx = dev->data->dev_private;
+
+	strncpy(sched_ctx->name, scheduler->name,
+			RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+	strncpy(sched_ctx->description, scheduler->description,
+			RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+
+	/* load scheduler instance ioctls */
+	rte_free(sched_ctx->ioctls);
+	sched_ctx->ioctls = NULL;
+	if (scheduler->nb_ioctls) {
+		size = sizeof(struct rte_cryptodev_scheduler_ioctl) *
+				scheduler->nb_ioctls;
+		sched_ctx->ioctls = rte_zmalloc_socket(NULL, size, 0,
+				SOCKET_ID_ANY);
+		if (!sched_ctx->ioctls) {
+			CS_LOG_ERR("Failed to allocate memory");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < scheduler->nb_ioctls; i++) {
+		struct rte_cryptodev_scheduler_ioctl *ioctl =
+				&sched_ctx->ioctls[scheduler->ioctls[i]->id];
+
+		strncpy(ioctl->name, scheduler->ioctls[i]->name,
+				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+		strncpy(ioctl->description, scheduler->ioctls[i]->description,
+				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+		ioctl->ioctl = scheduler->ioctls[i]->ioctl;
+	}
+
+	sched_ctx->ioctl_count = scheduler->nb_ioctls;
+
+	/* load scheduler instance options */
+	rte_free(sched_ctx->options);
+	sched_ctx->options = NULL;
+	if (scheduler->nb_options) {
+		size = sizeof(struct rte_cryptodev_scheduler_option) *
+				scheduler->nb_options;
+		sched_ctx->options = rte_zmalloc_socket(NULL, size, 0,
+				SOCKET_ID_ANY);
+		if (!sched_ctx->options) {
+			CS_LOG_ERR("Failed to allocate memory");
+			return -ENOMEM;
+		}
+	}
+
+	for (i = 0; i < scheduler->nb_options; i++) {
+		struct rte_cryptodev_scheduler_option *option =
+				&sched_ctx->options[i];
+
+		strncpy(option->name, scheduler->options[i]->name,
+				RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+		strncpy(option->description, scheduler->options[i]->description,
+				RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+		option->option_parser = scheduler->options[i]->option_parser;
+	}
+	sched_ctx->nb_options = scheduler->nb_options;
+
+	/* load scheduler instance operations functions */
+	sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
+	sched_ctx->ops.create_private_ctx = scheduler->ops->create_private_ctx;
+	sched_ctx->ops.scheduler_start = scheduler->ops->scheduler_start;
+	sched_ctx->ops.scheduler_stop = scheduler->ops->scheduler_stop;
+	sched_ctx->ops.slave_attach = scheduler->ops->slave_attach;
+	sched_ctx->ops.slave_detach = scheduler->ops->slave_detach;
+
+	return 0;
+}
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
new file mode 100644
index 0000000..ee5eeb4
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -0,0 +1,183 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_H
+#define _RTE_CRYPTO_SCHEDULER_H
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_scheduler_ioctls.h>
+#include <rte_cryptodev_scheduler_operations.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Crypto scheduler PMD operation modes
+ */
+enum rte_cryptodev_scheduler_mode {
+	CDEV_SCHED_MODE_NOT_SET = 0,
+	CDEV_SCHED_MODE_USERDEFINED,
+	CDEV_SCHED_MODE_ROUNDROBIN,
+	CDEV_SCHED_MODE_MIGRATION,
+	CDEV_SCHED_MODE_FALLBACK,
+	CDEV_SCHED_MODE_MULTICORE,
+
+	CDEV_SCHED_MODE_COUNT /* number of modes */
+};
+
+#define RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN	(64)
+#define RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN	(256)
+
+struct rte_cryptodev_scheduler;
+
+/**
+ * Load a user defined scheduler
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ * @param	scheduler	Pointer to the user defined scheduler
+ *
+ * @return
+ *	0 on success, a negative integer otherwise.
+ */
+int
+rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
+		struct rte_cryptodev_scheduler *scheduler);
+
+/**
+ * Attach a pre-configured crypto device to the scheduler
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ * @param	slave_id	Crypto device ID to be attached
+ *
+ * @return
+ *	0 on success, a negative integer otherwise.
+ */
+int
+rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Detach an attached crypto device from the scheduler
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ * @param	slave_id	Crypto device ID to be detached
+ *
+ * @return
+ *	0 on success, a negative integer otherwise.
+ */
+int
+rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id);
+
+/**
+ * Set the scheduling mode
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ * @param	mode		The scheduling mode
+ *
+ * @return
+ *	0 on success, a negative integer otherwise.
+ */
+int
+rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
+		enum rte_cryptodev_scheduler_mode mode);
+
+/**
+ * Get the current scheduling mode
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ *
+ * @return
+ *	The current scheduling mode
+ */
+enum rte_cryptodev_scheduler_mode
+rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id);
+
+/**
+ * Set the crypto ops reordering feature on/off
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ * @param	enable_reorder	Set the crypto op reordering feature
+ *				0: disable reordering
+ *				1: enable reordering
+ *
+ * @return
+ *	0 on success, a negative integer otherwise.
+ */
+int
+rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
+		uint32_t enable_reorder);
+
+/**
+ * Get the current crypto ops reordering feature
+ *
+ * @param	scheduler_id	The target scheduler device ID
+ *
+ * @return
+ *	0 if reordering is disabled
+ *	1 if reordering is enabled
+ *	a negative integer on error.
+ */
+int
+rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+
+typedef int (*rte_cryptodev_scheduler_option_parser)(
+		const char *key, const char *value, void *extra_args);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+
+typedef uint16_t (*rte_cryptodev_scheduler_burst_dequeue_t)(void *qp_ctx,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+
+struct rte_cryptodev_scheduler_option {
+	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+
+	rte_cryptodev_scheduler_option_parser option_parser;
+};
+
+struct rte_cryptodev_scheduler {
+	const char *name;
+	const char *description;
+	struct rte_cryptodev_scheduler_option **options;
+	unsigned nb_options;
+
+	struct rte_cryptodev_scheduler_ioctl **ioctls;
+	unsigned nb_ioctls;
+
+	struct rte_cryptodev_scheduler_ops *ops;
+};
+
+extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_H */
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_ioctls.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_ioctls.h
new file mode 100644
index 0000000..c19a9d3
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_ioctls.h
@@ -0,0 +1,92 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RTE_CRYPTODEV_SCHEDULER_IOCTLS
+#define _RTE_CRYPTODEV_SCHEDULER_IOCTLS
+
+#include <rte_cryptodev_scheduler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define RTE_CRYPTODEV_SCHEDULER_IOCTL_NAME_MAX_LEN	(64)
+#define RTE_CRYPTODEV_SCHEDULER_IOCTL_DESC_MAX_LEN	(256)
+
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_IOCTLS	(8)
+
+#define CDEV_SCHED_IOCTL_LIVE_MIGRATION_SCHED_STATE_GET		(1)
+#define CDEV_SCHED_IOCTL_LIVE_MIGRATION_SCHED_MIGRATE		(2)
+#define CDEV_SCHED_IOCTL_FALLBACK_SCHED_SET_PRIMARY		(3)
+
+struct ioctl_migration_scheduler_state_get {
+	uint8_t slave_id;
+	/**< Active crypto device id */
+	enum migration_scheduler_state {
+		MIGRATION_SCHEDULER_STATE_ACTIVE,
+		MIGRATION_SCHEDULER_STATE_AWAITING_MIGRATE,
+		MIGRATION_SCHEDULER_STATE_MIGRATION
+	} state;
+	/**< Migration Scheduler State */
+};
+
+int
+rte_cryptodev_scheduler_ioctl(uint8_t scheduler_id, uint16_t request_id,
+		void *request_params);
+
+int
+rte_cryptodev_scheduler_ioctl_count(uint8_t scheduler_id);
+
+struct rte_cryptodev_scheduler_ioctl_description {
+	uint16_t request_id;
+	const char *name;
+	const char *description;
+};
+
+int
+rte_cryptodev_scheduler_ioctl_list(uint8_t scheduler_id,
+		struct rte_cryptodev_scheduler_ioctl_description **ioctls_desc,
+		uint16_t nb_ioctls);
+
+typedef int (*rte_cryptodev_scheduler_ioctl_fn)(void *request_params);
+
+struct rte_cryptodev_scheduler_ioctl {
+	int id;
+	char name[RTE_CRYPTODEV_SCHEDULER_IOCTL_NAME_MAX_LEN];
+	char description[RTE_CRYPTODEV_SCHEDULER_IOCTL_DESC_MAX_LEN];
+
+	rte_cryptodev_scheduler_ioctl_fn ioctl;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTODEV_SCHEDULER_IOCTLS */
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
new file mode 100644
index 0000000..ab8595b
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler_operations.h
@@ -0,0 +1,71 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+#define _RTE_CRYPTO_SCHEDULER_OPERATIONS_H
+
+#include <rte_cryptodev.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef int (*rte_cryptodev_scheduler_slave_attach_t)(
+		struct rte_cryptodev *dev, uint8_t slave_id);
+typedef int (*rte_cryptodev_scheduler_slave_detach_t)(
+		struct rte_cryptodev *dev, uint8_t slave_id);
+
+typedef int (*rte_cryptodev_scheduler_start_t)(struct rte_cryptodev *dev);
+typedef int (*rte_cryptodev_scheduler_stop_t)(struct rte_cryptodev *dev);
+
+typedef int (*rte_cryptodev_scheduler_config_queue_pair)(
+		struct rte_cryptodev *dev, uint16_t qp_id);
+
+typedef int (*rte_cryptodev_scheduler_create_private_ctx)(
+		struct rte_cryptodev *dev);
+
+struct rte_cryptodev_scheduler_ops {
+	rte_cryptodev_scheduler_slave_attach_t slave_attach;
+	rte_cryptodev_scheduler_slave_detach_t slave_detach;
+
+	rte_cryptodev_scheduler_start_t scheduler_start;
+	rte_cryptodev_scheduler_stop_t scheduler_stop;
+
+	rte_cryptodev_scheduler_config_queue_pair config_queue_pair;
+
+	rte_cryptodev_scheduler_create_private_ctx create_private_ctx;
+};
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RTE_CRYPTO_SCHEDULER_OPERATIONS_H */
diff --git a/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
new file mode 100644
index 0000000..0510f68
--- /dev/null
+++ b/drivers/crypto/scheduler/rte_pmd_crypto_scheduler_version.map
@@ -0,0 +1,12 @@
+DPDK_17.02 {
+	global:
+
+	rte_cryptodev_scheduler_load_user_scheduler;
+	rte_cryptodev_scheduler_slave_attach;
+	rte_cryptodev_scheduler_slave_detach;
+	rte_cryptodev_scheduler_mode_set;
+	rte_cryptodev_scheduler_mode_get;
+	rte_cryptodev_scheduler_ordering_set;
+	rte_cryptodev_scheduler_ordering_get;
+
+	local: *;
+};
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
new file mode 100644
index 0000000..0c13b55
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -0,0 +1,168 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_reorder.h>
+#include <rte_cryptodev_scheduler.h>
+
+#include "scheduler_pmd_private.h"
+
+static uint16_t
+scheduler_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct scheduler_qp_ctx *qp_ctx = queue_pair;
+	uint16_t processed_ops;
+
+	processed_ops = (*qp_ctx->schedule_enqueue)(qp_ctx, ops,
+			nb_ops);
+
+	return processed_ops;
+}
+
+static uint16_t
+scheduler_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct scheduler_qp_ctx *qp_ctx = queue_pair;
+	uint16_t processed_ops;
+
+	processed_ops = (*qp_ctx->schedule_dequeue)(qp_ctx, ops,
+			nb_ops);
+
+	return processed_ops;
+}
+
+static uint32_t unique_name_id;
+
+static int
+cryptodev_scheduler_create(const char *name,
+	struct rte_crypto_vdev_init_params *init_params)
+{
+	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *dev;
+	struct scheduler_ctx *sched_ctx;
+
+	if (snprintf(crypto_dev_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%u",
+		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), unique_name_id++) < 0) {
+		CS_LOG_ERR("driver %s: failed to create unique cryptodev "
+			"name", name);
+		return -EFAULT;
+	}
+
+	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+			sizeof(struct scheduler_ctx),
+			init_params->socket_id);
+	if (dev == NULL) {
+		CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
+			name);
+		return -EFAULT;
+	}
+
+	dev->dev_type = RTE_CRYPTODEV_SCHEDULER_PMD;
+	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
+
+	dev->enqueue_burst = scheduler_enqueue_burst;
+	dev->dequeue_burst = scheduler_dequeue_burst;
+
+	sched_ctx = dev->data->dev_private;
+	sched_ctx->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+
+	return 0;
+}
+
+static int
+cryptodev_scheduler_remove(const char *name)
+{
+	struct rte_cryptodev *dev;
+	struct scheduler_ctx *sched_ctx;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	dev = rte_cryptodev_pmd_get_named_dev(name);
+	if (dev == NULL)
+		return -EINVAL;
+
+	sched_ctx = dev->data->dev_private;
+
+	if (sched_ctx->nb_slaves) {
+		uint32_t i;
+
+		for (i = 0; i < sched_ctx->nb_slaves; i++)
+			rte_cryptodev_scheduler_slave_detach(dev->data->dev_id,
+					sched_ctx->slaves[i].dev_id);
+	}
+
+	RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
+		"socket %u\n", name, rte_socket_id());
+
+	return 0;
+}
+
+static int
+cryptodev_scheduler_probe(const char *name, const char *input_args)
+{
+	struct rte_crypto_vdev_init_params init_params = {
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+		rte_socket_id()
+	};
+
+	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+			init_params.socket_id);
+	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
+			init_params.max_nb_queue_pairs);
+	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
+			init_params.max_nb_sessions);
+
+	return cryptodev_scheduler_create(name, &init_params);
+}
+
+static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
+	.probe = cryptodev_scheduler_probe,
+	.remove = cryptodev_scheduler_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
+	cryptodev_scheduler_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
+	"max_nb_queue_pairs=<int> "
+	"max_nb_sessions=<int> "
+	"socket_id=<int>");
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
new file mode 100644
index 0000000..972a355
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -0,0 +1,495 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <string.h>
+
+#include <rte_config.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_dev.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_reorder.h>
+
+#include "scheduler_pmd_private.h"
+
+/** Configure device */
+static int
+scheduler_pmd_config(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned i;
+	int ret = 0;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+		ret = (*slave_dev->dev_ops->dev_configure)(slave_dev);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
+static int
+update_reorder_buff(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+	if (sched_ctx->reordering_enabled) {
+		char reorder_buff_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+		uint32_t buff_size = sched_ctx->nb_slaves * PER_SLAVE_BUFF_SIZE;
+
+		if (qp_ctx->reorder_buf) {
+			rte_reorder_free(qp_ctx->reorder_buf);
+			qp_ctx->reorder_buf = NULL;
+		}
+
+		if (!buff_size)
+			return 0;
+
+		if (snprintf(reorder_buff_name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),
+			dev->data->dev_id, qp_id) < 0) {
+			CS_LOG_ERR("failed to create unique reorder buffer "
+					"name");
+			return -ENOMEM;
+		}
+
+		qp_ctx->reorder_buf = rte_reorder_create(reorder_buff_name,
+				rte_socket_id(), buff_size);
+		if (!qp_ctx->reorder_buf) {
+			CS_LOG_ERR("failed to create reorder buffer");
+			return -ENOMEM;
+		}
+	} else {
+		if (qp_ctx->reorder_buf) {
+			rte_reorder_free(qp_ctx->reorder_buf);
+			qp_ctx->reorder_buf = NULL;
+		}
+	}
+
+	return 0;
+}
+
+/** Start device */
+static int
+scheduler_pmd_start(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	uint32_t i;
+	int ret;
+
+	if (dev->data->dev_started)
+		return 0;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		ret = update_reorder_buff(dev, i);
+		if (ret < 0) {
+			CS_LOG_ERR("Failed to update reorder buffer");
+			return ret;
+		}
+	}
+
+	if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) {
+		CS_LOG_ERR("Scheduler mode is not set");
+		return -1;
+	}
+
+	if (!sched_ctx->nb_slaves) {
+		CS_LOG_ERR("No slave in the scheduler");
+		return -1;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.slave_attach, -ENOTSUP);
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+		if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) {
+			CS_LOG_ERR("Failed to attach slave");
+			return -ENOTSUP;
+		}
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP);
+
+	if ((*sched_ctx->ops.scheduler_start)(dev) < 0) {
+		CS_LOG_ERR("Scheduler start failed");
+		return -1;
+	}
+
+	/* start all slaves */
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+		ret = (*slave_dev->dev_ops->dev_start)(slave_dev);
+		if (ret < 0) {
+			CS_LOG_ERR("Failed to start slave dev %u",
+					slave_dev_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/** Stop device */
+static void
+scheduler_pmd_stop(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned i;
+
+	if (!dev->data->dev_started)
+		return;
+
+	/* stop all slaves first */
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+		(*slave_dev->dev_ops->dev_stop)(slave_dev);
+	}
+
+	if (*sched_ctx->ops.scheduler_stop)
+		(*sched_ctx->ops.scheduler_stop)(dev);
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+
+		if (*sched_ctx->ops.slave_detach)
+			(*sched_ctx->ops.slave_detach)(dev, slave_dev_id);
+	}
+}
+
+/** Close device */
+static int
+scheduler_pmd_close(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned i;
+	int ret;
+
+	/* the dev should be stopped before being closed */
+	if (dev->data->dev_started)
+		return -EBUSY;
+
+	/* close all slaves first */
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+		ret = (*slave_dev->dev_ops->dev_close)(slave_dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+
+		if (qp_ctx->reorder_buf) {
+			rte_reorder_free(qp_ctx->reorder_buf);
+			qp_ctx->reorder_buf = NULL;
+		}
+
+		if (qp_ctx->private_qp_ctx) {
+			rte_free(qp_ctx->private_qp_ctx);
+			qp_ctx->private_qp_ctx = NULL;
+		}
+	}
+
+	if (sched_ctx->private_ctx)
+		rte_free(sched_ctx->private_ctx);
+
+	if (sched_ctx->capabilities)
+		rte_free(sched_ctx->capabilities);
+
+	if (sched_ctx->ioctls)
+		rte_free(sched_ctx->ioctls);
+
+	if (sched_ctx->options)
+		rte_free(sched_ctx->options);
+
+	return 0;
+}
+
+/** Get device statistics */
+static void
+scheduler_pmd_stats_get(struct rte_cryptodev *dev,
+	struct rte_cryptodev_stats *stats)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned i;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+		struct rte_cryptodev_stats slave_stats = {0};
+
+		(*slave_dev->dev_ops->stats_get)(slave_dev, &slave_stats);
+
+		stats->enqueued_count += slave_stats.enqueued_count;
+		stats->dequeued_count += slave_stats.dequeued_count;
+
+		stats->enqueue_err_count += slave_stats.enqueue_err_count;
+		stats->dequeue_err_count += slave_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+scheduler_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned i;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev *slave_dev =
+				rte_cryptodev_pmd_get_dev(slave_dev_id);
+
+		(*slave_dev->dev_ops->stats_reset)(slave_dev);
+	}
+}
+
+/** Get device info */
+static void
+scheduler_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	unsigned max_nb_sessions = sched_ctx->nb_slaves ? UINT32_MAX : 0;
+	unsigned i;
+
+	if (!dev_info)
+		return;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id;
+		struct rte_cryptodev_info slave_info;
+
+		rte_cryptodev_info_get(slave_dev_id, &slave_info);
+		max_nb_sessions = slave_info.sym.max_nb_sessions <
+				max_nb_sessions ?
+				slave_info.sym.max_nb_sessions :
+				max_nb_sessions;
+	}
+
+	dev_info->dev_type = dev->dev_type;
+	dev_info->feature_flags = dev->feature_flags;
+	dev_info->capabilities = sched_ctx->capabilities;
+	dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs;
+	dev_info->sym.max_nb_sessions = max_nb_sessions;
+}
+
+/** Release queue pair */
+static int
+scheduler_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+
+	if (!qp_ctx)
+		return 0;
+
+	if (qp_ctx->reorder_buf)
+		rte_reorder_free(qp_ctx->reorder_buf);
+	if (qp_ctx->private_qp_ctx)
+		rte_free(qp_ctx->private_qp_ctx);
+
+	rte_free(qp_ctx);
+	dev->data->queue_pairs[qp_id] = NULL;
+
+	return 0;
+}
+
+/** Setup a queue pair */
+static int
+scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+	__rte_unused const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+	struct scheduler_qp_ctx *qp_ctx;
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN,
+			"CRYPTO_SCHE PMD %u QP %u",
+			dev->data->dev_id, qp_id) < 0) {
+		CS_LOG_ERR("Failed to create unique queue pair name");
+		return -EFAULT;
+	}
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		scheduler_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp_ctx = rte_zmalloc_socket(name, sizeof(*qp_ctx), RTE_CACHE_LINE_SIZE,
+			socket_id);
+	if (qp_ctx == NULL)
+		return -ENOMEM;
+
+	dev->data->queue_pairs[qp_id] = qp_ctx;
+
+	if (*sched_ctx->ops.config_queue_pair) {
+		if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) {
+			CS_LOG_ERR("Unable to configure queue pair");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/** Start queue pair */
+static int
+scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint16_t queue_pair_id)
+{
+	return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+scheduler_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+	return sizeof(struct scheduler_session);
+}
+
+static int
+config_slave_sess(struct scheduler_ctx *sched_ctx,
+		struct rte_crypto_sym_xform *xform,
+		struct scheduler_session *sess,
+		uint32_t create)
+{
+	unsigned i;
+
+	for (i = 0; i < sched_ctx->nb_slaves; i++) {
+		struct scheduler_slave *slave = &sched_ctx->slaves[i];
+		struct rte_cryptodev *dev = &rte_cryptodev_globals->
+				devs[slave->dev_id];
+
+		if (sess->sessions[i]) {
+			if (create)
+				continue;
+			/* !create */
+			(*dev->dev_ops->session_clear)(dev,
+					(void *)sess->sessions[i]);
+			sess->sessions[i] = NULL;
+		} else {
+			if (!create)
+				continue;
+			/* create */
+			sess->sessions[i] =
+					rte_cryptodev_sym_session_create(
+							slave->dev_id, xform);
+			if (!sess->sessions[i]) {
+				config_slave_sess(sched_ctx, NULL, sess, 0);
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+scheduler_pmd_session_clear(struct rte_cryptodev *dev,
+	void *sess)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+	config_slave_sess(sched_ctx, NULL, sess, 0);
+
+	memset(sess, 0, sizeof(struct scheduler_session));
+}
+
+static void *
+scheduler_pmd_session_configure(struct rte_cryptodev *dev,
+	struct rte_crypto_sym_xform *xform, void *sess)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+	if (config_slave_sess(sched_ctx, xform, sess, 1) < 0) {
+		CS_LOG_ERR("unable to configure sym session");
+		return NULL;
+	}
+
+	return sess;
+}
+
+struct rte_cryptodev_ops scheduler_pmd_ops = {
+		.dev_configure		= scheduler_pmd_config,
+		.dev_start		= scheduler_pmd_start,
+		.dev_stop		= scheduler_pmd_stop,
+		.dev_close		= scheduler_pmd_close,
+
+		.stats_get		= scheduler_pmd_stats_get,
+		.stats_reset		= scheduler_pmd_stats_reset,
+
+		.dev_infos_get		= scheduler_pmd_info_get,
+
+		.queue_pair_setup	= scheduler_pmd_qp_setup,
+		.queue_pair_release	= scheduler_pmd_qp_release,
+		.queue_pair_start	= scheduler_pmd_qp_start,
+		.queue_pair_stop	= scheduler_pmd_qp_stop,
+		.queue_pair_count	= scheduler_pmd_qp_count,
+
+		.session_get_size	= scheduler_pmd_session_get_size,
+		.session_configure	= scheduler_pmd_session_configure,
+		.session_clear		= scheduler_pmd_session_clear,
+};
+
+struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops;
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
new file mode 100644
index 0000000..550fdcc
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -0,0 +1,122 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCHEDULER_PMD_PRIVATE_H
+#define _SCHEDULER_PMD_PRIVATE_H
+
+#include <rte_hash.h>
+#include <rte_reorder.h>
+#include <rte_cryptodev_scheduler.h>
+#include <rte_cryptodev_scheduler_ioctls.h>
+
+/** Maximum number of slave devices that can be attached to one scheduler */
+#ifndef MAX_SLAVES_NUM
+#define MAX_SLAVES_NUM				(8)
+#endif
+
+#define PER_SLAVE_BUFF_SIZE			(256)
+
+#define CS_LOG_ERR(fmt, args...)					\
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",		\
+		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
+		__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG
+#define CS_LOG_INFO(fmt, args...)					\
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
+		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
+		__func__, __LINE__, ## args)
+
+#define CS_LOG_DBG(fmt, args...)					\
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
+		RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD),			\
+		__func__, __LINE__, ## args)
+#else
+#define CS_LOG_INFO(fmt, args...)
+#define CS_LOG_DBG(fmt, args...)
+#endif
+
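+/** Runtime state of one attached slave device. */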
+struct scheduler_slave {
+	uint8_t dev_id;
+	uint16_t qp_id;
+	uint32_t nb_inflight_cops;
+
+	enum rte_cryptodev_type dev_type;
+};
+
+struct scheduler_ctx {
+	void *private_ctx;
+	/**< private scheduler context pointer */
+
+	struct rte_cryptodev_capabilities *capabilities;
+	unsigned nb_capabilities;
+
+	unsigned max_nb_queue_pairs;
+
+	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	unsigned nb_slaves;
+
+	enum rte_cryptodev_scheduler_mode mode;
+
+	uint32_t ioctl_count;
+	struct rte_cryptodev_scheduler_ioctl **ioctls;
+
+	uint32_t nb_options;
+	struct rte_cryptodev_scheduler_option **options;
+
+	struct rte_cryptodev_scheduler_ops ops;
+
+	uint8_t reordering_enabled;
+
+	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
+	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
+} __rte_cache_aligned;
+
+struct scheduler_qp_ctx {
+	void *private_qp_ctx;
+
+	rte_cryptodev_scheduler_burst_enqueue_t schedule_enqueue;
+	rte_cryptodev_scheduler_burst_dequeue_t schedule_dequeue;
+
+	struct rte_reorder_buffer *reorder_buf;
+	uint32_t seqn;
+} __rte_cache_aligned;
+
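+/** A scheduler session holds one slave session per attached slave. */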
+struct scheduler_session {
+	struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
+};
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;
+
+#endif /* _SCHEDULER_PMD_PRIVATE_H */
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
new file mode 100644
index 0000000..be0b7fd
--- /dev/null
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -0,0 +1,419 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_scheduler_operations.h>
+
+#include "scheduler_pmd_private.h"
+
+/* placeholder for a private round-robin scheduler context; currently unused */
+struct roundrobin_scheduler_ctx {
+};
+
+struct rr_scheduler_qp_ctx {
+	struct scheduler_slave slaves[MAX_SLAVES_NUM];
+	unsigned nb_slaves;
+
+	unsigned last_enq_slave_idx;
+	unsigned last_deq_slave_idx;
+};
+
+static uint16_t
+schedule_enqueue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rr_scheduler_qp_ctx *rr_qp_ctx =
+			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+	uint16_t i, processed_ops;
+	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	for (i = 0; i < nb_ops && i < 4; i++)
+		rte_prefetch0(ops[i]->sym->session);
+
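+	/* replace each op's scheduler session with the slave's session,
+	 * four at a time, prefetching four ops ahead
+	 */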
+	for (i = 0; i < nb_ops - 8; i += 4) {
+		sess0 = (struct scheduler_session *)
+				ops[i]->sym->session->_private;
+		sess1 = (struct scheduler_session *)
+				ops[i+1]->sym->session->_private;
+		sess2 = (struct scheduler_session *)
+				ops[i+2]->sym->session->_private;
+		sess3 = (struct scheduler_session *)
+				ops[i+3]->sym->session->_private;
+
+		ops[i]->sym->session = sess0->sessions[slave_idx];
+		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+
+		rte_prefetch0(ops[i + 4]->sym->session);
+		rte_prefetch0(ops[i + 5]->sym->session);
+		rte_prefetch0(ops[i + 6]->sym->session);
+		rte_prefetch0(ops[i + 7]->sym->session);
+	}
+
+	for (; i < nb_ops; i++) {
+		sess0 = (struct scheduler_session *)
+				ops[i]->sym->session->_private;
+		ops[i]->sym->session = sess0->sessions[slave_idx];
+	}
+
+	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+			slave->qp_id, ops, nb_ops);
+
+	slave->nb_inflight_cops += processed_ops;
+
+	rr_qp_ctx->last_enq_slave_idx += 1;
+	if (unlikely(rr_qp_ctx->last_enq_slave_idx >= rr_qp_ctx->nb_slaves))
+		rr_qp_ctx->last_enq_slave_idx = 0;
+
+	return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct scheduler_qp_ctx *gen_qp_ctx = qp_ctx;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx =
+			gen_qp_ctx->private_qp_ctx;
+	uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
+	struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
+	uint16_t i, processed_ops;
+	struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+	if (unlikely(nb_ops == 0))
+		return 0;
+
+	for (i = 0; i < nb_ops && i < 4; i++) {
+		rte_prefetch0(ops[i]->sym->session);
+		rte_prefetch0(ops[i]->sym->m_src);
+	}
+
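+	/* besides swapping in the slave session, tag each op's source mbuf
+	 * with an ascending sequence number so dequeue can restore the order
+	 */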
+	for (i = 0; i < nb_ops - 8; i += 4) {
+		sess0 = (struct scheduler_session *)
+				ops[i]->sym->session->_private;
+		sess1 = (struct scheduler_session *)
+				ops[i+1]->sym->session->_private;
+		sess2 = (struct scheduler_session *)
+				ops[i+2]->sym->session->_private;
+		sess3 = (struct scheduler_session *)
+				ops[i+3]->sym->session->_private;
+
+		ops[i]->sym->session = sess0->sessions[slave_idx];
+		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+		ops[i + 1]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+		ops[i + 2]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+		ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+		ops[i + 3]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+
+		rte_prefetch0(ops[i + 4]->sym->session);
+		rte_prefetch0(ops[i + 4]->sym->m_src);
+		rte_prefetch0(ops[i + 5]->sym->session);
+		rte_prefetch0(ops[i + 5]->sym->m_src);
+		rte_prefetch0(ops[i + 6]->sym->session);
+		rte_prefetch0(ops[i + 6]->sym->m_src);
+		rte_prefetch0(ops[i + 7]->sym->session);
+		rte_prefetch0(ops[i + 7]->sym->m_src);
+	}
+
+	for (; i < nb_ops; i++) {
+		sess0 = (struct scheduler_session *)
+				ops[i]->sym->session->_private;
+		ops[i]->sym->session = sess0->sessions[slave_idx];
+		ops[i]->sym->m_src->seqn = gen_qp_ctx->seqn++;
+	}
+
+	processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+			slave->qp_id, ops, nb_ops);
+
+	slave->nb_inflight_cops += processed_ops;
+
+	rr_qp_ctx->last_enq_slave_idx += 1;
+	if (unlikely(rr_qp_ctx->last_enq_slave_idx >= rr_qp_ctx->nb_slaves))
+		rr_qp_ctx->last_enq_slave_idx = 0;
+
+	return processed_ops;
+}
+
+
+static uint16_t
+schedule_dequeue(void *qp_ctx, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rr_scheduler_qp_ctx *rr_qp_ctx =
+			((struct scheduler_qp_ctx *)qp_ctx)->private_qp_ctx;
+	struct scheduler_slave *slave;
+	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+	uint16_t nb_deq_ops;
+
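+	/* advance past slaves that have no inflight cops; give up once
+	 * every slave has been checked
+	 */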
+	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+		do {
+			last_slave_idx += 1;
+
+			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+				last_slave_idx = 0;
+			/* wrapped around: no slave has inflight cops */
+			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+				return 0;
+		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+				== 0);
+	}
+
+	slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+			slave->qp_id, ops, nb_ops);
+
+	last_slave_idx += 1;
+	if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+		last_slave_idx = 0;
+
+	rr_qp_ctx->last_deq_slave_idx = last_slave_idx;
+
+	slave->nb_inflight_cops -= nb_deq_ops;
+
+	return nb_deq_ops;
+}
+
+static uint16_t
+schedule_dequeue_ordering(void *qp_ctx, struct rte_crypto_op **ops,
+		uint16_t nb_ops)
+{
+	struct scheduler_qp_ctx *gen_qp_ctx = (struct scheduler_qp_ctx *)qp_ctx;
+	struct rr_scheduler_qp_ctx *rr_qp_ctx = (gen_qp_ctx->private_qp_ctx);
+	struct scheduler_slave *slave;
+	struct rte_reorder_buffer *reorder_buff = gen_qp_ctx->reorder_buf;
+	struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+	uint16_t nb_deq_ops, nb_drained_mbufs;
+	const uint16_t nb_op_ops = nb_ops;
+	struct rte_crypto_op *op_ops[nb_op_ops];
+	struct rte_mbuf *reorder_mbufs[nb_op_ops];
+	uint32_t last_slave_idx = rr_qp_ctx->last_deq_slave_idx;
+	uint16_t i;
+
+	if (unlikely(rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops == 0)) {
+		do {
+			last_slave_idx += 1;
+
+			if (unlikely(last_slave_idx >= rr_qp_ctx->nb_slaves))
+				last_slave_idx = 0;
+			/* wrapped around: no slave has inflight cops */
+			if (last_slave_idx == rr_qp_ctx->last_deq_slave_idx)
+				return 0;
+		} while (rr_qp_ctx->slaves[last_slave_idx].nb_inflight_cops
+				== 0);
+	}
+
+	slave = &rr_qp_ctx->slaves[last_slave_idx];
+
+	nb_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+			slave->qp_id, op_ops, nb_ops);
+
+	rr_qp_ctx->last_deq_slave_idx += 1;
+	if (unlikely(rr_qp_ctx->last_deq_slave_idx >= rr_qp_ctx->nb_slaves))
+		rr_qp_ctx->last_deq_slave_idx = 0;
+
+	slave->nb_inflight_cops -= nb_deq_ops;
+
+	for (i = 0; i < nb_deq_ops && i < 4; i++)
+		rte_prefetch0(op_ops[i]->sym->m_src);
+
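+	/* stash each op pointer in its mbuf's headroom (buf_addr) and hand
+	 * the mbuf to the reorder buffer, which sorts on mbuf->seqn
+	 */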
+	for (i = 0; i < nb_deq_ops - 8; i += 4) {
+		mbuf0 = op_ops[i]->sym->m_src;
+		mbuf1 = op_ops[i + 1]->sym->m_src;
+		mbuf2 = op_ops[i + 2]->sym->m_src;
+		mbuf3 = op_ops[i + 3]->sym->m_src;
+
+		rte_memcpy(mbuf0->buf_addr, &op_ops[i], sizeof(op_ops[i]));
+		rte_memcpy(mbuf1->buf_addr, &op_ops[i+1], sizeof(op_ops[i+1]));
+		rte_memcpy(mbuf2->buf_addr, &op_ops[i+2], sizeof(op_ops[i+2]));
+		rte_memcpy(mbuf3->buf_addr, &op_ops[i+3], sizeof(op_ops[i+3]));
+
+		rte_reorder_insert(reorder_buff, mbuf0);
+		rte_reorder_insert(reorder_buff, mbuf1);
+		rte_reorder_insert(reorder_buff, mbuf2);
+		rte_reorder_insert(reorder_buff, mbuf3);
+
+		rte_prefetch0(op_ops[i + 4]->sym->m_src);
+		rte_prefetch0(op_ops[i + 5]->sym->m_src);
+		rte_prefetch0(op_ops[i + 6]->sym->m_src);
+		rte_prefetch0(op_ops[i + 7]->sym->m_src);
+	}
+
+	for (; i < nb_deq_ops; i++) {
+		mbuf0 = op_ops[i]->sym->m_src;
+		rte_memcpy(mbuf0->buf_addr, &op_ops[i], sizeof(op_ops[i]));
+		rte_reorder_insert(reorder_buff, mbuf0);
+	}
+
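+	/* drain the mbufs that are now in order and recover the op pointers
+	 * previously stored in the mbuf headroom
+	 */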
+	nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
+			nb_ops);
+	for (i = 0; i < nb_drained_mbufs && i < 4; i++)
+		rte_prefetch0(reorder_mbufs[i]);
+
+	for (i = 0; i < nb_drained_mbufs - 8; i += 4) {
+		ops[i] = *(struct rte_crypto_op **)reorder_mbufs[i]->buf_addr;
+		ops[i + 1] = *(struct rte_crypto_op **)
+			reorder_mbufs[i + 1]->buf_addr;
+		ops[i + 2] = *(struct rte_crypto_op **)
+			reorder_mbufs[i + 2]->buf_addr;
+		ops[i + 3] = *(struct rte_crypto_op **)
+			reorder_mbufs[i + 3]->buf_addr;
+
+		*(struct rte_crypto_op **)reorder_mbufs[i]->buf_addr = NULL;
+		*(struct rte_crypto_op **)reorder_mbufs[i + 1]->buf_addr = NULL;
+		*(struct rte_crypto_op **)reorder_mbufs[i + 2]->buf_addr = NULL;
+		*(struct rte_crypto_op **)reorder_mbufs[i + 3]->buf_addr = NULL;
+
+		rte_prefetch0(reorder_mbufs[i + 4]);
+		rte_prefetch0(reorder_mbufs[i + 5]);
+		rte_prefetch0(reorder_mbufs[i + 6]);
+		rte_prefetch0(reorder_mbufs[i + 7]);
+	}
+
+	for (; i < nb_drained_mbufs; i++) {
+		ops[i] = *(struct rte_crypto_op **)
+			reorder_mbufs[i]->buf_addr;
+		*(struct rte_crypto_op **)reorder_mbufs[i]->buf_addr = NULL;
+	}
+
+	return nb_drained_mbufs;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t slave_id)
+{
+	return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused uint8_t slave_id)
+{
+	return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+
+	uint16_t i;
+
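+	/* copy the attached slaves into each queue pair's private
+	 * round-robin context and select the enqueue/dequeue handlers
+	 */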
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+		struct rr_scheduler_qp_ctx *rr_qp_ctx =
+				qp_ctx->private_qp_ctx;
+		uint32_t j;
+		/* preserve slave 0's qp_id across the reset of the slave array */
+		uint16_t qp_id = rr_qp_ctx->slaves[0].qp_id;
+
+		memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
+				sizeof(struct scheduler_slave));
+		for (j = 0; j < sched_ctx->nb_slaves; j++) {
+			rr_qp_ctx->slaves[j].dev_id =
+					sched_ctx->slaves[j].dev_id;
+			rr_qp_ctx->slaves[j].qp_id = qp_id;
+		}
+
+		rr_qp_ctx->nb_slaves = sched_ctx->nb_slaves;
+
+		rr_qp_ctx->last_enq_slave_idx = 0;
+		rr_qp_ctx->last_deq_slave_idx = 0;
+
+		if (sched_ctx->reordering_enabled) {
+			qp_ctx->schedule_enqueue = &schedule_enqueue_ordering;
+			qp_ctx->schedule_dequeue = &schedule_dequeue_ordering;
+		} else {
+			qp_ctx->schedule_enqueue = &schedule_enqueue;
+			qp_ctx->schedule_dequeue = &schedule_dequeue;
+		}
+	}
+
+	return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+	struct rr_scheduler_qp_ctx *rr_qp_ctx;
+
+	rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0,
+			rte_socket_id());
+	if (!rr_qp_ctx) {
+		CS_LOG_ERR("failed to allocate memory for private queue pair");
+		return -ENOMEM;
+	}
+
+	qp_ctx->private_qp_ctx = (void *)rr_qp_ctx;
+
+	return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+static struct rte_cryptodev_scheduler_ops ops = {
+	slave_attach,
+	slave_detach,
+	scheduler_start,
+	scheduler_stop,
+	scheduler_config_qp,
+	scheduler_create_private_ctx
+};
+
+static struct rte_cryptodev_scheduler scheduler = {
+		.name = "roundrobin-scheduler",
+		.description = "scheduler which distributes bursts "
+				"round-robin across slave crypto devices",
+		.options = NULL,
+		.ops = &ops,
+		.ioctls = NULL
+};
+
+struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
index 8f63e8f..61a3ce0 100644
--- a/lib/librte_cryptodev/rte_cryptodev.h
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -66,6 +66,7 @@ extern "C" {
 /**< KASUMI PMD device name */
 #define CRYPTODEV_NAME_ZUC_PMD		crypto_zuc
 /**< KASUMI PMD device name */
+#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
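+/**< Scheduler Crypto PMD device name */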
 
 /** Crypto device type */
 enum rte_cryptodev_type {
@@ -77,6 +78,9 @@ enum rte_cryptodev_type {
 	RTE_CRYPTODEV_KASUMI_PMD,	/**< KASUMI PMD */
 	RTE_CRYPTODEV_ZUC_PMD,		/**< ZUC PMD */
 	RTE_CRYPTODEV_OPENSSL_PMD,    /**<  OpenSSL PMD */
+	RTE_CRYPTODEV_SCHEDULER_PMD,	/**< Crypto Scheduler PMD */
+
+	RTE_CRYPTODEV_TYPE_COUNT	/**< Number of crypto device types */
 };
 
 extern const char **rte_cyptodev_names;
diff --git a/mk/rte.app.mk b/mk/rte.app.mk
index f75f0e2..ee34688 100644
--- a/mk/rte.app.mk
+++ b/mk/rte.app.mk
@@ -70,7 +70,6 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PORT)           += -lrte_port
 
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PDUMP)          += -lrte_pdump
 _LDLIBS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR)    += -lrte_distributor
-_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER)        += -lrte_reorder
 _LDLIBS-$(CONFIG_RTE_LIBRTE_IP_FRAG)        += -lrte_ip_frag
 _LDLIBS-$(CONFIG_RTE_LIBRTE_METER)          += -lrte_meter
 _LDLIBS-$(CONFIG_RTE_LIBRTE_SCHED)          += -lrte_sched
@@ -98,6 +97,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_RING)           += -lrte_ring
 _LDLIBS-$(CONFIG_RTE_LIBRTE_EAL)            += -lrte_eal
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CMDLINE)        += -lrte_cmdline
 _LDLIBS-$(CONFIG_RTE_LIBRTE_CFGFILE)        += -lrte_cfgfile
+_LDLIBS-$(CONFIG_RTE_LIBRTE_REORDER)        += -lrte_reorder
 
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND)       += -lrte_pmd_bond
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT)    += -lrte_pmd_xenvirt -lxenstore
@@ -145,6 +145,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)      += -lrte_pmd_kasumi
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI)      += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC)         += -lrte_pmd_zuc
 _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_ZUC)         += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER)  += -lrte_pmd_crypto_scheduler
 endif # CONFIG_RTE_LIBRTE_CRYPTODEV
 
 endif # !CONFIG_RTE_BUILD_SHARED_LIBS
-- 
2.7.4
