@@ -54,6 +54,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_pmd_ops.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += rte_cryptodev_scheduler.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_roundrobin.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler_failover.c
# library dependencies
DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += lib/librte_cryptodev
@@ -336,6 +336,12 @@ rte_crpytodev_scheduler_mode_set(uint8_t scheduler_id,
return -1;
}
break;
+ case CDEV_SCHED_MODE_FAILOVER:
+ if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
+ failover_scheduler) < 0) {
+ CS_LOG_ERR("Failed to load scheduler");
+ return -1;
+ }
+ break;
default:
CS_LOG_ERR("Not yet supported");
return -ENOTSUP;
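
[Not part of the patch: a minimal usage sketch of selecting the new mode. It assumes the scheduler vdev and both slave devices already exist, that the slave-attach call below is the one exposed by rte_cryptodev_scheduler.h, and that the device ids are placeholders.]

    /* the first slave attached acts as the primary, the second as the
     * secondary (see scheduler_start() in scheduler_failover.c below)
     */
    rte_cryptodev_scheduler_slave_attach(scheduler_id, primary_dev_id);
    rte_cryptodev_scheduler_slave_attach(scheduler_id, secondary_dev_id);

    if (rte_crpytodev_scheduler_mode_set(scheduler_id,
            CDEV_SCHED_MODE_FAILOVER) < 0)
        rte_exit(EXIT_FAILURE, "cannot set failover mode\n");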
@@ -47,6 +47,7 @@ enum rte_cryptodev_scheduler_mode {
CDEV_SCHED_MODE_NOT_SET = 0,
CDEV_SCHED_MODE_USERDEFINED,
CDEV_SCHED_MODE_ROUNDROBIN,
+ CDEV_SCHED_MODE_FAILOVER,
CDEV_SCHED_MODE_COUNT /* number of modes */
};
@@ -158,6 +159,7 @@ struct rte_cryptodev_scheduler {
};
extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+extern struct rte_cryptodev_scheduler *failover_scheduler;
#ifdef __cplusplus
}
new file mode 100644
@@ -0,0 +1,427 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+
+#include "rte_cryptodev_scheduler_operations.h"
+#include "scheduler_pmd_private.h"
+
+#define PRIMARY_SLAVE_IDX 0
+#define SECONDARY_SLAVE_IDX 1
+
+struct fo_scheduler_qp_ctx {
+ struct scheduler_slave *primary_slave;
+ struct scheduler_slave *secondary_slave;
+
+ uint32_t last_deq_idx;
+};
+
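+/*
+ * Enqueue a burst to one slave. Each op carries a scheduler session;
+ * it is swapped for the slave's own session before the enqueue and
+ * restored for any ops the slave could not accept, so the caller can
+ * retry those ops on the other slave.
+ */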
+static inline uint16_t __attribute__((always_inline))
+failover_one_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ for (i = 0; i < nb_ops && i < 4; i++)
+ rte_prefetch0(ops[i]->sym->session);
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->session);
+
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ if (unlikely(processed_ops < nb_ops))
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+
+ return processed_ops;
+}
+
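+/*
+ * Enqueue to the primary slave first; whatever it could not accept is
+ * re-enqueued to the secondary slave.
+ */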
+static uint16_t
+schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct fo_scheduler_qp_ctx *qp_ctx =
+ ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_one_enqueue(qp_ctx->primary_slave,
+ PRIMARY_SLAVE_IDX, ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_one_enqueue(qp_ctx->secondary_slave,
+ SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
+ nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
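+/*
+ * Same as failover_one_enqueue(), but also stamps each op's source mbuf
+ * with a sequence number for later reordering; the sequence counter is
+ * wound back for any ops that were not enqueued.
+ */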
+static inline uint16_t __attribute__((always_inline))
+failover_one_enqueue_ordering(struct scheduler_qp_ctx *qp_ctx,
+ struct scheduler_slave *slave, uint8_t slave_idx,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ uint16_t i, processed_ops;
+ struct rte_cryptodev_sym_session *sessions[nb_ops];
+ struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+
+ for (i = 0; i < nb_ops && i < 4; i++) {
+ rte_prefetch0(ops[i]->sym->session);
+ rte_prefetch0(ops[i]->sym->m_src);
+ }
+
+ for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sess1 = (struct scheduler_session *)
+ ops[i+1]->sym->session->_private;
+ sess2 = (struct scheduler_session *)
+ ops[i+2]->sym->session->_private;
+ sess3 = (struct scheduler_session *)
+ ops[i+3]->sym->session->_private;
+
+ sessions[i] = ops[i]->sym->session;
+ sessions[i + 1] = ops[i + 1]->sym->session;
+ sessions[i + 2] = ops[i + 2]->sym->session;
+ sessions[i + 3] = ops[i + 3]->sym->session;
+
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
+ ops[i + 1]->sym->session = sess1->sessions[slave_idx];
+ ops[i + 1]->sym->m_src->seqn = qp_ctx->seqn++;
+ ops[i + 2]->sym->session = sess2->sessions[slave_idx];
+ ops[i + 2]->sym->m_src->seqn = qp_ctx->seqn++;
+ ops[i + 3]->sym->session = sess3->sessions[slave_idx];
+ ops[i + 3]->sym->m_src->seqn = qp_ctx->seqn++;
+
+ rte_prefetch0(ops[i + 4]->sym->session);
+ rte_prefetch0(ops[i + 4]->sym->m_src);
+ rte_prefetch0(ops[i + 5]->sym->session);
+ rte_prefetch0(ops[i + 5]->sym->m_src);
+ rte_prefetch0(ops[i + 6]->sym->session);
+ rte_prefetch0(ops[i + 6]->sym->m_src);
+ rte_prefetch0(ops[i + 7]->sym->session);
+ rte_prefetch0(ops[i + 7]->sym->m_src);
+ }
+
+ for (; i < nb_ops; i++) {
+ sess0 = (struct scheduler_session *)
+ ops[i]->sym->session->_private;
+ sessions[i] = ops[i]->sym->session;
+ ops[i]->sym->session = sess0->sessions[slave_idx];
+ ops[i]->sym->m_src->seqn = qp_ctx->seqn++;
+ }
+
+ processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_ops);
+ slave->nb_inflight_cops += processed_ops;
+
+ if (unlikely(processed_ops < nb_ops)) {
+ for (i = processed_ops; i < nb_ops; i++)
+ ops[i]->sym->session = sessions[i];
+ qp_ctx->seqn -= nb_ops - processed_ops;
+ }
+
+ return processed_ops;
+}
+
+static uint16_t
+schedule_enqueue_ordering(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
+ uint16_t enqueued_ops;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ enqueued_ops = failover_one_enqueue_ordering(qp_ctx,
+ fo_qp_ctx->primary_slave,
+ PRIMARY_SLAVE_IDX, ops, nb_ops);
+
+ if (enqueued_ops < nb_ops)
+ enqueued_ops += failover_one_enqueue_ordering(qp_ctx,
+ fo_qp_ctx->secondary_slave, SECONDARY_SLAVE_IDX,
+ &ops[enqueued_ops], nb_ops - enqueued_ops);
+
+ return enqueued_ops;
+}
+
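+/*
+ * Split the dequeue budget between the slaves: if the primary has no
+ * inflight ops, poll only the secondary; if the secondary has nb_ops
+ * or more inflight, it gets half the budget; any shortfall on the
+ * primary side is added to the secondary's share. E.g. with
+ * nb_ops == 32, the primary busy and 40 ops inflight on the secondary,
+ * each slave is polled for 16 ops.
+ */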
+static uint16_t
+schedule_dequeue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
+ struct scheduler_slave *slave = fo_qp_ctx->primary_slave;
+ uint16_t nb_pri_to_deq;
+ uint16_t nb_sec_to_deq = fo_qp_ctx->secondary_slave->nb_inflight_cops;
+ uint16_t nb_pri_deq_ops = 0, nb_sec_deq_ops = 0;
+
+ /* decide how many ops to poll from each slave; see the comment
+ * above this function for the split rule
+ */
+ if (!slave->nb_inflight_cops)
+ nb_sec_to_deq = nb_ops;
+ else if (nb_sec_to_deq >= nb_ops)
+ nb_sec_to_deq = nb_ops >> 1;
+
+ nb_pri_to_deq = nb_ops - nb_sec_to_deq;
+
+ nb_pri_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, ops, nb_pri_to_deq);
+ if (unlikely(nb_pri_deq_ops < nb_pri_to_deq))
+ nb_sec_to_deq += nb_pri_to_deq - nb_pri_deq_ops;
+ slave->nb_inflight_cops -= nb_pri_deq_ops;
+
+ slave = fo_qp_ctx->secondary_slave;
+
+ nb_sec_deq_ops = rte_cryptodev_dequeue_burst(slave->dev_id,
+ slave->qp_id, &ops[nb_pri_deq_ops], nb_sec_to_deq);
+ slave->nb_inflight_cops -= nb_sec_deq_ops;
+
+ return nb_pri_deq_ops + nb_sec_deq_ops;
+}
+
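+/*
+ * Ordered dequeue: ops are dequeued into a scratch array, their source
+ * mbufs (carrying the op pointer in userdata) are inserted into the
+ * reorder buffer keyed by seqn, and the buffer is drained so that ops
+ * are returned in their original enqueue order.
+ */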
+static uint16_t
+schedule_dequeue_ordering(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct rte_reorder_buffer *reorder_buff = qp_ctx->reorder_buf;
+ struct rte_mbuf *mbuf0, *mbuf1, *mbuf2, *mbuf3;
+ uint16_t nb_deq_ops, nb_drained_mbufs;
+ const uint16_t nb_op_ops = nb_ops;
+ struct rte_crypto_op *op_ops[nb_op_ops];
+ struct rte_mbuf *reorder_mbufs[nb_op_ops];
+ uint16_t i;
+
+ nb_deq_ops = schedule_dequeue(qp, op_ops, nb_ops);
+
+ for (i = 0; i < nb_deq_ops && i < 4; i++)
+ rte_prefetch0(op_ops[i]->sym->m_src);
+
+ for (i = 0; (i < (nb_deq_ops - 8)) && (nb_deq_ops > 8); i += 4) {
+ rte_prefetch0(op_ops[i + 4]->sym->m_src);
+ rte_prefetch0(op_ops[i + 5]->sym->m_src);
+ rte_prefetch0(op_ops[i + 6]->sym->m_src);
+ rte_prefetch0(op_ops[i + 7]->sym->m_src);
+
+ mbuf0 = op_ops[i]->sym->m_src;
+ mbuf1 = op_ops[i + 1]->sym->m_src;
+ mbuf2 = op_ops[i + 2]->sym->m_src;
+ mbuf3 = op_ops[i + 3]->sym->m_src;
+
+ mbuf0->userdata = op_ops[i];
+ mbuf1->userdata = op_ops[i + 1];
+ mbuf2->userdata = op_ops[i + 2];
+ mbuf3->userdata = op_ops[i + 3];
+
+ rte_reorder_insert(reorder_buff, mbuf0);
+ rte_reorder_insert(reorder_buff, mbuf1);
+ rte_reorder_insert(reorder_buff, mbuf2);
+ rte_reorder_insert(reorder_buff, mbuf3);
+ }
+
+ for (; i < nb_deq_ops; i++) {
+ mbuf0 = op_ops[i]->sym->m_src;
+ mbuf0->userdata = op_ops[i];
+ rte_reorder_insert(reorder_buff, mbuf0);
+ }
+
+ nb_drained_mbufs = rte_reorder_drain(reorder_buff, reorder_mbufs,
+ nb_ops);
+ for (i = 0; i < nb_drained_mbufs && i < 4; i++)
+ rte_prefetch0(reorder_mbufs[i]);
+
+ for (i = 0; (i < (nb_drained_mbufs - 8)) && (nb_drained_mbufs > 8);
+ i += 4) {
+ rte_prefetch0(reorder_mbufs[i + 4]);
+ rte_prefetch0(reorder_mbufs[i + 5]);
+ rte_prefetch0(reorder_mbufs[i + 6]);
+ rte_prefetch0(reorder_mbufs[i + 7]);
+
+ ops[i] = (struct rte_crypto_op *)reorder_mbufs[i]->userdata;
+ ops[i + 1] = (struct rte_crypto_op *)reorder_mbufs[i + 1]->userdata;
+ ops[i + 2] = (struct rte_crypto_op *)reorder_mbufs[i + 2]->userdata;
+ ops[i + 3] = (struct rte_crypto_op *)reorder_mbufs[i + 3]->userdata;
+
+ reorder_mbufs[i]->userdata = NULL;
+ reorder_mbufs[i + 1]->userdata = NULL;
+ reorder_mbufs[i + 2]->userdata = NULL;
+ reorder_mbufs[i + 3]->userdata = NULL;
+ }
+
+ for (; i < nb_drained_mbufs; i++) {
+ ops[i] = (struct rte_crypto_op *)reorder_mbufs[i]->userdata;
+ reorder_mbufs[i]->userdata = NULL;
+ }
+
+ return nb_drained_mbufs;
+}
+
+static int
+slave_attach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+slave_detach(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint8_t slave_id)
+{
+ return 0;
+}
+
+static int
+scheduler_start(struct rte_cryptodev *dev)
+{
+ struct scheduler_ctx *sched_ctx = dev->data->dev_private;
+ uint16_t i;
+
+ if (sched_ctx->nb_slaves < 2) {
+ CS_LOG_ERR("Number of slaves shall no less than 2");
+ return -ENOMEM;
+ }
+
+ if (sched_ctx->reordering_enabled) {
+ dev->enqueue_burst = schedule_enqueue_ordering;
+ dev->dequeue_burst = schedule_dequeue_ordering;
+ } else {
+ dev->enqueue_burst = schedule_enqueue;
+ dev->dequeue_burst = schedule_dequeue;
+ }
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[i];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
+
+ if (!fo_qp_ctx->primary_slave || !fo_qp_ctx->secondary_slave) {
+ fo_qp_ctx->primary_slave =
+ &sched_ctx->slaves[PRIMARY_SLAVE_IDX];
+ fo_qp_ctx->secondary_slave =
+ &sched_ctx->slaves[SECONDARY_SLAVE_IDX];
+ }
+ }
+
+ return 0;
+}
+
+static int
+scheduler_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static int
+scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct scheduler_qp_ctx *qp_ctx = dev->data->queue_pairs[qp_id];
+ struct fo_scheduler_qp_ctx *fo_qp_ctx;
+
+ fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0,
+ rte_socket_id());
+ if (!fo_qp_ctx) {
+ CS_LOG_ERR("failed allocate memory for private queue pair");
+ return -ENOMEM;
+ }
+
+ qp_ctx->private_qp_ctx = (void *)fo_qp_ctx;
+
+ return 0;
+}
+
+static int
+scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+ slave_attach,
+ slave_detach,
+ scheduler_start,
+ scheduler_stop,
+ scheduler_config_qp,
+ scheduler_create_private_ctx
+};
+
+struct rte_cryptodev_scheduler fo_scheduler = {
+ .name = "failover-scheduler",
+ .description = "scheduler which will mainly enqueue bursts to the "
+ "primary slave, and enqueue to the secondary slave "
+ "upon failing to enqueue to the primary",
+ .mode = CDEV_SCHED_MODE_FAILOVER,
+ .ops = &scheduler_fo_ops
+};
+
+struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
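
[Not part of the patch: a sketch of driving the failover scheduler with reordering enabled, assuming the ordering toggle from rte_cryptodev_scheduler.h and placeholder ids and op arrays.]

    /* make dequeues preserve enqueue order even when a burst is split
     * across the two slaves
     */
    rte_cryptodev_scheduler_ordering_set(scheduler_id, 1);
    rte_cryptodev_start(scheduler_id);

    /* the scheduler is then driven like any other cryptodev */
    uint16_t nb_enq = rte_cryptodev_enqueue_burst(scheduler_id, qp_id,
            ops, nb_ops);
    uint16_t nb_deq = rte_cryptodev_dequeue_burst(scheduler_id, qp_id,
            deq_ops, nb_ops);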