[dpdk-dev] crypto/scheduler: remove session backup and recover
Checks
Commit Message
This patch removes the unnecessary session backup and recovery
steps in the round-robin and fail-over modes of the cryptodev
scheduler PMD. Originally, the scheduler blindly enqueued to
the slaves regardless of their available queue room, and
recovered the sessions once the enqueue failed. This patch
predicts the number of crypto ops that can be enqueued to a
slave by checking its in-flight ops, and thus removes the
session backup and recovery steps.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
drivers/crypto/scheduler/scheduler_failover.c | 33 +++++++++----------------
drivers/crypto/scheduler/scheduler_roundrobin.c | 33 ++++++++++---------------
2 files changed, 25 insertions(+), 41 deletions(-)
Comments
> -----Original Message-----
> From: Zhang, Roy Fan
> Sent: Friday, May 26, 2017 2:25 AM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo <pablo.de.lara.guarch@intel.com>
> Subject: [PATCH] crypto/scheduler: remove session backup and recover
>
> This patch removes the unnecssary session backup and recover steps in the
> round-robin and fail-over modes of cryptodev scheduler PMD. Originally,
> the scheduler blindly enqueues to the slaves regardless of their available
> queue rooms, and recovers the sessions once failed. This patch predicts the
> number of crypto ops the slave can be enqueued by checking its inflight ops
> and thus removed the session backup and recovery steps.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
> ---
> drivers/crypto/scheduler/scheduler_failover.c | 33 +++++++++---------------
> -
> drivers/crypto/scheduler/scheduler_roundrobin.c | 33 ++++++++++----------
> -----
> 2 files changed, 25 insertions(+), 41 deletions(-)
Hi Fan,
Thanks for the patch. I did not apply this patch so far because there was some work
ongoing about the crypto sessions. Since this work is going to be integrated for RC1,
this patch is not needed anymore.
Thanks,
Pablo
@@ -53,7 +53,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
struct scheduler_session *sess0, *sess1, *sess2, *sess3;
for (i = 0; i < nb_ops && i < 4; i++)
@@ -74,11 +73,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
sess3 = (struct scheduler_session *)
ops[i+3]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
ops[i]->sym->session = sess0->sessions[slave_idx];
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -88,7 +82,6 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
for (; i < nb_ops; i++) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
}
@@ -96,9 +89,7 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
slave->qp_id, ops, nb_ops);
slave->nb_inflight_cops += processed_ops;
- if (unlikely(processed_ops < nb_ops))
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
+ RTE_ASSERT(processed_ops == nb_ops);
return processed_ops;
}
@@ -106,22 +97,22 @@ failover_slave_enqueue(struct scheduler_slave *slave, uint8_t slave_idx,
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct fo_scheduler_qp_ctx *qp_ctx =
- ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
- uint16_t enqueued_ops;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct fo_scheduler_qp_ctx *fo_qp_ctx = qp_ctx->private_qp_ctx;
if (unlikely(nb_ops == 0))
return 0;
- enqueued_ops = failover_slave_enqueue(&qp_ctx->primary_slave,
- PRIMARY_SLAVE_IDX, ops, nb_ops);
-
- if (enqueued_ops < nb_ops)
- enqueued_ops += failover_slave_enqueue(&qp_ctx->secondary_slave,
- SECONDARY_SLAVE_IDX, &ops[enqueued_ops],
- nb_ops - enqueued_ops);
+ if (fo_qp_ctx->primary_slave.nb_inflight_cops + nb_ops <
+ qp_ctx->max_nb_objs)
+ return failover_slave_enqueue(&fo_qp_ctx->primary_slave,
+ PRIMARY_SLAVE_IDX, ops, nb_ops);
- return enqueued_ops;
+ return failover_slave_enqueue(&fo_qp_ctx->secondary_slave,
+ SECONDARY_SLAVE_IDX, ops, (fo_qp_ctx->secondary_slave.
+ nb_inflight_cops + nb_ops <= qp_ctx->max_nb_objs) ?
+ nb_ops : qp_ctx->max_nb_objs -
+ fo_qp_ctx->secondary_slave.nb_inflight_cops);
}
@@ -47,21 +47,24 @@ struct rr_scheduler_qp_ctx {
static uint16_t
schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
{
- struct rr_scheduler_qp_ctx *rr_qp_ctx =
- ((struct scheduler_qp_ctx *)qp)->private_qp_ctx;
+ struct scheduler_qp_ctx *qp_ctx = qp;
+ struct rr_scheduler_qp_ctx *rr_qp_ctx = qp_ctx->private_qp_ctx;
uint32_t slave_idx = rr_qp_ctx->last_enq_slave_idx;
struct scheduler_slave *slave = &rr_qp_ctx->slaves[slave_idx];
uint16_t i, processed_ops;
- struct rte_cryptodev_sym_session *sessions[nb_ops];
struct scheduler_session *sess0, *sess1, *sess2, *sess3;
+ uint16_t nb_ops_to_enq;
if (unlikely(nb_ops == 0))
return 0;
- for (i = 0; i < nb_ops && i < 4; i++)
+ nb_ops_to_enq = slave->nb_inflight_cops + nb_ops > qp_ctx->max_nb_objs ?
+ qp_ctx->max_nb_objs - slave->nb_inflight_cops : nb_ops;
+
+ for (i = 0; i < nb_ops_to_enq && i < 4; i++)
rte_prefetch0(ops[i]->sym->session);
- for (i = 0; (i < (nb_ops - 8)) && (nb_ops > 8); i += 4) {
+ for (i = 0; (i < (nb_ops_to_enq - 8)) && (nb_ops_to_enq > 8); i += 4) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
sess1 = (struct scheduler_session *)
@@ -71,11 +74,6 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
sess3 = (struct scheduler_session *)
ops[i+3]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
- sessions[i + 1] = ops[i + 1]->sym->session;
- sessions[i + 2] = ops[i + 2]->sym->session;
- sessions[i + 3] = ops[i + 3]->sym->session;
-
ops[i]->sym->session = sess0->sessions[slave_idx];
ops[i + 1]->sym->session = sess1->sessions[slave_idx];
ops[i + 2]->sym->session = sess2->sessions[slave_idx];
@@ -87,26 +85,21 @@ schedule_enqueue(void *qp, struct rte_crypto_op **ops, uint16_t nb_ops)
rte_prefetch0(ops[i + 7]->sym->session);
}
- for (; i < nb_ops; i++) {
+ for (; i < nb_ops_to_enq; i++) {
sess0 = (struct scheduler_session *)
ops[i]->sym->session->_private;
- sessions[i] = ops[i]->sym->session;
ops[i]->sym->session = sess0->sessions[slave_idx];
}
processed_ops = rte_cryptodev_enqueue_burst(slave->dev_id,
- slave->qp_id, ops, nb_ops);
+ slave->qp_id, ops, nb_ops_to_enq);
+ RTE_ASSERT(processed_ops == nb_ops_to_enq);
slave->nb_inflight_cops += processed_ops;
rr_qp_ctx->last_enq_slave_idx += 1;
- rr_qp_ctx->last_enq_slave_idx %= rr_qp_ctx->nb_slaves;
-
- /* recover session if enqueue is failed */
- if (unlikely(processed_ops < nb_ops)) {
- for (i = processed_ops; i < nb_ops; i++)
- ops[i]->sym->session = sessions[i];
- }
+ if (rr_qp_ctx->last_enq_slave_idx == rr_qp_ctx->nb_slaves)
+ rr_qp_ctx->last_enq_slave_idx = 0;
return processed_ops;
}