[dpdk-dev] [PATCH] crypto/scheduler: add multicore scheduling mode

Zhang, Roy Fan roy.fan.zhang at intel.com
Mon Jun 5 08:19:29 CEST 2017


Hi Kirill,

Great job!
It would be handy if you could support adding and removing worker cores
at run-time, or at least expose the list of lcores that is used by this
scheduler mode.
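For instance, a minimal sketch of such a getter (the function name is
just my suggestion, and it assumes the wc_pool/nb_wc fields added later
in this patch):

/* Hypothetical helper: copy the worker lcore list out of the
 * scheduler context; returns the number of entries written. */
static uint16_t
scheduler_worker_cores_get(struct scheduler_ctx *sched_ctx,
		uint16_t *lcores, uint16_t nb_lcores)
{
	uint16_t i, n = RTE_MIN(sched_ctx->nb_wc, nb_lcores);

	for (i = 0; i < n; i++)
		lcores[i] = sched_ctx->wc_pool[i];

	return n;
}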

Some more comments inline.


On 29/05/2017 04:08, Pablo de Lara wrote:
> From: Kirill Rybalchenko <kirill.rybalchenko at intel.com>
>
> Multi-core scheduling mode is a mode in which the scheduler distributes
> crypto operations in a round-robin fashion between several cores
> assigned as workers.
>
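If I read the design right, the enqueue side does roughly the following
(a simplified sketch, not the actual patch code; error handling
omitted):

/* Pick the next worker ring in round-robin order and hand the
 * burst of crypto ops to that worker core. */
static uint16_t
mc_enqueue_sketch(struct mc_scheduler_ctx *mc_ctx,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	static uint32_t worker_idx;
	uint16_t processed = rte_ring_enqueue_burst(
			mc_ctx->sched_enq_ring[worker_idx],
			(void **)ops, nb_ops, NULL);

	/* the next burst goes to the next worker core */
	worker_idx = (worker_idx + 1) % mc_ctx->num_workers;

	return processed;
}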
> Signed-off-by: Kirill Rybalchenko <kirill.rybalchenko at intel.com>
> ---
>   app/test-crypto-perf/cperf_test_throughput.c       |   2 +
>   drivers/crypto/scheduler/Makefile                  |   1 +
>   drivers/crypto/scheduler/rte_cryptodev_scheduler.c |   7 +
>   drivers/crypto/scheduler/rte_cryptodev_scheduler.h |   6 +
>   drivers/crypto/scheduler/scheduler_multicore.c     | 405 +++++++++++++++++++++
>   drivers/crypto/scheduler/scheduler_pmd.c           |  73 +++-
>   drivers/crypto/scheduler/scheduler_pmd_private.h   |   4 +
>   lib/librte_cryptodev/rte_cryptodev.c               |   2 +-
>   8 files changed, 497 insertions(+), 3 deletions(-)
>   create mode 100644 drivers/crypto/scheduler/scheduler_multicore.c
...
> +#define MC_SCHED_RING_SIZE		1024
> +#define MC_SCHED_ENQ_RING_NAME	"MCS_ENQR_"
> +#define MC_SCHED_DEQ_RING_NAME	"MCS_DEQR_"
> +
A better name for ENQ/DEQ_RING_NAME might be
"MC_SCHED_ENQ/DEQ_RING_NAME_PREFIX", since these strings are only used
as name prefixes.
> +#define MC_SCHED_BUFFER_SIZE 32
> +#define MC_SCHED_BUFFER_MASK (MC_SCHED_BUFFER_SIZE - 1)
> +
> +/** multi-core scheduler context */
> +struct mc_scheduler_ctx {
> +	unsigned int num_workers;             /**< Number of workers polling */
Please use uint32_t instead of unsigned int, here and for stop_signal
below; see the sketch after the struct.
> +	unsigned int stop_signal;
> +
> +	struct rte_ring *sched_enq_ring[MAX_NB_WORKER_CORES];
> +	struct rte_ring *sched_deq_ring[MAX_NB_WORKER_CORES];
> +};
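i.e. (only the two field types change):

/** multi-core scheduler context */
struct mc_scheduler_ctx {
	uint32_t num_workers;             /**< Number of workers polling */
	uint32_t stop_signal;

	struct rte_ring *sched_enq_ring[MAX_NB_WORKER_CORES];
	struct rte_ring *sched_deq_ring[MAX_NB_WORKER_CORES];
};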
> +
...
> +				sessions[i] = enq_ops[i]->sym->session;
> +				sessions[i + 1] = enq_ops[i + 1]->sym->session;
> +				sessions[i + 2] = enq_ops[i + 2]->sym->session;
> +				sessions[i + 3] = enq_ops[i + 3]->sym->session;
> +
Have a look at pkt_size_scheduler: with a run-time queue size check it
is possible to remove this session backup and recovery altogether.
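Roughly like this (a sketch of the idea only, reusing the names from
this patch):

/* Cap the burst to the free space in the target ring first, so
 * every op whose session gets swapped below is guaranteed to be
 * enqueued, and no backup/restore of sessions is needed. */
uint32_t space = rte_ring_free_count(
		mc_ctx->sched_enq_ring[worker_idx]);

if (nb_ops > space)
	nb_ops = space;

/* ...then swap the sessions for exactly nb_ops ops and enqueue
 * them all in one burst. */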
> +				enq_ops[i]->sym->session = sess0->sessions[worker_idx];
> +				enq_ops[i + 1]->sym->session = sess1->sessions[worker_idx];
> +				enq_ops[i + 2]->sym->session = sess2->sessions[worker_idx];
> +				enq_ops[i + 3]->sym->session = sess3->sessions[worker_idx];
> +
> +				rte_prefetch0(enq_ops[i + 4]->sym->session);
> +				rte_prefetch0(enq_ops[i + 5]->sym->session);
> +				rte_prefetch0(enq_ops[i + 6]->sym->session);
> +				rte_prefetch0(enq_ops[i + 7]->sym->session);
> +			}
> +#endif
>   
> +#define MAX_NB_WORKER_CORES	64
I think MAX_NB_WORKER_CORES should go into rte_cryptodev_scheduler.h
with a more formal name, such as
"RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES", or something better.
> +
>   struct scheduler_slave {
>   	uint8_t dev_id;
>   	uint16_t qp_id;
> @@ -86,6 +88,8 @@ struct scheduler_ctx {
>   
>   	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
>   	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
> +	uint16_t wc_pool[MAX_NB_WORKER_CORES];
> +	uint16_t nb_wc;
>   
>   	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
>   	int nb_init_slaves;
> diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
> index b65cd9c..5aa2b8b 100644
> --- a/lib/librte_cryptodev/rte_cryptodev.c
> +++ b/lib/librte_cryptodev/rte_cryptodev.c
> @@ -1032,8 +1032,8 @@ rte_cryptodev_stop(uint8_t dev_id)
>   		return;
>   	}
>   
> -	dev->data->dev_started = 0;
>   	(*dev->dev_ops->dev_stop)(dev);
> +	dev->data->dev_started = 0;
>   }
>   
>   int


