[dpdk-dev] crypto/scheduler: add get attached slaves API
Checks
Commit Message
This patch adds an API to get the run-time slaves number and list
of a cryptodev scheduler PMD.
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
---
v2:
- Fixed slave number return bug
- Replaced private macro MAX_SLAVES_NUM with public macro
RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 34 +++++++++++++++++++++-
drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 22 ++++++++++++++
.../scheduler/rte_pmd_crypto_scheduler_version.map | 7 +++++
drivers/crypto/scheduler/scheduler_pmd.c | 4 +--
drivers/crypto/scheduler/scheduler_pmd_private.h | 10 ++-----
drivers/crypto/scheduler/scheduler_roundrobin.c | 5 ++--
6 files changed, 70 insertions(+), 12 deletions(-)
Comments
> -----Original Message-----
> From: Zhang, Roy Fan
> Sent: Thursday, March 30, 2017 2:39 PM
> To: dev@dpdk.org
> Cc: De Lara Guarch, Pablo; Gonzalez Monroy, Sergio; Doherty, Declan
> Subject: [PATCH] crypto/scheduler: add get attached slaves API
>
> This patch adds an API to get the run-time slaves number and list
> of a cryptodev scheduler PMD.
>
> Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
> -----Original Message-----
> From: De Lara Guarch, Pablo
> Sent: Friday, March 31, 2017 9:38 AM
> To: Zhang, Roy Fan; dev@dpdk.org
> Cc: Gonzalez Monroy, Sergio; Doherty, Declan
> Subject: RE: [PATCH] crypto/scheduler: add get attached slaves API
>
>
>
> > -----Original Message-----
> > From: Zhang, Roy Fan
> > Sent: Thursday, March 30, 2017 2:39 PM
> > To: dev@dpdk.org
> > Cc: De Lara Guarch, Pablo; Gonzalez Monroy, Sergio; Doherty, Declan
> > Subject: [PATCH] crypto/scheduler: add get attached slaves API
> >
> > This patch adds an API to get the run-time slaves number and list
> > of a cryptodev scheduler PMD.
> >
> > Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
>
> Acked-by: Pablo de Lara <pablo.de.lara.guarch@intel.com>
Applied to dpdk-next-crypto.
Thanks,
Pablo
@@ -209,7 +209,8 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
}
sched_ctx = dev->data->dev_private;
- if (sched_ctx->nb_slaves >= MAX_SLAVES_NUM) {
+ if (sched_ctx->nb_slaves >=
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) {
CS_LOG_ERR("Too many slaves attached");
return -ENOMEM;
}
@@ -483,3 +484,34 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
return 0;
}
+
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
+{
+ struct rte_cryptodev *dev = rte_cryptodev_pmd_get_dev(scheduler_id);
+ struct scheduler_ctx *sched_ctx;
+ uint32_t nb_slaves = 0;
+
+ if (!dev) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ if (dev->dev_type != RTE_CRYPTODEV_SCHEDULER_PMD) {
+ CS_LOG_ERR("Operation not supported");
+ return -ENOTSUP;
+ }
+
+ sched_ctx = dev->data->dev_private;
+
+ nb_slaves = sched_ctx->nb_slaves;
+
+ if (slaves && nb_slaves) {
+ uint32_t i;
+
+ for (i = 0; i < nb_slaves; i++)
+ slaves[i] = sched_ctx->slaves[i].dev_id;
+ }
+
+ return (int)nb_slaves;
+}
@@ -40,6 +40,11 @@
extern "C" {
#endif
+/**< Maximum number of bonded devices per device */
+#ifndef RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES (8)
+#endif
+
/**
* Crypto scheduler PMD operation modes
*/
@@ -147,6 +152,23 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
int
rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id);
+/**
+ * Get the attached slaves' count and/or ID
+ *
+ * @param scheduler_id  The target scheduler device ID
+ * @param slaves        If successful, the function will write back
+ *                      all slaves' device IDs to it. This
+ *                      parameter SHALL either be an uint8_t array
+ *                      of RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES
+ *                      elements or NULL.
+ *
+ * @return
+ * non-negative number: the number of slaves attached
+ * negative integer if error occurs.
+ */
+int
+rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves);
+
typedef uint16_t (*rte_cryptodev_scheduler_burst_enqueue_t)(void *qp_ctx,
struct rte_crypto_op **ops, uint16_t nb_ops);
@@ -10,3 +10,10 @@ DPDK_17.02 {
rte_cryptodev_scheduler_ordering_get;
};
+
+DPDK_17.05 {
+ global:
+
+ rte_cryptodev_scheduler_slaves_get;
+
+} DPDK_17.02;
@@ -44,7 +44,7 @@
struct scheduler_init_params {
struct rte_crypto_vdev_init_params def_p;
uint32_t nb_slaves;
- uint8_t slaves[MAX_SLAVES_NUM];
+ uint8_t slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
};
#define RTE_CRYPTODEV_VDEV_NAME ("name")
@@ -222,7 +222,7 @@ parse_slave_arg(const char *key __rte_unused,
return -1;
}
- if (param->nb_slaves >= MAX_SLAVES_NUM - 1) {
+ if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES - 1) {
CS_LOG_ERR("Too many slaves.\n");
return -1;
}
@@ -36,11 +36,6 @@
#include "rte_cryptodev_scheduler.h"
-/**< Maximum number of bonded devices per devices */
-#ifndef MAX_SLAVES_NUM
-#define MAX_SLAVES_NUM (8)
-#endif
-
#define PER_SLAVE_BUFF_SIZE (256)
#define CS_LOG_ERR(fmt, args...) \
@@ -80,7 +75,7 @@ struct scheduler_ctx {
uint32_t max_nb_queue_pairs;
- struct scheduler_slave slaves[MAX_SLAVES_NUM];
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
@@ -103,7 +98,8 @@ struct scheduler_qp_ctx {
} __rte_cache_aligned;
struct scheduler_session {
- struct rte_cryptodev_sym_session *sessions[MAX_SLAVES_NUM];
+ struct rte_cryptodev_sym_session *sessions[
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
};
static inline uint16_t __attribute__((always_inline))
@@ -37,7 +37,7 @@
#include "scheduler_pmd_private.h"
struct rr_scheduler_qp_ctx {
- struct scheduler_slave slaves[MAX_SLAVES_NUM];
+ struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
uint32_t nb_slaves;
uint32_t last_enq_slave_idx;
@@ -211,7 +211,8 @@ scheduler_start(struct rte_cryptodev *dev)
qp_ctx->private_qp_ctx;
uint32_t j;
- memset(rr_qp_ctx->slaves, 0, MAX_SLAVES_NUM *
+ memset(rr_qp_ctx->slaves, 0,
+ RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES *
sizeof(struct scheduler_slave));
for (j = 0; j < sched_ctx->nb_slaves; j++) {
rr_qp_ctx->slaves[j].dev_id =