[dpdk-dev] [PATCH] eventdev: ease single-link queue config requirements

Gage Eads gage.eads at intel.com
Wed Aug 9 21:58:04 CEST 2017


Events sent through single-link queues are naturally in-order and
atomic, so they require neither reordering nor atomic scheduling.
Logically, the nb_atomic_flows and nb_atomic_order_sequences arguments
don't apply to a single-link queue, yet applications must still set
them (depending on the queue config type) just to pass the
is_valid_{ordered, atomic}_queue_conf() checks in the eventdev layer.

This commit updates those is_valid_* checks to skip queues configured
with the SINGLE_LINK flag, simplifying single-link queue configuration.
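
With this change, a single-link queue needs only the SINGLE_LINK flag;
the sketch below illustrates the simplified setup (dev_id and queue_id
are placeholders for an application's actual values):

	const struct rte_event_queue_conf conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
		/* nb_atomic_flows and nb_atomic_order_sequences can be
		 * left at zero; the eventdev layer now ignores them for
		 * single-link queues.
		 */
	};

	if (rte_event_queue_setup(dev_id, queue_id, &conf) < 0)
		printf("error creating single-link queue\n");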

Signed-off-by: Gage Eads <gage.eads at intel.com>
---
 examples/eventdev_pipeline_sw_pmd/main.c | 6 +-----
 lib/librte_eventdev/rte_eventdev.c       | 8 ++++++--
 test/test/test_eventdev_sw.c             | 4 ----
 3 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index dd75cb7..09b90c3 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -696,11 +696,7 @@ setup_eventdev(struct prod_data *prod_data,
 	};
 	const struct rte_event_queue_conf tx_q_conf = {
 			.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
-			.event_queue_cfg =
-					RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
-					RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
-			.nb_atomic_flows = 1024,
-			.nb_atomic_order_sequences = 1024,
+			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
 	};
 
 	struct port_link worker_queues[MAX_NUM_STAGES];
diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index bbb3805..46bf24c 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -519,7 +519,9 @@ rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
 static inline int
 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
 {
-	if (queue_conf && (
+	if (queue_conf &&
+		!(queue_conf->event_queue_cfg &
+		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
 		((queue_conf->event_queue_cfg &
 			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
 			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
@@ -535,7 +537,9 @@ is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
 static inline int
 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
 {
-	if (queue_conf && (
+	if (queue_conf &&
+		!(queue_conf->event_queue_cfg &
+		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
 		((queue_conf->event_queue_cfg &
 			RTE_EVENT_QUEUE_CFG_TYPE_MASK)
 			== RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
diff --git a/test/test/test_eventdev_sw.c b/test/test/test_eventdev_sw.c
index ba8c053..7219886 100644
--- a/test/test/test_eventdev_sw.c
+++ b/test/test/test_eventdev_sw.c
@@ -267,8 +267,6 @@ create_directed_qids(struct test *t, int num_qids, const uint8_t ports[])
 	static const struct rte_event_queue_conf conf = {
 			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
-			.nb_atomic_flows = 1024,
-			.nb_atomic_order_sequences = 1024,
 	};
 
 	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
@@ -1334,8 +1332,6 @@ port_single_lb_reconfig(struct test *t)
 	static const struct rte_event_queue_conf conf_single_link = {
 		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
-		.nb_atomic_flows = 1024,
-		.nb_atomic_order_sequences = 1024,
 	};
 	if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) {
 		printf("%d: error creating qid\n", __LINE__);
-- 
2.7.4


