[dpdk-dev] [PATCH v7 12/15] app/eventdev: add pipeline atq worker functions

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Tue Jan 16 18:46:04 CET 2018

Add the pipeline all types queue (atq) worker functions. The worker
variant is picked at runtime in worker_wrapper() based on the number of
pipeline stages, whether the event device supports burst mode, and
whether the ethdev Tx queues are multi-thread safe: if they are, the
worker transmits the packet itself on the last stage; otherwise it
forwards the event to the Tx service queue.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
Acked-by: Harry van Haaren <harry.van.haaren at intel.com>
---
 app/test-eventdev/test_pipeline_atq.c | 280 +++++++++++++++++++++++++++++++++-
 1 file changed, 279 insertions(+), 1 deletion(-)
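
Note for reviewers: the PIPELINE_WROKER_*_INIT macros used by the workers
below are defined in test_pipeline_common.h, added earlier in this series.
Judging from the variables the workers reference, they are assumed to expand
roughly as in the sketch below (illustrative only, see the header for the
exact definitions):

	/* Illustrative sketch only -- not part of this patch. Assumed to
	 * declare the locals the worker loops use.
	 */
	#define PIPELINE_WROKER_SINGLE_STAGE_INIT \
		struct worker_data *w = arg; \
		struct test_pipeline *t = w->t; \
		const uint8_t dev = w->dev_id; \
		const uint8_t port = w->port_id; \
		struct rte_event ev

	#define PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT \
		int i; \
		struct worker_data *w = arg; \
		struct test_pipeline *t = w->t; \
		const uint8_t dev = w->dev_id; \
		const uint8_t port = w->port_id; \
		struct rte_event ev[BURST_SIZE + 1]

	/* The MULTI_STAGE variants are assumed to additionally declare
	 * cq_id, last_queue (nb_stages - 1) and sched_type_list[], which
	 * select the schedule type of the next stage.
	 */

The workers can then be exercised with something like the following
(options as documented in doc/guides/tools/testeventdev.rst; exact paths
and flags depend on the build):

	./build/app/dpdk-test-eventdev --vdev=event_sw0 -- \
		--test=pipeline_atq --wlcores=1 --prod_type_ethdev --stlist=a
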

diff --git a/app/test-eventdev/test_pipeline_atq.c b/app/test-eventdev/test_pipeline_atq.c
index 6c9ac6119..36abbddda 100644
--- a/app/test-eventdev/test_pipeline_atq.c
+++ b/app/test-eventdev/test_pipeline_atq.c
@@ -15,10 +15,288 @@ pipeline_atq_nb_event_queues(struct evt_options *opt)
 	return rte_eth_dev_count();
 }
 
+static int
+pipeline_atq_worker_single_stage_tx(void *arg)
+{
+	PIPELINE_WROKER_SINGLE_STAGE_INIT;
+
+	while (t->done == false) {
+		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+		if (!event) {
+			rte_pause();
+			continue;
+		}
+
+		if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+			pipeline_tx_pkt(ev.mbuf);
+			w->processed_pkts++;
+			continue;
+		}
+		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+		pipeline_event_enqueue(dev, port, &ev);
+	}
+
+	return 0;
+}
+
+static int
+pipeline_atq_worker_single_stage_fwd(void *arg)
+{
+	PIPELINE_WROKER_SINGLE_STAGE_INIT;
+	const uint8_t tx_queue = t->tx_service.queue_id;
+
+	while (t->done == false) {
+		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+		if (!event) {
+			rte_pause();
+			continue;
+		}
+
+		w->processed_pkts++;
+		ev.queue_id = tx_queue;
+		pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+		pipeline_event_enqueue(dev, port, &ev);
+	}
+
+	return 0;
+}
+
+static int
+pipeline_atq_worker_single_stage_burst_tx(void *arg)
+{
+	PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
+
+	while (t->done == false) {
+		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+				BURST_SIZE, 0);
+
+		if (!nb_rx) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			rte_prefetch0(ev[i + 1].mbuf);
+			if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+				pipeline_tx_pkt(ev[i].mbuf);
+				ev[i].op = RTE_EVENT_OP_RELEASE;
+				w->processed_pkts++;
+			} else
+				pipeline_fwd_event(&ev[i],
+						RTE_SCHED_TYPE_ATOMIC);
+		}
+
+		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+	}
+
+	return 0;
+}
+
+static int
+pipeline_atq_worker_single_stage_burst_fwd(void *arg)
+{
+	PIPELINE_WROKER_SINGLE_STAGE_BURST_INIT;
+	const uint8_t tx_queue = t->tx_service.queue_id;
+
+	while (t->done == false) {
+		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+				BURST_SIZE, 0);
+
+		if (!nb_rx) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			rte_prefetch0(ev[i + 1].mbuf);
+			ev[i].queue_id = tx_queue;
+			pipeline_fwd_event(&ev[i], RTE_SCHED_TYPE_ATOMIC);
+			w->processed_pkts++;
+		}
+
+		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+	}
+
+	return 0;
+}
+
+static int
+pipeline_atq_worker_multi_stage_tx(void *arg)
+{
+	PIPELINE_WROKER_MULTI_STAGE_INIT;
+	const uint8_t nb_stages = t->opt->nb_stages;
+
+
+	while (t->done == false) {
+		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+		if (!event) {
+			rte_pause();
+			continue;
+		}
+
+		cq_id = ev.sub_event_type % nb_stages;
+
+		if (cq_id == last_queue) {
+			if (ev.sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+				pipeline_tx_pkt(ev.mbuf);
+				w->processed_pkts++;
+				continue;
+			}
+			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+		} else {
+			ev.sub_event_type++;
+			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+		}
+
+		pipeline_event_enqueue(dev, port, &ev);
+	}
+	return 0;
+}
+
+static int
+pipeline_atq_worker_multi_stage_fwd(void *arg)
+{
+	PIPELINE_WROKER_MULTI_STAGE_INIT;
+	const uint8_t nb_stages = t->opt->nb_stages;
+	const uint8_t tx_queue = t->tx_service.queue_id;
+
+	while (t->done == false) {
+		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);
+
+		if (!event) {
+			rte_pause();
+			continue;
+		}
+
+		cq_id = ev.sub_event_type % nb_stages;
+
+		if (cq_id == last_queue) {
+			w->processed_pkts++;
+			ev.queue_id = tx_queue;
+			pipeline_fwd_event(&ev, RTE_SCHED_TYPE_ATOMIC);
+		} else {
+			ev.sub_event_type++;
+			pipeline_fwd_event(&ev, sched_type_list[cq_id]);
+		}
+
+		pipeline_event_enqueue(dev, port, &ev);
+	}
+	return 0;
+}
+
+static int
+pipeline_atq_worker_multi_stage_burst_tx(void *arg)
+{
+	PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
+	const uint8_t nb_stages = t->opt->nb_stages;
+
+	while (t->done == false) {
+		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+				BURST_SIZE, 0);
+
+		if (!nb_rx) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			rte_prefetch0(ev[i + 1].mbuf);
+			cq_id = ev[i].sub_event_type % nb_stages;
+
+			if (cq_id == last_queue) {
+				if (ev[i].sched_type == RTE_SCHED_TYPE_ATOMIC) {
+
+					pipeline_tx_pkt(ev[i].mbuf);
+					ev[i].op = RTE_EVENT_OP_RELEASE;
+					w->processed_pkts++;
+					continue;
+				}
+
+				pipeline_fwd_event(&ev[i],
+						RTE_SCHED_TYPE_ATOMIC);
+			} else {
+				ev[i].sub_event_type++;
+				pipeline_fwd_event(&ev[i],
+						sched_type_list[cq_id]);
+			}
+		}
+
+		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+	}
+	return 0;
+}
+
+static int
+pipeline_atq_worker_multi_stage_burst_fwd(void *arg)
+{
+	PIPELINE_WROKER_MULTI_STAGE_BURST_INIT;
+	const uint8_t nb_stages = t->opt->nb_stages;
+	const uint8_t tx_queue = t->tx_service.queue_id;
+
+	while (t->done == false) {
+		uint16_t nb_rx = rte_event_dequeue_burst(dev, port, ev,
+				BURST_SIZE, 0);
+
+		if (!nb_rx) {
+			rte_pause();
+			continue;
+		}
+
+		for (i = 0; i < nb_rx; i++) {
+			rte_prefetch0(ev[i + 1].mbuf);
+			cq_id = ev[i].sub_event_type % nb_stages;
+
+			if (cq_id == last_queue) {
+				w->processed_pkts++;
+				ev[i].queue_id = tx_queue;
+				pipeline_fwd_event(&ev[i],
+						RTE_SCHED_TYPE_ATOMIC);
+			} else {
+				ev[i].sub_event_type++;
+				pipeline_fwd_event(&ev[i],
+						sched_type_list[cq_id]);
+			}
+		}
+
+		pipeline_event_enqueue_burst(dev, port, ev, nb_rx);
+	}
+	return 0;
+}
+
 static int
 worker_wrapper(void *arg)
 {
-	RTE_SET_USED(arg);
+	struct worker_data *w  = arg;
+	struct evt_options *opt = w->t->opt;
+	const bool burst = evt_has_burst_mode(w->dev_id);
+	const bool mt_safe = !w->t->mt_unsafe;
+	const uint8_t nb_stages = opt->nb_stages;
+	RTE_SET_USED(opt);
+
+	if (nb_stages == 1) {
+		if (!burst && mt_safe)
+			return pipeline_atq_worker_single_stage_tx(arg);
+		else if (!burst && !mt_safe)
+			return pipeline_atq_worker_single_stage_fwd(arg);
+		else if (burst && mt_safe)
+			return pipeline_atq_worker_single_stage_burst_tx(arg);
+		else if (burst && !mt_safe)
+			return pipeline_atq_worker_single_stage_burst_fwd(arg);
+	} else {
+		if (!burst && mt_safe)
+			return pipeline_atq_worker_multi_stage_tx(arg);
+		else if (!burst && !mt_safe)
+			return pipeline_atq_worker_multi_stage_fwd(arg);
+		else if (burst && mt_safe)
+			return pipeline_atq_worker_multi_stage_burst_tx(arg);
+		else if (burst && !mt_safe)
+			return pipeline_atq_worker_multi_stage_burst_fwd(arg);
+	}
 	rte_panic("invalid worker\n");
 }
 
-- 
2.14.1
