[dpdk-dev,15/33] app/testeventdev: order: launch lcores

Message ID 20170528195854.6064-16-jerin.jacob@caviumnetworks.com (mailing list archive)
State Superseded, archived
Delegated to: Jerin Jacob
Headers

Checks

Context Check Description
ci/checkpatch success coding style OK
ci/Intel-compilation success Compilation OK

Commit Message

Jerin Jacob May 28, 2017, 7:58 p.m. UTC
  The event producer and master lcore's test end and
failure detection logic are common for the queue and
all types queue test. Move them as the common function.

Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
---
 app/test-eventdev/test_order_common.c | 114 ++++++++++++++++++++++++++++++++++
 app/test-eventdev/test_order_common.h |   2 +
 2 files changed, 116 insertions(+)
  

Comments

Eads, Gage June 1, 2017, 8:54 p.m. UTC | #1
>  -----Original Message-----
>  From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
>  Sent: Sunday, May 28, 2017 2:59 PM
>  To: dev@dpdk.org
>  Cc: Richardson, Bruce <bruce.richardson@intel.com>; Van Haaren, Harry
>  <harry.van.haaren@intel.com>; hemant.agrawal@nxp.com; Eads, Gage
>  <gage.eads@intel.com>; nipun.gupta@nxp.com; Vangati, Narender
>  <narender.vangati@intel.com>; Rao, Nikhil <nikhil.rao@intel.com>;
>  gprathyusha@caviumnetworks.com; Jerin Jacob
>  <jerin.jacob@caviumnetworks.com>
>  Subject: [dpdk-dev] [PATCH 15/33] app/testeventdev: order: launch lcores
>  
>  The event producer and master lcore's test end and failure detection logic are
>  common for the queue and all types queue test. Move them as the common
>  function.
>  
>  Signed-off-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
>  ---
>   app/test-eventdev/test_order_common.c | 114
>  ++++++++++++++++++++++++++++++++++
>   app/test-eventdev/test_order_common.h |   2 +
>   2 files changed, 116 insertions(+)
>  
>  diff --git a/app/test-eventdev/test_order_common.c b/app/test-
>  eventdev/test_order_common.c
>  index 935c5a3fd..a7160f3dc 100644
>  --- a/app/test-eventdev/test_order_common.c
>  +++ b/app/test-eventdev/test_order_common.c
>  @@ -41,6 +41,57 @@ order_test_result(struct evt_test *test, struct
>  evt_options *opt)
>   	return t->result;
>   }
>  
>  +static inline int
>  +order_producer(void *arg)
>  +{
>  +	struct prod_data *p  = arg;
>  +	struct test_order *t = p->t;
>  +	struct evt_options *opt = t->opt;
>  +	const uint8_t dev_id = p->dev_id;
>  +	const uint8_t port = p->port_id;
>  +	struct rte_mempool *pool = t->pool;
>  +	const uint64_t nb_pkts = t->nb_pkts;
>  +	uint32_t *producer_flow_seq = t->producer_flow_seq;
>  +	const uint32_t nb_flows = t->nb_flows;
>  +	uint64_t count = 0;
>  +	struct rte_mbuf *m;
>  +	struct rte_event ev;
>  +
>  +	if (opt->verbose_level > 1)
>  +		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
>  +			 __func__, rte_lcore_id(), dev_id, port, p->queue_id);
>  +
>  +	ev.event = 0;
>  +	ev.op = RTE_EVENT_OP_NEW;
>  +	ev.queue_id = p->queue_id;
>  +	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
>  +	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
>  +	ev.event_type =  RTE_EVENT_TYPE_CPU;
>  +	ev.sub_event_type = 0; /* stage 0 */
>  +
>  +	while (count < nb_pkts && t->err == false) {
>  +		m = rte_pktmbuf_alloc(pool);
>  +		if (m == NULL)
>  +			continue;
>  +
>  +		const uint32_t flow = (uintptr_t)m % nb_flows;
>  +		/* Maintain seq number per flow */
>  +		m->seqn = producer_flow_seq[flow]++;
>  +
>  +		ev.flow_id = flow;
>  +		ev.mbuf = m;
>  +
>  +		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
>  +			if (t->err)
>  +				break;
>  +			rte_pause();
>  +		}
>  +
>  +		count++;
>  +	}
>  +	return 0;
>  +}
>  +
>   int
>   order_opt_check(struct evt_options *opt)  { @@ -185,6 +236,69 @@
>  order_opt_dump(struct evt_options *opt)  }
>  
>   int
>  +order_launch_lcores(struct evt_test *test, struct evt_options *opt,
>  +			int (*worker)(void *))
>  +{
>  +	int ret, lcore_id;
>  +	struct test_order *t = evt_test_priv(test);
>  +
>  +	int wkr_idx = 0;
>  +	/* launch workers */
>  +	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
>  +		if (!(opt->wlcores[lcore_id]))
>  +			continue;
>  +
>  +		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
>  +					 lcore_id);
>  +		if (ret) {
>  +			evt_err("failed to launch worker %d", lcore_id);
>  +			return ret;
>  +		}
>  +		wkr_idx++;
>  +	}
>  +
>  +	/* launch producer */
>  +	ret = rte_eal_remote_launch(order_producer, &t->prod, opt->plcore);
>  +	if (ret) {
>  +		evt_err("failed to launch order_producer %d", opt->plcore);
>  +		return ret;
>  +	}
>  +
>  +	uint64_t cycles = rte_get_timer_cycles();
>  +	int64_t old_remining  = -1;

s/remining/remaining/g

This spelling also occurs in test_perf_common.c in patch 23.
  

Patch

diff --git a/app/test-eventdev/test_order_common.c b/app/test-eventdev/test_order_common.c
index 935c5a3fd..a7160f3dc 100644
--- a/app/test-eventdev/test_order_common.c
+++ b/app/test-eventdev/test_order_common.c
@@ -41,6 +41,57 @@  order_test_result(struct evt_test *test, struct evt_options *opt)
 	return t->result;
 }
 
+static inline int
+order_producer(void *arg)
+{
+	struct prod_data *p  = arg;
+	struct test_order *t = p->t;
+	struct evt_options *opt = t->opt;
+	const uint8_t dev_id = p->dev_id;
+	const uint8_t port = p->port_id;
+	struct rte_mempool *pool = t->pool;
+	const uint64_t nb_pkts = t->nb_pkts;
+	uint32_t *producer_flow_seq = t->producer_flow_seq;
+	const uint32_t nb_flows = t->nb_flows;
+	uint64_t count = 0;
+	struct rte_mbuf *m;
+	struct rte_event ev;
+
+	if (opt->verbose_level > 1)
+		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
+			 __func__, rte_lcore_id(), dev_id, port, p->queue_id);
+
+	ev.event = 0;
+	ev.op = RTE_EVENT_OP_NEW;
+	ev.queue_id = p->queue_id;
+	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
+	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
+	ev.event_type =  RTE_EVENT_TYPE_CPU;
+	ev.sub_event_type = 0; /* stage 0 */
+
+	while (count < nb_pkts && t->err == false) {
+		m = rte_pktmbuf_alloc(pool);
+		if (m == NULL)
+			continue;
+
+		const uint32_t flow = (uintptr_t)m % nb_flows;
+		/* Maintain seq number per flow */
+		m->seqn = producer_flow_seq[flow]++;
+
+		ev.flow_id = flow;
+		ev.mbuf = m;
+
+		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
+			if (t->err)
+				break;
+			rte_pause();
+		}
+
+		count++;
+	}
+	return 0;
+}
+
 int
 order_opt_check(struct evt_options *opt)
 {
@@ -185,6 +236,69 @@  order_opt_dump(struct evt_options *opt)
 }
 
 int
+order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+			int (*worker)(void *))
+{
+	int ret, lcore_id;
+	struct test_order *t = evt_test_priv(test);
+
+	int wkr_idx = 0;
+	/* launch workers */
+	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+		if (!(opt->wlcores[lcore_id]))
+			continue;
+
+		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
+					 lcore_id);
+		if (ret) {
+			evt_err("failed to launch worker %d", lcore_id);
+			return ret;
+		}
+		wkr_idx++;
+	}
+
+	/* launch producer */
+	ret = rte_eal_remote_launch(order_producer, &t->prod, opt->plcore);
+	if (ret) {
+		evt_err("failed to launch order_producer %d", opt->plcore);
+		return ret;
+	}
+
+	uint64_t cycles = rte_get_timer_cycles();
+	int64_t old_remining  = -1;
+
+	while (t->err == false) {
+
+		rte_event_schedule(opt->dev_id);
+
+		uint64_t new_cycles = rte_get_timer_cycles();
+		int64_t remining = rte_atomic64_read(&t->outstand_pkts);
+
+		if (remining <= 0) {
+			t->result = EVT_TEST_SUCCESS;
+			break;
+		}
+
+		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
+			printf(CLGRN"\r%"PRId64""CLNRM, remining);
+			fflush(stdout);
+			if (old_remining == remining) {
+				rte_event_dev_dump(opt->dev_id, stdout);
+				evt_err("No schedules for seconds, deadlock");
+				t->err = true;
+				rte_smp_wmb();
+				break;
+			}
+			old_remining = remining;
+			cycles = new_cycles;
+		}
+	}
+	printf("\r");
+
+	return 0;
+}
+
+int
 order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 				uint8_t nb_workers, uint8_t nb_queues)
 {
diff --git a/app/test-eventdev/test_order_common.h b/app/test-eventdev/test_order_common.h
index 165931860..a760b94bd 100644
--- a/app/test-eventdev/test_order_common.h
+++ b/app/test-eventdev/test_order_common.h
@@ -94,6 +94,8 @@  int order_test_result(struct evt_test *test, struct evt_options *opt);
 int order_opt_check(struct evt_options *opt);
 int order_test_setup(struct evt_test *test, struct evt_options *opt);
 int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
+int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
+			int (*worker)(void *));
 int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 				uint8_t nb_workers, uint8_t nb_queues);
 void order_test_destroy(struct evt_test *test, struct evt_options *opt);