[dpdk-dev] [PATCH v2 1/4] app/eventdev: add event timer adapter as a producer

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Tue Apr 3 18:01:31 CEST 2018


Add the event timer adapter as a producer option that can be selected by
passing --prod_type_timerdev.
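
For example, a possible invocation with the new producer type (the binary
path, EAL arguments, vdev and core numbers below are illustrative and depend
on the build and on the eventdev/timer adapter available on the target):

    ./build/app/dpdk-test-eventdev --vdev="event_octeontx" -- \
            --test=perf_queue --plcores=2 --wlcores=4,5 --stlist=a \
            --prod_type_timerdev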

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
---

 v2 Changes:
 - set the timer state to NOT_ARMED before trying to arm it.
 - prevent the edge case where timeout_ticks computes to 0.
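
 The zero-tick case can occur when the requested expiry (nb_bkt_tcks *
 bkt_tck_nsec) is smaller than the resolution reported by the adapter; the
 producer now clamps the computed tick count roughly as below (illustrative
 sketch only, field names as used in this patch):

	uint64_t ticks = opt->optm_bkt_tck_nsec ?
			(opt->nb_bkt_tcks * opt->bkt_tck_nsec) /
			opt->optm_bkt_tck_nsec : opt->nb_bkt_tcks;
	ticks += ticks ? 0 : 1;	/* never arm a timer with zero ticks */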

 app/test-eventdev/evt_options.c      |  54 +++++++----
 app/test-eventdev/evt_options.h      |   9 ++
 app/test-eventdev/test_perf_atq.c    |  10 +--
 app/test-eventdev/test_perf_common.c | 170 +++++++++++++++++++++++++++++++++--
 app/test-eventdev/test_perf_common.h |   7 ++
 app/test-eventdev/test_perf_queue.c  |   7 +-
 6 files changed, 221 insertions(+), 36 deletions(-)

diff --git a/app/test-eventdev/evt_options.c b/app/test-eventdev/evt_options.c
index 9683b2224..49cd9c419 100644
--- a/app/test-eventdev/evt_options.c
+++ b/app/test-eventdev/evt_options.c
@@ -27,6 +27,11 @@ evt_options_default(struct evt_options *opt)
 	opt->pool_sz = 16 * 1024;
 	opt->wkr_deq_dep = 16;
 	opt->nb_pkts = (1ULL << 26); /* do ~64M packets */
+	opt->nb_timers = 1E8;
+	opt->nb_timer_adptrs = 1;
+	opt->bkt_tck_nsec = 1E3; /* 1000ns ~ 1us */
+	opt->max_tmo_nsec = 1E5; /* 100us */
+	opt->nb_bkt_tcks = 10;   /* 10 * bkt_tck_nsec ~ 10us */
 	opt->prod_type = EVT_PROD_TYPE_SYNT;
 }

@@ -86,6 +91,13 @@ evt_parse_eth_prod_type(struct evt_options *opt, const char *arg __rte_unused)
 	return 0;
 }

+static int
+evt_parse_timer_prod_type(struct evt_options *opt, const char *arg __rte_unused)
+{
+	opt->prod_type = EVT_PROD_TYPE_EVENT_TIMER_ADPTR;
+	return 0;
+}
+
 static int
 evt_parse_test_name(struct evt_options *opt, const char *arg)
 {
@@ -169,7 +181,10 @@ usage(char *program)
 		"\t--worker_deq_depth : dequeue depth of the worker\n"
 		"\t--fwd_latency      : perform fwd_latency measurement\n"
 		"\t--queue_priority   : enable queue priority\n"
-		"\t--prod_type_ethdev : use ethernet device as producer\n."
+		"\t--prod_type_ethdev : use ethernet device as producer.\n"
+		"\t--prod_type_timerdev : use event timer device as producer.\n"
+		"\t                     x * bkt_tck_nsec would be the timeout\n"
+		"\t                     in ns.\n"
 		);
 	printf("available tests:\n");
 	evt_test_dump_names();
@@ -217,22 +232,23 @@ evt_parse_sched_type_list(struct evt_options *opt, const char *arg)
 }

 static struct option lgopts[] = {
-	{ EVT_NB_FLOWS,         1, 0, 0 },
-	{ EVT_DEVICE,           1, 0, 0 },
-	{ EVT_VERBOSE,          1, 0, 0 },
-	{ EVT_TEST,             1, 0, 0 },
-	{ EVT_PROD_LCORES,      1, 0, 0 },
-	{ EVT_WORK_LCORES,      1, 0, 0 },
-	{ EVT_SOCKET_ID,        1, 0, 0 },
-	{ EVT_POOL_SZ,          1, 0, 0 },
-	{ EVT_NB_PKTS,          1, 0, 0 },
-	{ EVT_WKR_DEQ_DEP,      1, 0, 0 },
-	{ EVT_SCHED_TYPE_LIST,  1, 0, 0 },
-	{ EVT_FWD_LATENCY,      0, 0, 0 },
-	{ EVT_QUEUE_PRIORITY,   0, 0, 0 },
-	{ EVT_PROD_ETHDEV,      0, 0, 0 },
-	{ EVT_HELP,             0, 0, 0 },
-	{ NULL,                 0, 0, 0 }
+	{ EVT_NB_FLOWS,            1, 0, 0 },
+	{ EVT_DEVICE,              1, 0, 0 },
+	{ EVT_VERBOSE,             1, 0, 0 },
+	{ EVT_TEST,                1, 0, 0 },
+	{ EVT_PROD_LCORES,         1, 0, 0 },
+	{ EVT_WORK_LCORES,         1, 0, 0 },
+	{ EVT_SOCKET_ID,           1, 0, 0 },
+	{ EVT_POOL_SZ,             1, 0, 0 },
+	{ EVT_NB_PKTS,             1, 0, 0 },
+	{ EVT_WKR_DEQ_DEP,         1, 0, 0 },
+	{ EVT_SCHED_TYPE_LIST,     1, 0, 0 },
+	{ EVT_FWD_LATENCY,         0, 0, 0 },
+	{ EVT_QUEUE_PRIORITY,      0, 0, 0 },
+	{ EVT_PROD_ETHDEV,         0, 0, 0 },
+	{ EVT_PROD_TIMERDEV,       0, 0, 0 },
+	{ EVT_HELP,                0, 0, 0 },
+	{ NULL,                    0, 0, 0 }
 };

 static int
@@ -255,11 +271,12 @@ evt_opts_parse_long(int opt_idx, struct evt_options *opt)
 		{ EVT_FWD_LATENCY, evt_parse_fwd_latency},
 		{ EVT_QUEUE_PRIORITY, evt_parse_queue_priority},
 		{ EVT_PROD_ETHDEV, evt_parse_eth_prod_type},
+		{ EVT_PROD_TIMERDEV, evt_parse_timer_prod_type},
 	};

 	for (i = 0; i < RTE_DIM(parsermap); i++) {
 		if (strncmp(lgopts[opt_idx].name, parsermap[i].lgopt_name,
-				strlen(parsermap[i].lgopt_name)) == 0)
+				strlen(lgopts[opt_idx].name)) == 0)
 			return parsermap[i].parser_fn(opt, optarg);
 	}

@@ -305,6 +322,7 @@ evt_options_dump(struct evt_options *opt)
 	evt_dump("pool_sz", "%d", opt->pool_sz);
 	evt_dump("master lcore", "%d", rte_get_master_lcore());
 	evt_dump("nb_pkts", "%"PRIu64, opt->nb_pkts);
+	evt_dump("nb_timers", "%"PRIu64, opt->nb_timers);
 	evt_dump_begin("available lcores");
 	RTE_LCORE_FOREACH(lcore_id)
 		printf("%d ", lcore_id);
diff --git a/app/test-eventdev/evt_options.h b/app/test-eventdev/evt_options.h
index 46d122229..37debae84 100644
--- a/app/test-eventdev/evt_options.h
+++ b/app/test-eventdev/evt_options.h
@@ -31,12 +31,14 @@
 #define EVT_FWD_LATENCY          ("fwd_latency")
 #define EVT_QUEUE_PRIORITY       ("queue_priority")
 #define EVT_PROD_ETHDEV          ("prod_type_ethdev")
+#define EVT_PROD_TIMERDEV        ("prod_type_timerdev")
 #define EVT_HELP                 ("help")

 enum evt_prod_type {
 	EVT_PROD_TYPE_NONE,
 	EVT_PROD_TYPE_SYNT,          /* Producer type Synthetic i.e. CPU. */
 	EVT_PROD_TYPE_ETH_RX_ADPTR,  /* Producer type Eth Rx Adapter. */
+	EVT_PROD_TYPE_EVENT_TIMER_ADPTR,  /* Producer type Timer Adapter. */
 	EVT_PROD_TYPE_MAX,
 };

@@ -52,11 +54,18 @@ struct evt_options {
 	int nb_stages;
 	int verbose_level;
 	uint64_t nb_pkts;
+	uint8_t nb_timer_adptrs;
+	uint64_t nb_timers;
+	uint64_t bkt_tck_nsec;
+	uint64_t optm_bkt_tck_nsec;
+	uint64_t max_tmo_nsec;
+	uint64_t nb_bkt_tcks;
 	uint16_t wkr_deq_dep;
 	uint8_t dev_id;
 	uint32_t fwd_latency:1;
 	uint32_t q_priority:1;
 	enum evt_prod_type prod_type;
+	uint8_t timdev_cnt;
 };

 void evt_options_default(struct evt_options *opt);
diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index b36b22a77..b3a312722 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -43,15 +43,12 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
 	while (t->done == false) {
 		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

-		if (enable_fwd_latency)
-			rte_prefetch0(ev.event_ptr);
-
 		if (!event) {
 			rte_pause();
 			continue;
 		}

-		if (enable_fwd_latency)
+		if (enable_fwd_latency && !prod_timer_type)
 		/* first stage in pipeline, mark ts to compute fwd latency */
 			atq_mark_fwd_latency(&ev);

@@ -90,7 +87,7 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
 		}

 		for (i = 0; i < nb_rx; i++) {
-			if (enable_fwd_latency) {
+			if (enable_fwd_latency && !prod_timer_type) {
 				rte_prefetch0(ev[i+1].event_ptr);
 				/* first stage in pipeline.
 				 * mark time stamp to compute fwd latency
@@ -163,7 +160,8 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_event_dev_info dev_info;

 	nb_ports = evt_nr_active_lcores(opt->wlcores);
-	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+	nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
 		evt_nr_active_lcores(opt->plcores);

 	nb_queues = atq_nb_event_queues(opt);
diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
index 59fa0a49e..39072eb5d 100644
--- a/app/test-eventdev/test_perf_common.c
+++ b/app/test-eventdev/test_perf_common.c
@@ -72,6 +72,66 @@ perf_producer(void *arg)
 	return 0;
 }

+static inline int
+perf_event_timer_producer(void *arg)
+{
+	struct prod_data *p  = arg;
+	struct test_perf *t = p->t;
+	struct evt_options *opt = t->opt;
+	uint32_t flow_counter = 0;
+	uint64_t count = 0;
+	uint64_t arm_latency = 0;
+	const uint8_t nb_timer_adptrs = opt->nb_timer_adptrs;
+	const uint32_t nb_flows = t->nb_flows;
+	const uint64_t nb_timers = opt->nb_timers;
+	struct rte_mempool *pool = t->pool;
+	struct perf_elt *m = NULL;
+	struct rte_event_timer_adapter **adptr = t->timer_adptr;
+	uint64_t timeout_ticks = opt->optm_bkt_tck_nsec ?
+			(opt->nb_bkt_tcks * opt->bkt_tck_nsec)
+			/ opt->optm_bkt_tck_nsec : opt->nb_bkt_tcks;
+
+	timeout_ticks += timeout_ticks ? 0 : 1;
+	const struct rte_event_timer tim = {
+		.ev.op = RTE_EVENT_OP_NEW,
+		.ev.queue_id = p->queue_id,
+		.ev.sched_type = t->opt->sched_type_list[0],
+		.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
+		.ev.event_type =  RTE_EVENT_TYPE_TIMER,
+		.state = RTE_EVENT_TIMER_NOT_ARMED,
+		.timeout_ticks = timeout_ticks,
+	};
+
+	if (opt->verbose_level > 1)
+		printf("%s(): lcore %d\n", __func__, rte_lcore_id());
+
+	while (count < nb_timers && t->done == false) {
+		if (rte_mempool_get(pool, (void **)&m) < 0)
+			continue;
+
+		m->tim = tim;
+		m->tim.ev.flow_id = flow_counter++ % nb_flows;
+		m->tim.ev.event_ptr = m;
+		m->timestamp = rte_get_timer_cycles();
+		while (rte_event_timer_arm_burst(
+				adptr[flow_counter % nb_timer_adptrs],
+				(struct rte_event_timer **)&m, 1) != 1) {
+			if (t->done)
+				break;
+			rte_pause();
+			m->timestamp = rte_get_timer_cycles();
+		}
+		arm_latency += rte_get_timer_cycles() - m->timestamp;
+		count++;
+	}
+	fflush(stdout);
+	rte_delay_ms(1000);
+	printf("%s(): lcore %d Average event timer arm latency = %.3f us\n",
+			__func__, rte_lcore_id(), (float)(arm_latency / count) /
+			(rte_get_timer_hz() / 1000000));
+	return 0;
+}
+
 static int
 perf_producer_wrapper(void *arg)
 {
@@ -80,6 +140,8 @@ perf_producer_wrapper(void *arg)
 	/* Launch the producer function only in case of synthetic producer. */
 	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
 		return perf_producer(arg);
+	else if (t->opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
+		return perf_event_timer_producer(arg);
 	return 0;
 }

@@ -146,8 +208,7 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
 		port_idx++;
 	}

-	const uint64_t total_pkts = opt->nb_pkts *
-			evt_nr_active_lcores(opt->plcores);
+	const uint64_t total_pkts = t->outstand_pkts;

 	uint64_t dead_lock_cycles = rte_get_timer_cycles();
 	int64_t dead_lock_remaining  =  total_pkts;
@@ -189,7 +250,9 @@ perf_launch_lcores(struct evt_test *test, struct evt_options *opt,

 			if (remaining <= 0) {
 				t->result = EVT_TEST_SUCCESS;
-				if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+				if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+					opt->prod_type ==
+					EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
 					t->done = true;
 					rte_smp_wmb();
 					break;
@@ -283,6 +346,65 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 	return ret;
 }

+static int
+perf_event_timer_adapter_setup(struct test_perf *t)
+{
+	int i;
+	int ret;
+	struct rte_event_timer_adapter_info adapter_info;
+	struct rte_event_timer_adapter *wl;
+	uint8_t nb_producers = evt_nr_active_lcores(t->opt->plcores);
+	uint8_t flags = RTE_EVENT_TIMER_ADAPTER_F_ADJUST_RES;
+
+	if (nb_producers == 1)
+		flags |= RTE_EVENT_TIMER_ADAPTER_F_SP_PUT;
+
+	for (i = 0; i < t->opt->nb_timer_adptrs; i++) {
+		struct rte_event_timer_adapter_conf config = {
+			.event_dev_id = t->opt->dev_id,
+			.timer_adapter_id = i,
+			.timer_tick_ns = t->opt->bkt_tck_nsec,
+			.max_tmo_ns = t->opt->max_tmo_nsec,
+			.nb_timers = 2 * 1024 * 1024,
+			.flags = flags,
+		};
+
+		wl = rte_event_timer_adapter_create(&config);
+		if (wl == NULL) {
+			evt_err("failed to create event timer ring %d", i);
+			return rte_errno;
+		}
+
+		memset(&adapter_info, 0,
+				sizeof(struct rte_event_timer_adapter_info));
+		rte_event_timer_adapter_get_info(wl, &adapter_info);
+		t->opt->optm_bkt_tck_nsec = adapter_info.min_resolution_ns;
+
+		if (!(adapter_info.caps &
+				RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT)) {
+			uint32_t service_id;
+
+			rte_event_timer_adapter_service_id_get(wl,
+					&service_id);
+			ret = evt_service_setup(service_id);
+			if (ret) {
+				evt_err("Failed to setup service core"
+						" for timer adapter\n");
+				return ret;
+			}
+			rte_service_runstate_set(service_id, 1);
+		}
+
+		ret = rte_event_timer_adapter_start(wl);
+		if (ret) {
+			evt_err("failed to Start event timer adapter %d", i);
+			return ret;
+		}
+		t->timer_adptr[i] = wl;
+	}
+	return 0;
+}
+
 int
 perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 				uint8_t stride, uint8_t nb_queues,
@@ -326,6 +448,18 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
 		ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
 		if (ret)
 			return ret;
+	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+		prod = 0;
+		for ( ; port < perf_nb_event_ports(opt); port++) {
+			struct prod_data *p = &t->prod[port];
+			p->queue_id = prod * stride;
+			p->t = t;
+			prod++;
+		}
+
+		ret = perf_event_timer_adapter_setup(t);
+		if (ret)
+			return ret;
 	} else {
 		prod = 0;
 		for ( ; port < perf_nb_event_ports(opt); port++) {
@@ -415,10 +549,13 @@ perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
 	}

 	/* Fixups */
-	if (opt->nb_stages == 1 && opt->fwd_latency) {
+	if ((opt->nb_stages == 1 &&
+			opt->prod_type != EVT_PROD_TYPE_EVENT_TIMER_ADPTR) &&
+			opt->fwd_latency) {
 		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
 		opt->fwd_latency = 0;
 	}
+
 	if (opt->fwd_latency && !opt->q_priority) {
 		evt_info("enabled queue priority for latency measurement");
 		opt->q_priority = 1;
@@ -447,8 +584,13 @@ perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
 void
 perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
-	RTE_SET_USED(test);
+	int i;
+	struct test_perf *t = evt_test_priv(test);

+	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+		for (i = 0; i < opt->nb_timer_adptrs; i++)
+			rte_event_timer_adapter_stop(t->timer_adptr[i]);
+	}
 	rte_event_dev_stop(opt->dev_id);
 	rte_event_dev_close(opt->dev_id);
 }
@@ -488,7 +630,8 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 		},
 	};

-	if (opt->prod_type == EVT_PROD_TYPE_SYNT)
+	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR)
 		return 0;

 	if (!rte_eth_dev_count()) {
@@ -544,7 +687,8 @@ perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
 	struct test_perf *t = evt_test_priv(test);

-	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
+	if (opt->prod_type == EVT_PROD_TYPE_SYNT ||
+			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
 		t->pool = rte_mempool_create(test->name, /* mempool name */
 				opt->pool_sz, /* number of elements*/
 				sizeof(struct perf_elt), /* element size*/
@@ -594,10 +738,18 @@ perf_test_setup(struct evt_test *test, struct evt_options *opt)

 	struct test_perf *t = evt_test_priv(test);

-	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
+	if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
+		t->outstand_pkts = opt->nb_timers *
+			evt_nr_active_lcores(opt->plcores);
+		t->nb_pkts = opt->nb_timers;
+	} else {
+		t->outstand_pkts = opt->nb_pkts *
+			evt_nr_active_lcores(opt->plcores);
+		t->nb_pkts = opt->nb_pkts;
+	}
+
 	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
 	t->done = false;
-	t->nb_pkts = opt->nb_pkts;
 	t->nb_flows = opt->nb_flows;
 	t->result = EVT_TEST_FAILED;
 	t->opt = opt;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index 9ad99733b..4e96f229c 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -13,6 +13,7 @@
 #include <rte_ethdev.h>
 #include <rte_eventdev.h>
 #include <rte_event_eth_rx_adapter.h>
+#include <rte_event_timer_adapter.h>
 #include <rte_lcore.h>
 #include <rte_malloc.h>
 #include <rte_mempool.h>
@@ -39,6 +40,7 @@ struct prod_data {
 	struct test_perf *t;
 } __rte_cache_aligned;

+
 struct test_perf {
 	/* Don't change the offset of "done". Signal handler use this memory
 	 * to terminate all lcores work.
@@ -54,9 +56,12 @@ struct test_perf {
 	struct worker_data worker[EVT_MAX_PORTS];
 	struct evt_options *opt;
 	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
+	struct rte_event_timer_adapter *timer_adptr[
+		RTE_EVENT_TIMER_ADAPTER_NUM_MAX] __rte_cache_aligned;
 } __rte_cache_aligned;

 struct perf_elt {
+	struct rte_event_timer tim;
 	uint64_t timestamp;
 } __rte_cache_aligned;

@@ -68,6 +73,8 @@ struct perf_elt {
 	struct evt_options *opt = t->opt;\
 	const uint8_t dev = w->dev_id;\
 	const uint8_t port = w->port_id;\
+	const uint8_t prod_timer_type = \
+		opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR;\
 	uint8_t *const sched_type_list = &t->sched_type_list[0];\
 	struct rte_mempool *const pool = t->pool;\
 	const uint8_t nb_stages = t->opt->nb_stages;\
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index db8f2f3e5..74469a5ee 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -49,7 +49,7 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
 			rte_pause();
 			continue;
 		}
-		if (enable_fwd_latency)
+		if (enable_fwd_latency && !prod_timer_type)
 		/* first q in pipeline, mark timestamp to compute fwd latency */
 			mark_fwd_latency(&ev, nb_stages);

@@ -88,7 +88,7 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
 		}

 		for (i = 0; i < nb_rx; i++) {
-			if (enable_fwd_latency) {
+			if (enable_fwd_latency && !prod_timer_type) {
 				rte_prefetch0(ev[i+1].event_ptr);
 				/* first queue in pipeline.
 				 * mark time stamp to compute fwd latency
@@ -161,7 +161,8 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
 	struct rte_event_dev_info dev_info;

 	nb_ports = evt_nr_active_lcores(opt->wlcores);
-	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ? 0 :
+	nb_ports += opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
+		 opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR ? 0 :
 		evt_nr_active_lcores(opt->plcores);

 	nb_queues = perf_queue_nb_event_queues(opt);
--
2.16.2


