[dpdk-dev] app/eventdev: fix port dequeue depth configuration
Checks
Commit Message
The port dequeue depth value has to be compared against the maximum
allowed dequeue depth reported by the event drivers.
Fixes: 3617aae53f92 ("app/eventdev: add event Rx adapter setup")
Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
app/test-eventdev/test_perf_atq.c | 13 ++++++++++++-
app/test-eventdev/test_perf_common.c | 25 +++++--------------------
app/test-eventdev/test_perf_common.h | 3 ++-
app/test-eventdev/test_perf_queue.c | 12 +++++++++++-
app/test-eventdev/test_pipeline_atq.c | 3 +++
app/test-eventdev/test_pipeline_queue.c | 3 +++
6 files changed, 36 insertions(+), 23 deletions(-)
Comments
-----Original Message-----
> Date: Wed, 24 Jan 2018 15:00:33 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, harry.van.haaren@intel.com
> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH] app/eventdev: fix port dequeue depth
> configuration
> X-Mailer: git-send-email 2.14.1
>
> The port dequeue depth value has to be compared against the maximum
> allowed dequeue depth reported by the event drivers.
>
> Fixes: 3617aae53f92 ("app/eventdev: add event Rx adapter setup")
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
> app/test-eventdev/test_perf_atq.c | 13 ++++++++++++-
> app/test-eventdev/test_perf_common.c | 25 +++++--------------------
> app/test-eventdev/test_perf_common.h | 3 ++-
> app/test-eventdev/test_perf_queue.c | 12 +++++++++++-
> app/test-eventdev/test_pipeline_atq.c | 3 +++
> app/test-eventdev/test_pipeline_queue.c | 3 +++
> 6 files changed, 36 insertions(+), 23 deletions(-)
>
> diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
> index d07a05425..b36b22a77 100644
> --- a/app/test-eventdev/test_perf_atq.c
> +++ b/app/test-eventdev/test_perf_atq.c
> @@ -207,7 +207,18 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
> }
> }
>
> - ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
> + if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
> + opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
> +
> + /* port configuration */
> + const struct rte_event_port_conf p_conf = {
> + .dequeue_depth = opt->wkr_deq_dep,
> + .enqueue_depth = dev_info.max_event_port_dequeue_depth,
> + .new_event_threshold = dev_info.max_num_events,
> + };
> +
> + ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
> + &p_conf);
> if (ret)
> return ret;
>
> diff --git a/app/test-eventdev/test_perf_common.c b/app/test-eventdev/test_perf_common.c
> index e279d81a5..59fa0a49e 100644
> --- a/app/test-eventdev/test_perf_common.c
> +++ b/app/test-eventdev/test_perf_common.c
> @@ -285,22 +285,12 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
>
> int
> perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> - uint8_t stride, uint8_t nb_queues)
> + uint8_t stride, uint8_t nb_queues,
> + const struct rte_event_port_conf *port_conf)
> {
> struct test_perf *t = evt_test_priv(test);
> uint16_t port, prod;
> int ret = -1;
> - struct rte_event_port_conf port_conf;
> -
> - memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
> - rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);
> -
> - /* port configuration */
> - const struct rte_event_port_conf wkr_p_conf = {
> - .dequeue_depth = opt->wkr_deq_dep,
> - .enqueue_depth = port_conf.enqueue_depth,
> - .new_event_threshold = port_conf.new_event_threshold,
> - };
>
> /* setup one port per worker, linking to all queues */
> for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
> @@ -313,7 +303,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> w->processed_pkts = 0;
> w->latency = 0;
>
> - ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
> + ret = rte_event_port_setup(opt->dev_id, port, port_conf);
> if (ret) {
> evt_err("failed to setup port %d", port);
> return ret;
> @@ -327,18 +317,13 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
> }
>
> /* port for producers, no links */
> - struct rte_event_port_conf prod_conf = {
> - .dequeue_depth = port_conf.dequeue_depth,
> - .enqueue_depth = port_conf.enqueue_depth,
> - .new_event_threshold = port_conf.new_event_threshold,
> - };
> if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
> for ( ; port < perf_nb_event_ports(opt); port++) {
> struct prod_data *p = &t->prod[port];
> p->t = t;
> }
>
> - ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
> + ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
I think it is better to pass port_conf as a pointer.
With that change:
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
@@ -207,7 +207,18 @@ perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
- ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues);
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
+ ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
+ &p_conf);
if (ret)
return ret;
@@ -285,22 +285,12 @@ perf_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
- uint8_t stride, uint8_t nb_queues)
+ uint8_t stride, uint8_t nb_queues,
+ const struct rte_event_port_conf *port_conf)
{
struct test_perf *t = evt_test_priv(test);
uint16_t port, prod;
int ret = -1;
- struct rte_event_port_conf port_conf;
-
- memset(&port_conf, 0, sizeof(struct rte_event_port_conf));
- rte_event_port_default_conf_get(opt->dev_id, 0, &port_conf);
-
- /* port configuration */
- const struct rte_event_port_conf wkr_p_conf = {
- .dequeue_depth = opt->wkr_deq_dep,
- .enqueue_depth = port_conf.enqueue_depth,
- .new_event_threshold = port_conf.new_event_threshold,
- };
/* setup one port per worker, linking to all queues */
for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
@@ -313,7 +303,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
w->processed_pkts = 0;
w->latency = 0;
- ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
+ ret = rte_event_port_setup(opt->dev_id, port, port_conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
@@ -327,18 +317,13 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
}
/* port for producers, no links */
- struct rte_event_port_conf prod_conf = {
- .dequeue_depth = port_conf.dequeue_depth,
- .enqueue_depth = port_conf.enqueue_depth,
- .new_event_threshold = port_conf.new_event_threshold,
- };
if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
for ( ; port < perf_nb_event_ports(opt); port++) {
struct prod_data *p = &t->prod[port];
p->t = t;
}
- ret = perf_event_rx_adapter_setup(opt, stride, prod_conf);
+ ret = perf_event_rx_adapter_setup(opt, stride, *port_conf);
if (ret)
return ret;
} else {
@@ -352,7 +337,7 @@ perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
p->t = t;
ret = rte_event_port_setup(opt->dev_id, port,
- &prod_conf);
+ port_conf);
if (ret) {
evt_err("failed to setup port %d", port);
return ret;
@@ -133,7 +133,8 @@ int perf_test_setup(struct evt_test *test, struct evt_options *opt);
int perf_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int perf_mempool_setup(struct evt_test *test, struct evt_options *opt);
int perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
- uint8_t stride, uint8_t nb_queues);
+ uint8_t stride, uint8_t nb_queues,
+ const struct rte_event_port_conf *port_conf);
int perf_event_dev_service_setup(uint8_t dev_id);
int perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
int (*worker)(void *));
@@ -219,8 +219,18 @@ perf_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;
+
+ /* port configuration */
+ const struct rte_event_port_conf p_conf = {
+ .dequeue_depth = opt->wkr_deq_dep,
+ .enqueue_depth = dev_info.max_event_port_dequeue_depth,
+ .new_event_threshold = dev_info.max_num_events,
+ };
+
ret = perf_event_dev_port_setup(test, opt, nb_stages /* stride */,
- nb_queues);
+ nb_queues, &p_conf);
if (ret)
return ret;
@@ -378,6 +378,9 @@ pipeline_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
/* port configuration */
const struct rte_event_port_conf p_conf = {
.dequeue_depth = opt->wkr_deq_dep,
@@ -397,6 +397,9 @@ pipeline_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
}
}
+ if (opt->wkr_deq_dep > info.max_event_port_dequeue_depth)
+ opt->wkr_deq_dep = info.max_event_port_dequeue_depth;
+
/* port configuration */
const struct rte_event_port_conf p_conf = {
.dequeue_depth = opt->wkr_deq_dep,