[dpdk-dev,v5,08/20] event/sw: add support for linking queues to ports
Checks
Commit Message
From: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
---
drivers/event/sw/sw_evdev.c | 81 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 81 insertions(+)
Comments
On Fri, Mar 24, 2017 at 04:53:03PM +0000, Harry van Haaren wrote:
> From: Bruce Richardson <bruce.richardson@intel.com>
>
> Signed-off-by: Bruce Richardson <bruce.richardson@intel.com>
> Signed-off-by: Harry van Haaren <harry.van.haaren@intel.com>
> ---
> drivers/event/sw/sw_evdev.c | 81 +++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 81 insertions(+)
>
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 4b8370d..82ac3bd 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -36,6 +36,7 @@
> #include <rte_memzone.h>
> #include <rte_kvargs.h>
> #include <rte_ring.h>
> +#include <rte_errno.h>
>
> #include "sw_evdev.h"
> #include "iq_ring.h"
> @@ -50,6 +51,84 @@ static void
> sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
>
> static int
> +sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
> + const uint8_t priorities[], uint16_t num)
> +{
> + struct sw_port *p = (void *)port;
(void *) typecast is not required.
> + struct sw_evdev *sw = sw_pmd_priv(dev);
> + int i;
> +
> + RTE_SET_USED(priorities);
> + for (i = 0; i < num; i++) {
> + struct sw_qid *q = &sw->qids[queues[i]];
> +
> + /* check for qid map overflow */
> + if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map))
> + break;
> +
> + if (p->is_directed && p->num_qids_mapped > 0)
Do we need to set rte_errno = -EDQUOT here too?
> + break;
> +
> + if (q->type == SW_SCHED_TYPE_DIRECT) {
> + /* check directed qids only map to one port */
> + if (p->num_qids_mapped > 0) {
> + rte_errno = -EDQUOT;
> + break;
> + }
> + /* check port only takes a directed flow */
> + if (num > 1) {
> + rte_errno = -EDQUOT;
> + break;
> + }
> +
> + p->is_directed = 1;
> + p->num_qids_mapped = 1;
> + } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
Will this "else if" have a similar issue to the one described in
http://dpdk.org/ml/archives/dev/2017-March/061497.html ?
> + p->num_ordered_qids++;
> + p->num_qids_mapped++;
> + } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
> + p->num_qids_mapped++;
> + }
> +
> + q->cq_map[q->cq_num_mapped_cqs] = p->id;
> + rte_smp_wmb();
> + q->cq_num_mapped_cqs++;
> + }
> + return i;
> +}
> +
> +static int
> +sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
> + uint16_t nb_unlinks)
> +{
> + struct sw_port *p = (void *)port;
(void *) typecast is not required.
> + struct sw_evdev *sw = sw_pmd_priv(dev);
> + unsigned int i, j;
> +
> + int unlinked = 0;
> + for (i = 0; i < nb_unlinks; i++) {
> + struct sw_qid *q = &sw->qids[queues[i]];
> + for (j = 0; j < q->cq_num_mapped_cqs; j++) {
> + if (q->cq_map[j] == p->id) {
> + q->cq_map[j] =
> + q->cq_map[q->cq_num_mapped_cqs - 1];
> + rte_smp_wmb();
> + q->cq_num_mapped_cqs--;
> + unlinked++;
> +
> + p->num_qids_mapped--;
> +
> + if (q->type == RTE_SCHED_TYPE_ORDERED)
> + p->num_ordered_qids--;
> +
> + continue;
> + }
> + }
> + }
> + return unlinked;
> +}
> +
With above suggested changes,
Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> From: Jerin Jacob [mailto:jerin.jacob@caviumnetworks.com]
> Sent: Monday, March 27, 2017 12:21 PM
> To: Van Haaren, Harry <harry.van.haaren@intel.com>
> Cc: dev@dpdk.org; Richardson, Bruce <bruce.richardson@intel.com>
> Subject: Re: [PATCH v5 08/20] event/sw: add support for linking queues to ports
<snip non-SINGLE_LINK related feedback>
> > + break;
> > +
> > + if (q->type == SW_SCHED_TYPE_DIRECT) {
> > + /* check directed qids only map to one port */
> > + if (p->num_qids_mapped > 0) {
> > + rte_errno = -EDQUOT;
> > + break;
> > + }
> > + /* check port only takes a directed flow */
> > + if (num > 1) {
> > + rte_errno = -EDQUOT;
> > + break;
> > + }
> > +
> > + p->is_directed = 1;
> > + p->num_qids_mapped = 1;
> > + } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
>
> Will this "else if" have a similar issue to the one described in
> http://dpdk.org/ml/archives/dev/2017-March/061497.html ?
This particular issue has been resolved by a fix in patch 06/20. The other issues you've raised on this patch have been fixed. Thanks again for your feedback - the bug in 06/20 was a great catch!
@@ -36,6 +36,7 @@
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
+#include <rte_errno.h>
#include "sw_evdev.h"
#include "iq_ring.h"
@@ -50,6 +51,84 @@ static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
static int
+sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
+ const uint8_t priorities[], uint16_t num)
+{
+ struct sw_port *p = (void *)port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ int i;
+
+ RTE_SET_USED(priorities);
+ for (i = 0; i < num; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+
+ /* check for qid map overflow */
+ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map))
+ break;
+
+ if (p->is_directed && p->num_qids_mapped > 0)
+ break;
+
+ if (q->type == SW_SCHED_TYPE_DIRECT) {
+ /* check directed qids only map to one port */
+ if (p->num_qids_mapped > 0) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+ /* check port only takes a directed flow */
+ if (num > 1) {
+ rte_errno = -EDQUOT;
+ break;
+ }
+
+ p->is_directed = 1;
+ p->num_qids_mapped = 1;
+ } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
+ p->num_ordered_qids++;
+ p->num_qids_mapped++;
+ } else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+ p->num_qids_mapped++;
+ }
+
+ q->cq_map[q->cq_num_mapped_cqs] = p->id;
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs++;
+ }
+ return i;
+}
+
+static int
+sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
+ uint16_t nb_unlinks)
+{
+ struct sw_port *p = (void *)port;
+ struct sw_evdev *sw = sw_pmd_priv(dev);
+ unsigned int i, j;
+
+ int unlinked = 0;
+ for (i = 0; i < nb_unlinks; i++) {
+ struct sw_qid *q = &sw->qids[queues[i]];
+ for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+ if (q->cq_map[j] == p->id) {
+ q->cq_map[j] =
+ q->cq_map[q->cq_num_mapped_cqs - 1];
+ rte_smp_wmb();
+ q->cq_num_mapped_cqs--;
+ unlinked++;
+
+ p->num_qids_mapped--;
+
+ if (q->type == RTE_SCHED_TYPE_ORDERED)
+ p->num_ordered_qids--;
+
+ continue;
+ }
+ }
+ }
+ return unlinked;
+}
+
+static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
const struct rte_event_port_conf *conf)
{
@@ -402,6 +481,8 @@ sw_probe(const char *name, const char *params)
.port_def_conf = sw_port_def_conf,
.port_setup = sw_port_setup,
.port_release = sw_port_release,
+ .port_link = sw_port_link,
+ .port_unlink = sw_port_unlink,
};
static const char *const args[] = {