[dpdk-dev] [PATCH v2 09/15] event/sw: add support for linking queues to ports

Jerin Jacob jerin.jacob at caviumnetworks.com
Mon Feb 6 10:37:39 CET 2017


On Tue, Jan 31, 2017 at 04:14:27PM +0000, Harry van Haaren wrote:
> From: Bruce Richardson <bruce.richardson at intel.com>
> 
> Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
> Signed-off-by: Harry van Haaren <harry.van.haaren at intel.com>
> ---
>  drivers/event/sw/sw_evdev.c | 68 +++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 68 insertions(+)
> 
> diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
> index 0b26fcb..693a833 100644
> --- a/drivers/event/sw/sw_evdev.c
> +++ b/drivers/event/sw/sw_evdev.c
> @@ -50,6 +50,72 @@ static void
>  sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
>  
>  static int
> +sw_port_link(void *port, const uint8_t queues[], const uint8_t priorities[],
> +		uint16_t num)
> +{
> +	struct sw_port *p = (void *)port;
> +	struct sw_evdev *sw = p->sw;
> +	int i;
> +
> +	RTE_SET_USED(priorities);
> +	for (i = 0; i < num; i++) {
> +		struct sw_qid *q = &sw->qids[queues[i]];
> +
> +		/* check for qid map overflow */
> +		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map))
> +			break;
> +
> +		if (p->is_directed && p->num_qids_mapped > 0)
> +			break;
> +
> +		if (q->type == RTE_SCHED_TYPE_DIRECT) {
> +			/* check directed qids only map to one port */
> +			if (p->num_qids_mapped > 0)
> +				break;
> +			/* check port only takes a directed flow */
> +			if (num > 1)
> +				break;
> +
> +			p->is_directed = 1;
> +			p->num_qids_mapped = 1;

What is the expected behavior if an application attempts to link a queue
configured with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to multiple ports?

Based on the header file, the expected behavior is the following:

(-EDQUOT) Quota exceeded (the application tried to link a queue configured
with RTE_EVENT_QUEUE_CFG_SINGLE_LINK to more than one event port)
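
The loop above only stops a port that already holds a directed mapping
(p->num_qids_mapped > 0); it does not stop a second port from linking to a
directed queue that is already mapped elsewhere, and the caller gets no
errno either way. A rough sketch of a queue-side check (untested, needs
rte_errno.h; it assumes the error is reported through rte_errno as the
header suggests, and reuses the cq_map/cq_num_mapped_cqs fields from this
patch):

	if (q->type == RTE_SCHED_TYPE_DIRECT) {
		/* a single-link queue may be mapped to one port only;
		 * reject a second port with EDQUOT, as documented
		 */
		if (q->cq_num_mapped_cqs > 0 && q->cq_map[0] != p->id) {
			rte_errno = -EDQUOT;
			break;
		}
		/* existing directed-port checks would continue here */
	}

Breaking out with rte_errno set lets sw_port_link() still return the number
of links actually established, which matches the rte_event_port_link()
return convention.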

> +		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
> +			p->num_ordered_qids++;
> +			p->num_qids_mapped++;
> +		} else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
> +			p->num_qids_mapped++;
> +		}
> +
> +		q->cq_map[q->cq_num_mapped_cqs] = p->id;
> +		rte_smp_wmb();
> +		q->cq_num_mapped_cqs++;
> +	}
> +	return i;
> +}
> +
> +static int
> +sw_port_unlink(void *port, uint8_t queues[], uint16_t nb_unlinks)
> +{
> +	struct sw_port *p = (void *)port;
> +	struct sw_evdev *sw = p->sw;
> +	unsigned int i, j;
> +
> +	int unlinked = 0;
> +	for (i = 0; i < nb_unlinks; i++) {
> +		struct sw_qid *q = &sw->qids[queues[i]];
> +		for (j = 0; j < q->cq_num_mapped_cqs; j++)
> +			if (q->cq_map[j] == p->id) {
> +				q->cq_map[j] =
> +					q->cq_map[q->cq_num_mapped_cqs - 1];
> +				rte_smp_wmb();
> +				q->cq_num_mapped_cqs--;
> +				unlinked++;
> +				continue;
> +			}
> +	}
> +	return unlinked;
> +}
> +
> +static int
>  sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
>  		const struct rte_event_port_conf *conf)
>  {
> @@ -381,6 +447,8 @@ sw_probe(const char *name, const char *params)
>  			.port_def_conf = sw_port_def_conf,
>  			.port_setup = sw_port_setup,
>  			.port_release = sw_port_release,
> +			.port_link = sw_port_link,
> +			.port_unlink = sw_port_unlink,
>  	};
>  
>  	static const char *const args[] = {
> -- 
> 2.7.4
> 

