[dpdk-dev] [PATCH v2 07/15] event/sw: add support for event queues

Jerin Jacob jerin.jacob at caviumnetworks.com
Mon Feb 6 10:25:10 CET 2017


On Tue, Jan 31, 2017 at 04:14:25PM +0000, Harry van Haaren wrote:
> From: Bruce Richardson <bruce.richardson at intel.com>
> 
> Add in the data structures for the event queues, and the eventdev
> functions to create and destroy those queues.
> 
> Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>
> Signed-off-by: Harry van Haaren <harry.van.haaren at intel.com>
> ---
>  drivers/event/sw/iq_ring.h  | 176 ++++++++++++++++++++++++++++++++++++++++++++
>  drivers/event/sw/sw_evdev.c | 158 +++++++++++++++++++++++++++++++++++++++
>  drivers/event/sw/sw_evdev.h |  75 +++++++++++++++++++
>  3 files changed, 409 insertions(+)
>  create mode 100644 drivers/event/sw/iq_ring.h
> 
> + */
> +
> +/*
> + * Ring structure definitions used for the internal ring buffers of the
> + * SW eventdev implementation. These are designed for single-core use only.
> + */

If I understand it correctly, the IQ and QE rings are single-producer,
single-consumer rings. Per the specification, multiple producers, through
multiple ports, can enqueue to the same event queue at the same time. Does
the SW implementation support that, or am I missing something here?
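
For example (just a sketch of the concern, not code from the patch): with the
current single-producer design, two ports enqueueing to the same queue at the
same time would have to be serialised externally, along the lines of the
hypothetical wrapper below, which rather defeats the point of a fast
single-core ring.

	/* Hypothetical multi-producer wrapper, for illustration only:
	 * serialise producers with a per-queue spinlock around the
	 * single-producer enqueue. Not part of this patch. */
	#include <rte_spinlock.h>

	static inline uint16_t
	iq_ring_mp_enqueue_burst(struct iq_ring *r, rte_spinlock_t *lock,
			struct rte_event *qes, uint16_t nb_qes)
	{
		uint16_t ret;

		rte_spinlock_lock(lock);
		ret = iq_ring_enqueue_burst(r, qes, nb_qes);
		rte_spinlock_unlock(lock);

		return ret;
	}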

> +#ifndef _IQ_RING_
> +#define _IQ_RING_
> +
> +#include <stdint.h>
> +
> +#include <rte_common.h>
> +#include <rte_memory.h>
> +#include <rte_malloc.h>
> +#include <rte_eventdev.h>
> +
> +#define IQ_RING_NAMESIZE 12
> +#define QID_IQ_DEPTH 512
> +#define QID_IQ_MASK (uint16_t)(QID_IQ_DEPTH - 1)
> +
> +struct iq_ring {
> +	char name[IQ_RING_NAMESIZE] __rte_cache_aligned;
> +	uint16_t write_idx;
> +	uint16_t read_idx;
> +
> +	struct rte_event ring[QID_IQ_DEPTH];
> +};
> +
> +#ifndef force_inline
> +#define force_inline inline __attribute__((always_inline))
> +#endif
> +
> +static inline struct iq_ring *
> +iq_ring_create(const char *name, unsigned int socket_id)
> +{
> +	struct iq_ring *retval;
> +
> +	retval = rte_malloc_socket(NULL, sizeof(*retval), 0, socket_id);
> +	if (retval == NULL)
> +		goto end;
> +
> +	snprintf(retval->name, sizeof(retval->name), "%s", name);
> +	retval->write_idx = retval->read_idx = 0;
> +end:
> +	return retval;
> +}
> +
> +static inline void
> +iq_ring_destroy(struct iq_ring *r)
> +{
> +	rte_free(r);
> +}
> +
> +static force_inline uint16_t
> +iq_ring_count(const struct iq_ring *r)
> +{
> +	return r->write_idx - r->read_idx;
> +}
> +
> +static force_inline uint16_t
> +iq_ring_free_count(const struct iq_ring *r)
> +{
> +	return QID_IQ_MASK - iq_ring_count(r);
> +}
> +
> +static force_inline uint16_t
> +iq_ring_enqueue_burst(struct iq_ring *r, struct rte_event *qes, uint16_t nb_qes)
> +{
> +	const uint16_t read = r->read_idx;
> +	uint16_t write = r->write_idx;
> +	const uint16_t space = read + QID_IQ_MASK - write;
> +	uint16_t i;
> +
> +	if (space < nb_qes)
> +		nb_qes = space;
> +
> +	for (i = 0; i < nb_qes; i++, write++)
> +		r->ring[write & QID_IQ_MASK] = qes[i];
> +
> +	r->write_idx = write;
> +
> +	return nb_qes;
> +}
> +
> diff --git a/drivers/event/sw/sw_evdev.h b/drivers/event/sw/sw_evdev.h
> index 65f00e4..aaa8056 100644
> --- a/drivers/event/sw/sw_evdev.h
> +++ b/drivers/event/sw/sw_evdev.h
> @@ -49,6 +49,78 @@
>  #define SW_INFLIGHT_EVENTS_TOTAL 4096
>  /* allow for lots of over-provisioning */
>  #define MAX_SW_PROD_Q_DEPTH 4096
> +#define SW_FRAGMENTS_MAX 16
> +
> +/* have a new scheduling type for 1:1 queue to port links */
> +#define RTE_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)

IMO, it would be better to use an SW_ prefix for the internal sched types.
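
Something along these lines (the SW_ name below is only a suggestion), so the
internal type cannot be mistaken for one of the public RTE_SCHED_TYPE_* values:

	/* internal-only scheduling type for 1:1 queue to port links */
	#define SW_SCHED_TYPE_DIRECT (RTE_SCHED_TYPE_PARALLEL + 1)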

> +
> +#ifdef RTE_LIBRTE_PMD_EVDEV_SW_DEBUG
> +#define SW_LOG_INFO(fmt, args...) \
> +	RTE_LOG(INFO, PMD, "[%s] %s() line %u: " fmt "\n", \
> +			PMD_NAME, \
> +			__func__, __LINE__, ## args)
> +
> +#define SW_LOG_DBG(fmt, args...) \
> +	RTE_LOG(DEBUG, PMD, "[%s] %s() line %u: " fmt "\n", \

