[dpdk-dev] [PATCH v2 3/5] eventdev: add ring structure for events

Bruce Richardson bruce.richardson at intel.com
Fri Jun 30 17:06:19 CEST 2017


Add a new rte_event_ring structure type and functions to allow events to
be passed core to core. This is needed because the standard rte_ring type
only works on pointers, while for events we want to copy the entire 16B
events themselves - not just pointers to them. The code makes extensive use
of the functions already defined in rte_ring.h.

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>

---
V2: use __rte_always_inline instead of local macro
---
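
For reviewers, a minimal usage sketch (not part of this patch) showing how
the new API is intended to be used to pass events by value between cores.
The ring name, size and burst size below are illustrative only:

#include <rte_errno.h>
#include <rte_event_ring.h>

static struct rte_event_ring *evt_ring;

/* create a ring with exactly 1024 usable slots on any NUMA socket */
static int
setup_ring(void)
{
	evt_ring = rte_event_ring_create("evt_exchange", 1024,
			SOCKET_ID_ANY, RING_F_EXACT_SZ);
	return evt_ring == NULL ? -rte_errno : 0;
}

/* producer core: events are copied into the ring, so the source array
 * may live on the caller's stack
 */
static void
send_events(const struct rte_event *evs, uint16_t nb)
{
	uint16_t space;
	unsigned int sent = rte_event_ring_enqueue_burst(evt_ring, evs,
			nb, &space);
	if (sent < nb) {
		/* ring full: retry, drop, or back off as appropriate */
	}
}

/* consumer core: dequeue up to a burst of 32 events by value */
static void
recv_events(void)
{
	struct rte_event evs[32];
	uint16_t remaining;
	unsigned int i, n;

	n = rte_event_ring_dequeue_burst(evt_ring, evs, RTE_DIM(evs),
			&remaining);
	for (i = 0; i < n; i++) {
		/* process evs[i] */
	}
}
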
 lib/Makefile                                 |   2 +-
 lib/librte_eventdev/Makefile                 |   2 +
 lib/librte_eventdev/rte_event_ring.c         | 207 ++++++++++++++++++
 lib/librte_eventdev/rte_event_ring.h         | 308 +++++++++++++++++++++++++++
 lib/librte_eventdev/rte_eventdev_version.map |   9 +
 5 files changed, 527 insertions(+), 1 deletion(-)
 create mode 100644 lib/librte_eventdev/rte_event_ring.c
 create mode 100644 lib/librte_eventdev/rte_event_ring.h

diff --git a/lib/Makefile b/lib/Makefile
index 07e1fd0c5..aef584e14 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -52,7 +52,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += librte_cryptodev
 DEPDIRS-librte_cryptodev := librte_eal librte_mempool librte_ring librte_mbuf
 DEPDIRS-librte_cryptodev += librte_kvargs
 DIRS-$(CONFIG_RTE_LIBRTE_EVENTDEV) += librte_eventdev
-DEPDIRS-librte_eventdev := librte_eal
+DEPDIRS-librte_eventdev := librte_eal librte_ring
 DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += librte_vhost
 DEPDIRS-librte_vhost := librte_eal librte_mempool librte_mbuf librte_ether
 DIRS-$(CONFIG_RTE_LIBRTE_HASH) += librte_hash
diff --git a/lib/librte_eventdev/Makefile b/lib/librte_eventdev/Makefile
index 629069ad6..0ed777d61 100644
--- a/lib/librte_eventdev/Makefile
+++ b/lib/librte_eventdev/Makefile
@@ -42,12 +42,14 @@ CFLAGS += $(WERROR_FLAGS)
 
 # library source files
 SRCS-y += rte_eventdev.c
+SRCS-y += rte_event_ring.c
 
 # export include files
 SYMLINK-y-include += rte_eventdev.h
 SYMLINK-y-include += rte_eventdev_pmd.h
 SYMLINK-y-include += rte_eventdev_pmd_pci.h
 SYMLINK-y-include += rte_eventdev_pmd_vdev.h
+SYMLINK-y-include += rte_event_ring.h
 
 # versioning export map
 EXPORT_MAP := rte_eventdev_version.map
diff --git a/lib/librte_eventdev/rte_event_ring.c b/lib/librte_eventdev/rte_event_ring.c
new file mode 100644
index 000000000..b14c2127f
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.c
@@ -0,0 +1,207 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/queue.h>
+#include <string.h>
+
+#include <rte_tailq.h>
+#include <rte_memzone.h>
+#include <rte_rwlock.h>
+#include <rte_eal_memconfig.h>
+#include "rte_event_ring.h"
+
+TAILQ_HEAD(rte_event_ring_list, rte_tailq_entry);
+
+static struct rte_tailq_elem rte_event_ring_tailq = {
+	.name = RTE_TAILQ_EVENT_RING_NAME,
+};
+EAL_REGISTER_TAILQ(rte_event_ring_tailq)
+
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+	unsigned int count, unsigned int flags)
+{
+	/* compilation-time checks */
+	RTE_BUILD_BUG_ON((sizeof(struct rte_event_ring) &
+			  RTE_CACHE_LINE_MASK) != 0);
+
+	/* init the ring structure */
+	return rte_ring_init(&r->r, name, count, flags);
+}
+
+/* create the ring */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+		unsigned int flags)
+{
+	char mz_name[RTE_MEMZONE_NAMESIZE];
+	struct rte_event_ring *r;
+	struct rte_tailq_entry *te;
+	const struct rte_memzone *mz;
+	ssize_t ring_size;
+	int mz_flags = 0;
+	struct rte_event_ring_list *ring_list = NULL;
+	const unsigned int requested_count = count;
+	int ret;
+
+	ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+		rte_event_ring_list);
+
+	/* for an exact size ring, round up from count to a power of two */
+	if (flags & RING_F_EXACT_SZ)
+		count = rte_align32pow2(count + 1);
+	else if (!rte_is_power_of_2(count)) {
+		rte_errno = EINVAL;
+		return NULL;
+	}
+
+	ring_size = sizeof(*r) + (count * sizeof(struct rte_event));
+
+	ret = snprintf(mz_name, sizeof(mz_name), "%s%s",
+		RTE_RING_MZ_PREFIX, name);
+	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+		rte_errno = ENAMETOOLONG;
+		return NULL;
+	}
+
+	te = rte_zmalloc("RING_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, RING, "Cannot reserve memory for tailq\n");
+		rte_errno = ENOMEM;
+		return NULL;
+	}
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/*
+	 * reserve a memory zone for this ring. If we can't get rte_config or
+	 * we are a secondary process, the memzone_reserve function will set
+	 * rte_errno for us appropriately - hence no check in this function
+	 */
+	mz = rte_memzone_reserve(mz_name, ring_size, socket_id, mz_flags);
+	if (mz != NULL) {
+		r = mz->addr;
+		/*
+		 * no need to check return value here, we already checked the
+		 * arguments above
+		 */
+		rte_event_ring_init(r, name, requested_count, flags);
+
+		te->data = (void *) r;
+		r->r.memzone = mz;
+
+		TAILQ_INSERT_TAIL(ring_list, te, next);
+	} else {
+		r = NULL;
+		RTE_LOG(ERR, RING, "Cannot reserve memory\n");
+		rte_free(te);
+	}
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	return r;
+}
+
+
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name)
+{
+	struct rte_tailq_entry *te;
+	struct rte_event_ring *r = NULL;
+	struct rte_event_ring_list *ring_list;
+
+	ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+			rte_event_ring_list);
+
+	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	TAILQ_FOREACH(te, ring_list, next) {
+		r = (struct rte_event_ring *) te->data;
+		if (strncmp(name, r->r.name, RTE_RING_NAMESIZE) == 0)
+			break;
+	}
+
+	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	if (te == NULL) {
+		rte_errno = ENOENT;
+		return NULL;
+	}
+
+	return r;
+}
+
+/* free the ring */
+void
+rte_event_ring_free(struct rte_event_ring *r)
+{
+	struct rte_event_ring_list *ring_list = NULL;
+	struct rte_tailq_entry *te;
+
+	if (r == NULL)
+		return;
+
+	/*
+	 * Ring was not created with rte_event_ring_create,
+	 * therefore, there is no memzone to free.
+	 */
+	if (r->r.memzone == NULL) {
+		RTE_LOG(ERR, RING,
+			"Cannot free ring (not created with rte_event_ring_create())\n");
+		return;
+	}
+
+	if (rte_memzone_free(r->r.memzone) != 0) {
+		RTE_LOG(ERR, RING, "Cannot free memory\n");
+		return;
+	}
+
+	ring_list = RTE_TAILQ_CAST(rte_event_ring_tailq.head,
+			rte_event_ring_list);
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* find out tailq entry */
+	TAILQ_FOREACH(te, ring_list, next) {
+		if (te->data == (void *) r)
+			break;
+	}
+
+	if (te == NULL) {
+		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+		return;
+	}
+
+	TAILQ_REMOVE(ring_list, te, next);
+
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	rte_free(te);
+}
diff --git a/lib/librte_eventdev/rte_event_ring.h b/lib/librte_eventdev/rte_event_ring.h
new file mode 100644
index 000000000..ea9b68854
--- /dev/null
+++ b/lib/librte_eventdev/rte_event_ring.h
@@ -0,0 +1,308 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * RTE Event Ring
+ *
+ * This provides a ring implementation for passing rte_event structures
+ * from one core to another.
+ */
+
+#ifndef _RTE_EVENT_RING_
+#define _RTE_EVENT_RING_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_ring.h>
+#include "rte_eventdev.h"
+
+#define RTE_TAILQ_EVENT_RING_NAME "RTE_EVENT_RING"
+
+/**
+ * Generic ring structure for passing rte_event objects from core to core.
+ *
+ * Based on the primitives given in the rte_ring library. Designed to be
+ * used inside software eventdev implementations and by applications
+ * directly as needed.
+ */
+struct rte_event_ring {
+	struct rte_ring r;
+};
+
+/**
+ * Returns the number of events in the ring
+ *
+ * @param r
+ *   pointer to the event ring
+ * @return
+ *   the number of events in the ring
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_count(const struct rte_event_ring *r)
+{
+	return rte_ring_count(&r->r);
+}
+
+/**
+ * Returns the amount of free space in the ring
+ *
+ * @param r
+ *   pointer to the event ring
+ * @return
+ *   the number of free slots in the ring, i.e. the number of events that
+ *   can be successfully enqueued before dequeue must be called
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_free_count(const struct rte_event_ring *r)
+{
+	return rte_ring_free_count(&r->r);
+}
+
+/**
+ * Enqueue a set of events onto a ring
+ *
+ * Note: this API enqueues by copying the events themselves onto the ring,
+ * rather than just placing a pointer to each event onto the ring. This
+ * means that statically-allocated events can safely be enqueued by this
+ * API.
+ *
+ * @param r
+ *   pointer to the event ring
+ * @param events
+ *   pointer to an array of struct rte_event objects
+ * @param n
+ *   number of events in the array to enqueue
+ * @param free_space
+ *   if non-null, is updated to indicate the amount of free space in the
+ *   ring once the enqueue has completed.
+ * @return
+ *   the number of elements, n', enqueued to the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_enqueue_burst(struct rte_event_ring *r,
+		const struct rte_event *events,
+		unsigned int n, uint16_t *free_space)
+{
+	uint32_t prod_head, prod_next;
+	uint32_t free_entries;
+
+	n = __rte_ring_move_prod_head(&r->r, r->r.prod.single, n,
+			RTE_RING_QUEUE_VARIABLE,
+			&prod_head, &prod_next, &free_entries);
+	if (n == 0)
+		goto end;
+
+	ENQUEUE_PTRS(&r->r, &r[1], prod_head, events, n, struct rte_event);
+	rte_smp_wmb();
+
+	update_tail(&r->r.prod, prod_head, prod_next, 1);
+end:
+	if (free_space != NULL)
+		*free_space = free_entries - n;
+	return n;
+}
+
+/**
+ * Dequeue a set of events from a ring
+ *
+ * Note: this API does not work with pointers to events; rather, it copies
+ * the events themselves to the destination ``events`` buffer.
+ *
+ * @param r
+ *   pointer to the event ring
+ * @param events
+ *   pointer to an array to hold the struct rte_event objects
+ * @param n
+ *   number of events that can be held in the ``events`` array
+ * @param available
+ *   if non-null, is updated to indicate the number of events remaining in
+ *   the ring once the dequeue has completed
+ * @return
+ *   the number of elements, n', dequeued from the ring, 0 <= n' <= n
+ */
+static __rte_always_inline unsigned int
+rte_event_ring_dequeue_burst(struct rte_event_ring *r,
+		struct rte_event *events,
+		unsigned int n, uint16_t *available)
+{
+	uint32_t cons_head, cons_next;
+	uint32_t entries;
+
+	n = __rte_ring_move_cons_head(&r->r, r->r.cons.single, n,
+			RTE_RING_QUEUE_VARIABLE,
+			&cons_head, &cons_next, &entries);
+	if (n == 0)
+		goto end;
+
+	DEQUEUE_PTRS(&r->r, &r[1], cons_head, events, n, struct rte_event);
+	rte_smp_rmb();
+
+	update_tail(&r->r.cons, cons_head, cons_next, 1);
+
+end:
+	if (available != NULL)
+		*available = entries - n;
+	return n;
+}
+
+/**
+ * Initializes an already-allocated ring structure
+ *
+ * @param r
+ *   pointer to the ring memory to be initialized
+ * @param name
+ *   name to be given to the ring
+ * @param count
+ *   the number of elements to be stored in the ring. If the flag
+ *   ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ *   usable space in the ring will be ``count - 1`` entries. If the flag
+ *   ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ *   limit - 1, and the usable space will be exactly that requested.
+ * @param flags
+ *   An OR of the following:
+ *    - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *      is "single-producer". Otherwise, it is "multi-producers".
+ *    - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *      is "single-consumer". Otherwise, it is "multi-consumers".
+ *    - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ *      be taken as the exact usable size of the ring, and as such does not
+ *      need to be a power of 2. The underlying ring memory should be a
+ *      power-of-2 size greater than the count value.
+ * @return
+ *   0 on success, or a negative value on error.
+ */
+int
+rte_event_ring_init(struct rte_event_ring *r, const char *name,
+	unsigned int count, unsigned int flags);
+
+/**
+ * Create an event ring structure
+ *
+ * This function allocates memory and initializes an event ring inside that
+ * memory.
+ *
+ * @param name
+ *   name to be given to the ring
+ * @param count
+ *   the number of elements to be stored in the ring. If the flag
+ *   ``RING_F_EXACT_SZ`` is not set, this must be a power of 2, and the actual
+ *   usable space in the ring will be ``count - 1`` entries. If the flag
+ *   ``RING_F_EXACT_SZ`` is set, this can be any value up to the ring size
+ *   limit - 1, and the usable space will be exactly that requested.
+ * @param socket_id
+ *   The *socket_id* argument is the socket identifier in case of
+ *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
+ *   constraint for the reserved zone.
+ * @param flags
+ *   An OR of the following:
+ *    - RING_F_SP_ENQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ *      is "single-producer". Otherwise, it is "multi-producers".
+ *    - RING_F_SC_DEQ: If this flag is set, the default behavior when
+ *      using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ *      is "single-consumer". Otherwise, it is "multi-consumers".
+ *    - RING_F_EXACT_SZ: If this flag is set, the ``count`` parameter is to
+ *      be taken as the exact usable size of the ring, and as such does not
+ *      need to be a power of 2. The underlying ring memory should be a
+ *      power-of-2 size greater than the count value.
+ * @return
+ *   On success, the pointer to the newly allocated ring. NULL on error with
+ *    rte_errno set appropriately. Possible errno values include:
+ *    - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
+ *    - E_RTE_SECONDARY - function was called from a secondary process instance
+ *    - EINVAL - count provided is not a power of 2
+ *    - ENOSPC - the maximum number of memzones has already been allocated
+ *    - EEXIST - a memzone with the same name already exists
+ *    - ENOMEM - no appropriate memory area found in which to create memzone
+ */
+struct rte_event_ring *
+rte_event_ring_create(const char *name, unsigned int count, int socket_id,
+		unsigned int flags);
+
+/**
+ * Search for an event ring based on its name
+ *
+ * @param name
+ *   The name of the ring.
+ * @return
+ *   The pointer to the ring matching the name, or NULL if not found,
+ *   with rte_errno set appropriately. Possible rte_errno values include:
+ *    - ENOENT - required entry not available to return.
+ */
+struct rte_event_ring *
+rte_event_ring_lookup(const char *name);
+
+/**
+ * De-allocate all memory used by the ring.
+ *
+ * @param r
+ *   Ring to free
+ */
+void
+rte_event_ring_free(struct rte_event_ring *r);
+
+/**
+ * Return the size of the event ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The size of the data store used by the ring.
+ *   NOTE: this is not the same as the usable space in the ring. To query that
+ *   use ``rte_event_ring_get_capacity()``.
+ */
+static inline unsigned int
+rte_event_ring_get_size(const struct rte_event_ring *r)
+{
+	return rte_ring_get_size(&r->r);
+}
+
+/**
+ * Return the number of elements which can be stored in the event ring.
+ *
+ * @param r
+ *   A pointer to the ring structure.
+ * @return
+ *   The usable size of the ring.
+ */
+static inline unsigned int
+rte_event_ring_get_capacity(const struct rte_event_ring *r)
+{
+	return rte_ring_get_capacity(&r->r);
+}
+#endif
diff --git a/lib/librte_eventdev/rte_eventdev_version.map b/lib/librte_eventdev/rte_eventdev_version.map
index 1fa6b333f..4c48e5f0a 100644
--- a/lib/librte_eventdev/rte_eventdev_version.map
+++ b/lib/librte_eventdev/rte_eventdev_version.map
@@ -42,3 +42,12 @@ DPDK_17.05 {
 
 	local: *;
 };
+
+DPDK_17.08 {
+	global:
+
+	rte_event_ring_create;
+	rte_event_ring_free;
+	rte_event_ring_init;
+	rte_event_ring_lookup;
+} DPDK_17.05;
-- 
2.13.0
