@@ -31,15 +31,69 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <rte_typed_ring.h>
+#include <rte_random.h>
#include "test.h"
+#define RING_TYPE struct rte_mbuf *
+#define RING_TYPE_NAME rte_mbuf
+#include <rte_typed_ring.h>
+
+#define RING_SIZE 256
+#define BURST_SZ 32
+#define ITERATIONS (RING_SIZE * 2)
+
+static int
+test_mbuf_enqueue_dequeue(struct rte_mbuf_ring *r)
+{
+ struct rte_mbuf *inbufs[BURST_SZ];
+ struct rte_mbuf *outbufs[BURST_SZ];
+ unsigned int i, j;
+
+ for (i = 0; i < BURST_SZ; i++)
+ inbufs[i] = (void *)((uintptr_t)rte_rand());
+
+ for (i = 0; i < ITERATIONS; i++) {
+ uint16_t in = rte_mbuf_ring_enqueue_burst(r, inbufs, BURST_SZ);
+ if (in != BURST_SZ) {
+ printf("Error enqueuing mbuf ptrs\n");
+ return -1;
+ }
+ uint16_t out = rte_mbuf_ring_dequeue_burst(r, outbufs, BURST_SZ);
+ if (out != BURST_SZ) {
+ printf("Error dequeuing mbuf ptrs\n");
+ return -1;
+ }
+
+ for (j = 0; j < BURST_SZ; j++)
+ if (outbufs[j] != inbufs[j]) {
+ printf("Error: dequeued ptr != enqueued ptr\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/**
* test entry point
*/
static int
test_typed_ring(void)
{
+ struct rte_mbuf_ring *r;
+ r = rte_mbuf_ring_create("Test_mbuf_ring", RING_SIZE, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (r == NULL) {
+ fprintf(stderr, "ln %d: Error creating mbuf ring\n", __LINE__);
+ return -1;
+ }
+ rte_mbuf_ring_list_dump(stdout);
+
+ if (test_mbuf_enqueue_dequeue(r) != 0) {
+ rte_mbuf_ring_free(r);
+ return -1;
+ }
+
+ rte_mbuf_ring_free(r);
return 0;
}
@@ -111,6 +111,17 @@ extern "C" {
#include <rte_rwlock.h>
#include <rte_eal_memconfig.h>
+#define _CAT(a, b) a ## _ ## b
+#define CAT(a, b) _CAT(a, b)
+
+#ifndef RING_TYPE_NAME
+#error "Need RING_TYPE_NAME defined before including"
+#endif
+#ifndef RING_TYPE
+#error "Need RING_TYPE defined before including"
+#endif
+#define TYPE(x) CAT(RING_TYPE_NAME, x)
+
#define RTE_TAILQ_RING_NAME "RTE_RING"
enum rte_ring_queue_behavior {
@@ -161,7 +172,7 @@ struct rte_memzone; /* forward declaration, so as not to require memzone.h */
* values in a modulo-32bit base: that's why the overflow of the indexes is not
* a problem.
*/
-struct rte_ring {
+struct TYPE(ring) {
/*
* Note: this field kept the RTE_MEMZONE_NAMESIZE size due to ABI
* compatibility requirements, it could be changed to RTE_RING_NAMESIZE
@@ -170,7 +181,7 @@ struct rte_ring {
char name[RTE_MEMZONE_NAMESIZE]; /**< Name of the ring. */
int flags; /**< Flags supplied at creation. */
const struct rte_memzone *memzone;
- /**< Memzone, if any, containing the rte_ring */
+ /**< Memzone, if any, containing the ring */
/** Ring producer status. */
struct prod {
@@ -204,7 +215,7 @@ struct rte_ring {
* not volatile so need to be careful
* about compiler re-ordering
*/
- void *ring[] __rte_cache_aligned;
+ RING_TYPE ring[] __rte_cache_aligned;
};
#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
@@ -302,7 +313,7 @@ struct rte_ring {
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @param behavior
@@ -319,7 +330,7 @@ struct rte_ring {
* - n: Actual number of objects enqueued.
*/
static inline int __attribute__((always_inline))
-__rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
+TYPE(__ring_mp_do_enqueue)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior)
{
uint32_t prod_head, prod_next;
@@ -412,7 +423,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @param behavior
@@ -429,7 +440,7 @@ __rte_ring_mp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* - n: Actual number of objects enqueued.
*/
static inline int __attribute__((always_inline))
-__rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
+TYPE(__ring_sp_do_enqueue)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior)
{
uint32_t prod_head, cons_tail;
@@ -495,7 +506,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @param behavior
@@ -512,7 +523,7 @@ __rte_ring_sp_do_enqueue(struct rte_ring *r, void * const *obj_table,
*/
static inline int __attribute__((always_inline))
-__rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
+TYPE(__ring_mc_do_dequeue)(struct TYPE(ring) *r, RING_TYPE *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior)
{
uint32_t cons_head, prod_tail;
@@ -597,7 +608,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @param behavior
@@ -613,7 +624,7 @@ __rte_ring_mc_do_dequeue(struct rte_ring *r, void **obj_table,
* - n: Actual number of objects dequeued.
*/
static inline int __attribute__((always_inline))
-__rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
+TYPE(__ring_sc_do_dequeue)(struct TYPE(ring) *r, RING_TYPE *obj_table,
unsigned int n, enum rte_ring_queue_behavior behavior)
{
uint32_t cons_head, prod_tail;
@@ -665,7 +676,7 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
@@ -675,10 +686,10 @@ __rte_ring_sc_do_dequeue(struct rte_ring *r, void **obj_table,
* - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_mp_enqueue_bulk)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return TYPE(__ring_mp_do_enqueue)(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
@@ -687,7 +698,7 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
@@ -697,10 +708,10 @@ rte_ring_mp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_sp_enqueue_bulk)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return TYPE(__ring_sp_do_enqueue)(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
@@ -713,7 +724,7 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
@@ -723,13 +734,13 @@ rte_ring_sp_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_enqueue_bulk)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_bulk(r, obj_table, n);
+ return TYPE(ring_sp_enqueue_bulk)(r, obj_table, n);
else
- return rte_ring_mp_enqueue_bulk(r, obj_table, n);
+ return TYPE(ring_mp_enqueue_bulk)(r, obj_table, n);
}
/**
@@ -749,9 +760,9 @@ rte_ring_enqueue_bulk(struct rte_ring *r, void * const *obj_table,
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
+TYPE(ring_mp_enqueue)(struct TYPE(ring) *r, RING_TYPE obj)
{
- return rte_ring_mp_enqueue_bulk(r, &obj, 1);
+ return TYPE(ring_mp_enqueue_bulk)(r, &obj, 1);
}
/**
@@ -768,9 +779,9 @@ rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
+TYPE(ring_sp_enqueue)(struct TYPE(ring) *r, RING_TYPE obj)
{
- return rte_ring_sp_enqueue_bulk(r, &obj, 1);
+ return TYPE(ring_sp_enqueue_bulk)(r, &obj, 1);
}
/**
@@ -791,12 +802,12 @@ rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
* - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
*/
static inline int __attribute__((always_inline))
-rte_ring_enqueue(struct rte_ring *r, void *obj)
+TYPE(ring_enqueue)(struct TYPE(ring) *r, RING_TYPE obj)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue(r, obj);
+ return TYPE(ring_sp_enqueue)(r, obj);
else
- return rte_ring_mp_enqueue(r, obj);
+ return TYPE(ring_mp_enqueue)(r, obj);
}
/**
@@ -808,7 +819,7 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
@@ -817,9 +828,9 @@ rte_ring_enqueue(struct rte_ring *r, void *obj)
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_mc_dequeue_bulk)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return TYPE(__ring_mc_do_dequeue)(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
@@ -828,7 +839,7 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table,
* must be strictly positive.
@@ -838,9 +849,9 @@ rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_sc_dequeue_bulk)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED);
+ return TYPE(__ring_sc_do_dequeue)(r, obj_table, n, RTE_RING_QUEUE_FIXED);
}
/**
@@ -853,7 +864,7 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
@@ -862,12 +873,12 @@ rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_dequeue_bulk)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_bulk(r, obj_table, n);
+ return TYPE(ring_sc_dequeue_bulk)(r, obj_table, n);
else
- return rte_ring_mc_dequeue_bulk(r, obj_table, n);
+ return TYPE(ring_mc_dequeue_bulk)(r, obj_table, n);
}
/**
@@ -879,16 +890,16 @@ rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n)
* @param r
* A pointer to the ring structure.
* @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
+ * A pointer to a RING_TYPE pointer (object) that will be filled.
* @return
* - 0: Success; objects dequeued.
* - -ENOENT: Not enough entries in the ring to dequeue; no object is
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
+TYPE(ring_mc_dequeue)(struct TYPE(ring) *r, RING_TYPE *obj_p)
{
- return rte_ring_mc_dequeue_bulk(r, obj_p, 1);
+ return TYPE(ring_mc_dequeue_bulk)(r, obj_p, 1);
}
/**
@@ -897,16 +908,16 @@ rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
* @param r
* A pointer to the ring structure.
* @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
+ * A pointer to a RING_TYPE pointer (object) that will be filled.
* @return
* - 0: Success; objects dequeued.
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
+TYPE(ring_sc_dequeue)(struct TYPE(ring) *r, RING_TYPE *obj_p)
{
- return rte_ring_sc_dequeue_bulk(r, obj_p, 1);
+ return TYPE(ring_sc_dequeue_bulk)(r, obj_p, 1);
}
/**
@@ -919,19 +930,19 @@ rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
* @param r
* A pointer to the ring structure.
* @param obj_p
- * A pointer to a void * pointer (object) that will be filled.
+ * A pointer to a RING_TYPE pointer (object) that will be filled.
* @return
* - 0: Success, objects dequeued.
* - -ENOENT: Not enough entries in the ring to dequeue, no object is
* dequeued.
*/
static inline int __attribute__((always_inline))
-rte_ring_dequeue(struct rte_ring *r, void **obj_p)
+TYPE(ring_dequeue)(struct TYPE(ring) *r, RING_TYPE *obj_p)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue(r, obj_p);
+ return TYPE(ring_sc_dequeue)(r, obj_p);
else
- return rte_ring_mc_dequeue(r, obj_p);
+ return TYPE(ring_mc_dequeue)(r, obj_p);
}
/**
@@ -944,7 +955,7 @@ rte_ring_dequeue(struct rte_ring *r, void **obj_p)
* - 0: The ring is not full.
*/
static inline int
-rte_ring_full(const struct rte_ring *r)
+TYPE(ring_full)(const struct TYPE(ring) *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
@@ -961,7 +972,7 @@ rte_ring_full(const struct rte_ring *r)
* - 0: The ring is not empty.
*/
static inline int
-rte_ring_empty(const struct rte_ring *r)
+TYPE(ring_empty)(const struct TYPE(ring) *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
@@ -977,7 +988,7 @@ rte_ring_empty(const struct rte_ring *r)
* The number of entries in the ring.
*/
static inline unsigned
-rte_ring_count(const struct rte_ring *r)
+TYPE(ring_count)(const struct TYPE(ring) *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
@@ -993,7 +1004,7 @@ rte_ring_count(const struct rte_ring *r)
* The number of free entries in the ring.
*/
static inline unsigned
-rte_ring_free_count(const struct rte_ring *r)
+TYPE(ring_free_count)(const struct TYPE(ring) *r)
{
uint32_t prod_tail = r->prod.tail;
uint32_t cons_tail = r->cons.tail;
@@ -1009,17 +1020,17 @@ rte_ring_free_count(const struct rte_ring *r)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_mp_enqueue_burst)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
- return __rte_ring_mp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return TYPE(__ring_mp_do_enqueue)(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
@@ -1028,17 +1039,17 @@ rte_ring_mp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_sp_enqueue_burst)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
- return __rte_ring_sp_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return TYPE(__ring_sp_do_enqueue)(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
@@ -1051,20 +1062,20 @@ rte_ring_sp_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects).
+ * A pointer to a table of RING_TYPE pointers (objects).
* @param n
* The number of objects to add in the ring from the obj_table.
* @return
* - n: Actual number of objects enqueued.
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
+TYPE(ring_enqueue_burst)(struct TYPE(ring) *r, RING_TYPE const *obj_table,
unsigned int n)
{
if (r->prod.sp_enqueue)
- return rte_ring_sp_enqueue_burst(r, obj_table, n);
+ return TYPE(ring_sp_enqueue_burst)(r, obj_table, n);
else
- return rte_ring_mp_enqueue_burst(r, obj_table, n);
+ return TYPE(ring_mp_enqueue_burst)(r, obj_table, n);
}
/**
@@ -1078,16 +1089,16 @@ rte_ring_enqueue_burst(struct rte_ring *r, void * const *obj_table,
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_mc_dequeue_burst)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
- return __rte_ring_mc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return TYPE(__ring_mc_do_dequeue)(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
@@ -1098,16 +1109,16 @@ rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
* - n: Actual number of objects dequeued, 0 if ring is empty
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_sc_dequeue_burst)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
- return __rte_ring_sc_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
+ return TYPE(__ring_sc_do_dequeue)(r, obj_table, n, RTE_RING_QUEUE_VARIABLE);
}
/**
@@ -1120,27 +1131,24 @@ rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
* @param r
* A pointer to the ring structure.
* @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
+ * A pointer to a table of RING_TYPE pointers (objects) that will be filled.
* @param n
* The number of objects to dequeue from the ring to the obj_table.
* @return
* - Number of objects dequeued
*/
static inline unsigned int __attribute__((always_inline))
-rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n)
+TYPE(ring_dequeue_burst)(struct TYPE(ring) *r, RING_TYPE *obj_table, unsigned int n)
{
if (r->cons.sc_dequeue)
- return rte_ring_sc_dequeue_burst(r, obj_table, n);
+ return TYPE(ring_sc_dequeue_burst)(r, obj_table, n);
else
- return rte_ring_mc_dequeue_burst(r, obj_table, n);
+ return TYPE(ring_mc_dequeue_burst)(r, obj_table, n);
}
TAILQ_HEAD(rte_ring_list, rte_tailq_entry);
-static struct rte_tailq_elem rte_ring_tailq = {
- .name = RTE_TAILQ_RING_NAME,
-};
-EAL_REGISTER_TAILQ(rte_ring_tailq)
+extern struct rte_tailq_elem rte_ring_tailq;
/* true if x is a power of 2 */
#define POWEROF2(x) ((((x)-1) & (x)) == 0)
@@ -1150,7 +1158,7 @@ EAL_REGISTER_TAILQ(rte_ring_tailq)
*
* This function returns the number of bytes needed for a ring, given
* the number of elements in it. This value is the sum of the size of
- * the structure rte_ring and the size of the memory needed by the
+ * the ring structure and the size of the memory needed by the
* objects pointers. The value is aligned to a cache line size.
*
* @param count
@@ -1160,7 +1168,7 @@ EAL_REGISTER_TAILQ(rte_ring_tailq)
* - -EINVAL if count is not a power of 2.
*/
static inline ssize_t
-rte_ring_get_memsize(unsigned int count)
+TYPE(ring_get_memsize)(unsigned int count)
{
ssize_t sz;
@@ -1172,7 +1180,7 @@ rte_ring_get_memsize(unsigned int count)
return -EINVAL;
}
- sz = sizeof(struct rte_ring) + count * sizeof(void *);
+ sz = sizeof(struct TYPE(ring)) + count * sizeof(RING_TYPE);
sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
return sz;
}
@@ -1182,7 +1190,7 @@ rte_ring_get_memsize(unsigned int count)
*
* Initialize a ring structure in memory pointed by "r". The size of the
* memory area must be large enough to store the ring structure and the
- * object table. It is advised to use rte_ring_get_memsize() to get the
+ * object table. It is advised to use ring_get_memsize() to get the
* appropriate size.
*
* The ring size is set to *count*, which must be a power of two. Water
@@ -1203,33 +1211,33 @@ rte_ring_get_memsize(unsigned int count)
* @param flags
* An OR of the following:
* - RING_F_SP_ENQ: If this flag is set, the default behavior when
- * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * using ``enqueue()`` or ``enqueue_bulk()``
* is "single-producer". Otherwise, it is "multi-producers".
* - RING_F_SC_DEQ: If this flag is set, the default behavior when
- * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * using ``dequeue()`` or ``dequeue_bulk()``
* is "single-consumer". Otherwise, it is "multi-consumers".
* @return
* 0 on success, or a negative value on error.
*/
static inline int
-rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
+TYPE(ring_init)(struct TYPE(ring) *r, const char *name, unsigned int count,
unsigned int flags)
{
int ret;
/* compilation-time checks */
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
+ RTE_BUILD_BUG_ON((sizeof(struct TYPE(ring)) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_RING_SPLIT_PROD_CONS
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, cons) &
+ RTE_BUILD_BUG_ON((offsetof(struct TYPE(ring), cons) &
RTE_CACHE_LINE_MASK) != 0);
#endif
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, prod) &
+ RTE_BUILD_BUG_ON((offsetof(struct TYPE(ring), prod) &
RTE_CACHE_LINE_MASK) != 0);
#ifdef RTE_LIBRTE_RING_DEBUG
- RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
+ RTE_BUILD_BUG_ON((sizeof(struct rte_ring_debug_stats) &
RTE_CACHE_LINE_MASK) != 0);
- RTE_BUILD_BUG_ON((offsetof(struct rte_ring, stats) &
+ RTE_BUILD_BUG_ON((offsetof(struct TYPE(ring), stats) &
RTE_CACHE_LINE_MASK) != 0);
#endif
@@ -1254,7 +1262,7 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
* Create a new ring named *name* in memory.
*
* This function uses ``memzone_reserve()`` to allocate memory. Then it
- * calls rte_ring_init() to initialize an empty ring.
+ * calls ring_init() to initialize an empty ring.
*
* The new ring size is set to *count*, which must be a power of
* two. Water marking is disabled by default. The real usable ring size
@@ -1274,10 +1282,10 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
* @param flags
* An OR of the following:
* - RING_F_SP_ENQ: If this flag is set, the default behavior when
- * using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
+ * using ``enqueue()`` or ``enqueue_bulk()``
* is "single-producer". Otherwise, it is "multi-producers".
* - RING_F_SC_DEQ: If this flag is set, the default behavior when
- * using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
+ * using ``dequeue()`` or ``dequeue_bulk()``
* is "single-consumer". Otherwise, it is "multi-consumers".
* @return
* On success, the pointer to the new allocated ring. NULL on error with
@@ -1289,12 +1297,12 @@ rte_ring_init(struct rte_ring *r, const char *name, unsigned int count,
* - EEXIST - a memzone with the same name already exists
* - ENOMEM - no appropriate memory area found in which to create memzone
*/
-static inline struct rte_ring *
-rte_ring_create(const char *name, unsigned int count, int socket_id,
+static inline struct TYPE(ring) *
+TYPE(ring_create)(const char *name, unsigned int count, int socket_id,
unsigned int flags)
{
char mz_name[RTE_MEMZONE_NAMESIZE];
- struct rte_ring *r;
+ struct TYPE(ring) *r;
struct rte_tailq_entry *te;
const struct rte_memzone *mz;
ssize_t ring_size;
@@ -1304,7 +1312,7 @@ rte_ring_create(const char *name, unsigned int count, int socket_id,
ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
- ring_size = rte_ring_get_memsize(count);
+ ring_size = TYPE(ring_get_memsize)(count);
if (ring_size < 0) {
rte_errno = ring_size;
return NULL;
@@ -1334,7 +1342,7 @@ rte_ring_create(const char *name, unsigned int count, int socket_id,
if (mz != NULL) {
r = mz->addr;
/* no need to check return value here, checked the args above */
- rte_ring_init(r, name, count, flags);
+ TYPE(ring_init)(r, name, count, flags);
te->data = (void *) r;
r->memzone = mz;
@@ -1357,7 +1365,7 @@ rte_ring_create(const char *name, unsigned int count, int socket_id,
* Ring to free
*/
static inline void
-rte_ring_free(struct rte_ring *r)
+TYPE(ring_free)(struct TYPE(ring) *r)
{
struct rte_ring_list *ring_list = NULL;
struct rte_tailq_entry *te;
@@ -1366,11 +1374,12 @@ rte_ring_free(struct rte_ring *r)
return;
/*
- * Ring was not created with rte_ring_create,
+ * Ring was not created with create,
* therefore, there is no memzone to free.
*/
if (r->memzone == NULL) {
- RTE_LOG(ERR, RING, "Cannot free ring (not created with rte_ring_create()");
+ RTE_LOG(ERR, RING,
+ "Cannot free ring (not created with create())\n");
return;
}
@@ -1419,7 +1428,7 @@ rte_ring_free(struct rte_ring *r)
* - -EINVAL: Invalid water mark value.
*/
static inline int
-rte_ring_set_water_mark(struct rte_ring *r, unsigned int count)
+TYPE(ring_set_water_mark)(struct TYPE(ring) *r, unsigned int count)
{
if (count >= r->prod.size)
return -EINVAL;
@@ -1441,7 +1450,7 @@ rte_ring_set_water_mark(struct rte_ring *r, unsigned int count)
* A pointer to the ring structure.
*/
static inline void
-rte_ring_dump(FILE *f, const struct rte_ring *r)
+TYPE(ring_dump)(FILE *f, const struct TYPE(ring) *r)
{
#ifdef RTE_LIBRTE_RING_DEBUG
struct rte_ring_debug_stats sum;
@@ -1455,8 +1464,8 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
fprintf(f, " ch=%"PRIu32"\n", r->cons.head);
fprintf(f, " pt=%"PRIu32"\n", r->prod.tail);
fprintf(f, " ph=%"PRIu32"\n", r->prod.head);
- fprintf(f, " used=%u\n", rte_ring_count(r));
- fprintf(f, " avail=%u\n", rte_ring_free_count(r));
+ fprintf(f, " used=%u\n", TYPE(ring_count)(r));
+ fprintf(f, " avail=%u\n", TYPE(ring_free_count)(r));
if (r->prod.watermark == r->prod.size)
fprintf(f, " watermark=0\n");
else
@@ -1500,7 +1509,7 @@ rte_ring_dump(FILE *f, const struct rte_ring *r)
* A pointer to a file for output
*/
static inline void
-rte_ring_list_dump(FILE *f)
+TYPE(ring_list_dump)(FILE *f)
{
const struct rte_tailq_entry *te;
struct rte_ring_list *ring_list;
@@ -1510,7 +1519,7 @@ rte_ring_list_dump(FILE *f)
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
TAILQ_FOREACH(te, ring_list, next) {
- rte_ring_dump(f, (struct rte_ring *) te->data);
+ TYPE(ring_dump)(f, (struct TYPE(ring) *) te->data);
}
rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
@@ -1526,11 +1535,11 @@ rte_ring_list_dump(FILE *f)
* with rte_errno set appropriately. Possible rte_errno values include:
* - ENOENT - required entry not available to return.
*/
-static inline struct rte_ring *
-rte_ring_lookup(const char *name)
+static inline struct TYPE(ring) *
+TYPE(ring_lookup)(const char *name)
{
struct rte_tailq_entry *te;
- struct rte_ring *r = NULL;
+ struct TYPE(ring) *r = NULL;
struct rte_ring_list *ring_list;
ring_list = RTE_TAILQ_CAST(rte_ring_tailq.head, rte_ring_list);
@@ -1538,7 +1547,7 @@ rte_ring_lookup(const char *name)
rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
TAILQ_FOREACH(te, ring_list, next) {
- r = (struct rte_ring *) te->data;
+ r = (struct TYPE(ring) *) te->data;
if (strncmp(name, r->name, RTE_RING_NAMESIZE) == 0)
break;
}