[dpdk-dev] [PATCH] mempool: allow for user-owned mempool caches
Lazaros Koromilas
l at nofutznetworks.com
Thu Mar 10 15:44:42 CET 2016
The mempool cache is currently only available to EAL threads as a
per-lcore resource. Change this so that the user can create and provide
their own cache on mempool get and put operations; this also works for
non-EAL threads. This commit introduces new API calls with the
'with_cache' suffix, while the existing calls default to the per-lcore
local cache.
Signed-off-by: Lazaros Koromilas <l at nofutznetworks.com>
---
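Notes: below is a minimal usage sketch for a non-EAL thread. The pool
setup, the thread function name "worker", and the burst/cache sizes are
illustrative only; the API calls are the ones added by this patch. The
patch adds no dedicated free function, so the sketch assumes rte_free()
as the counterpart of the rte_zmalloc() done in
rte_mempool_cache_create().

#include <rte_mempool.h>
#include <rte_malloc.h>

/* Runs in a non-EAL thread, e.g. one started with pthread_create(). */
static void *
worker(void *arg)
{
	struct rte_mempool *mp = arg;
	struct rte_mempool_cache *cache;
	void *objs[32];

	/*
	 * Non-EAL threads have no per-lcore cache, so create a
	 * user-owned one. Gets with n >= cache size bypass the cache,
	 * so size it above the typical burst. The cache is not
	 * thread-safe; each thread needs its own.
	 */
	cache = rte_mempool_cache_create(256);
	if (cache == NULL)
		return NULL;

	if (rte_mempool_get_bulk_with_cache(mp, objs, 32, cache) == 0) {
		/* ... use the objects ... */
		rte_mempool_put_bulk_with_cache(mp, objs, 32, cache);
	}

	/*
	 * Assumption: rte_free() releases the cache structure itself.
	 * Objects still held in the cache at this point are not
	 * returned to the pool.
	 */
	rte_free(cache);
	return NULL;
}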
lib/librte_mempool/rte_mempool.c | 65 +++++-
lib/librte_mempool/rte_mempool.h | 442 ++++++++++++++++++++++++++++++++++++---
2 files changed, 467 insertions(+), 40 deletions(-)
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index f8781e1..cebc2b7 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -375,6 +375,43 @@ rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
return usz;
}
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+static void
+mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
+{
+ cache->size = size;
+ cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
+ cache->len = 0;
+}
+
+/*
+ * Creates and initializes a cache for objects that are retrieved from and
+ * returned to an underlying mempool. The returned structure is of the
+ * same type as the per-lcore caches embedded in struct rte_mempool.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size)
+{
+ struct rte_mempool_cache *cache;
+
+ if (size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+
+ cache = rte_zmalloc("MEMPOOL_CACHE", sizeof(*cache), RTE_CACHE_LINE_SIZE);
+ if (cache == NULL) {
+ RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache!\n");
+ rte_errno = ENOMEM;
+ return NULL;
+ }
+
+ mempool_cache_init(cache, size);
+
+ return cache;
+}
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
#ifndef RTE_LIBRTE_XEN_DOM0
/* stub if DOM0 support not configured */
struct rte_mempool *
@@ -587,10 +624,18 @@ rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
mp->elt_size = objsz.elt_size;
mp->header_size = objsz.header_size;
mp->trailer_size = objsz.trailer_size;
- mp->cache_size = cache_size;
- mp->cache_flushthresh = CALC_CACHE_FLUSHTHRESH(cache_size);
+ mp->cache_size = cache_size; /* Keep this for backwards compat. */
mp->private_data_size = private_data_size;
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ {
+ unsigned lcore_id;
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
+ mempool_cache_init(&mp->local_cache[lcore_id],
+ cache_size);
+ }
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+
/* calculate address of the first element for continuous mempool. */
obj = (char *)mp + MEMPOOL_HEADER_SIZE(mp, pg_num) +
private_data_size;
@@ -648,8 +693,8 @@ rte_mempool_count(const struct rte_mempool *mp)
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
{
- unsigned lcore_id;
- if (mp->cache_size == 0)
+ unsigned lcore_id;
+ /* All per-lcore caches share the same size; avoid rte_lcore_id(),
+ * which is not valid for non-EAL threads and would index
+ * local_cache[] out of bounds. */
+ if (mp->local_cache[0].size == 0)
return count;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
@@ -673,13 +718,17 @@ rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
unsigned lcore_id;
unsigned count = 0;
+ uint32_t cache_size;
unsigned cache_count;
fprintf(f, " cache infos:\n");
- fprintf(f, " cache_size=%"PRIu32"\n", mp->cache_size);
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ cache_size = mp->local_cache[lcore_id].size;
+ fprintf(f, " cache_size[%u]=%"PRIu32"\n",
+ lcore_id, cache_size);
cache_count = mp->local_cache[lcore_id].len;
- fprintf(f, " cache_count[%u]=%u\n", lcore_id, cache_count);
+ fprintf(f, " cache_count[%u]=%"PRIu32"\n",
+ lcore_id, cache_count);
count += cache_count;
}
fprintf(f, " total_cache_count=%u\n", count);
@@ -761,7 +810,9 @@ mempool_audit_cache(const struct rte_mempool *mp)
/* check cache size consistency */
unsigned lcore_id;
for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
- if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
+ const struct rte_mempool_cache *cache;
+ cache = &mp->local_cache[lcore_id];
+ if (cache->len > cache->flushthresh) {
RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
lcore_id);
rte_panic("MEMPOOL: invalid cache len\n");
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 9745bf0..2dca37e 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -95,19 +95,19 @@ struct rte_mempool_debug_stats {
} __rte_cache_aligned;
#endif
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
/**
* A structure that stores a per-core object cache.
*/
struct rte_mempool_cache {
- unsigned len; /**< Cache len */
+ uint32_t size; /**< Size of the cache */
+ uint32_t flushthresh; /**< Threshold before we flush excess elements */
+ uint32_t len; /**< Current cache count */
/*
* Cache is allocated to this size to allow it to overflow in certain
* cases to avoid needless emptying of cache.
*/
void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
-#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/**
* A structure that stores the size of mempool elements.
@@ -185,8 +185,6 @@ struct rte_mempool {
int flags; /**< Flags of the mempool. */
uint32_t size; /**< Size of the mempool. */
uint32_t cache_size; /**< Size of per-lcore local cache. */
- uint32_t cache_flushthresh;
- /**< Threshold before we flush excess elements. */
uint32_t elt_size; /**< Size of an element. */
uint32_t header_size; /**< Size of header (before elt). */
@@ -748,36 +746,33 @@ void rte_mempool_dump(FILE *f, const struct rte_mempool *mp);
* @param n
* The number of objects to store back in the mempool, must be strictly
* positive.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
* @param is_mp
* Mono-producer (0) or multi-producers (1).
*/
static inline void __attribute__((always_inline))
-__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
- unsigned n, int is_mp)
+__mempool_put_bulk_with_cache(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n, struct rte_mempool_cache *cache,
+ int is_mp)
{
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- struct rte_mempool_cache *cache;
uint32_t index;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- uint32_t flushthresh = mp->cache_flushthresh;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
/* increment stat now, adding in mempool always success */
__MEMPOOL_STAT_ADD(mp, put, n);
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- /* cache is not enabled or single producer or non-EAL thread */
- if (unlikely(cache_size == 0 || is_mp == 0 ||
- lcore_id >= RTE_MAX_LCORE))
+ /* No cache provided or cache is not enabled or single producer */
+ if (unlikely(cache == NULL || cache->size == 0 || is_mp == 0))
goto ring_enqueue;
/* Go straight to ring if put would overflow mem allocated for cache */
if (unlikely(n > RTE_MEMPOOL_CACHE_MAX_SIZE))
goto ring_enqueue;
- cache = &mp->local_cache[lcore_id];
cache_objs = &cache->objs[cache->len];
/*
@@ -793,10 +788,10 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
cache->len += n;
- if (cache->len >= flushthresh) {
- rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache_size],
- cache->len - cache_size);
- cache->len = cache_size;
+ if (cache->len >= cache->flushthresh) {
+ rte_ring_mp_enqueue_bulk(mp->ring, &cache->objs[cache->size],
+ cache->len - cache->size);
+ cache->len = cache->size;
}
return;
@@ -822,6 +817,32 @@ ring_enqueue:
#endif
}
+/**
+ * @internal Put several objects back in the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to store back in the mempool, must be strictly
+ * positive.
+ * @param is_mp
+ * Mono-producer (0) or multi-producers (1).
+ */
+static inline void __attribute__((always_inline))
+__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
+ unsigned n, int is_mp)
+{
+ struct rte_mempool_cache *cache = NULL;
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ /* Use the default per-lcore mempool cache if it is an EAL thread. */
+ unsigned lcore_id = rte_lcore_id();
+ if (lcore_id < RTE_MAX_LCORE)
+ cache = &mp->local_cache[lcore_id];
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+ return __mempool_put_bulk_with_cache(mp, obj_table, n, cache, is_mp);
+}
+
/**
* Put several objects back in the mempool (multi-producers safe).
@@ -928,6 +949,135 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
}
/**
+ * Put several objects back in the mempool (multi-producers safe).
+ * Use a user-provided mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add to the mempool from the obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put_bulk_with_cache(struct rte_mempool *mp,
+ void * const *obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk_with_cache(mp, obj_table, n, cache, 1);
+}
+
+/**
+ * Put several objects back in the mempool (NOT multi-producers safe).
+ * Use a user-provided mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add to the mempool from obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void
+rte_mempool_sp_put_bulk_with_cache(struct rte_mempool *mp,
+ void * const *obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk_with_cache(mp, obj_table, n, cache, 0);
+}
+
+/**
+ * Put several objects back in the mempool.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ * Use a user-provided mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to add to the mempool from obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put_bulk_with_cache(struct rte_mempool *mp,
+ void * const *obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ __mempool_check_cookies(mp, obj_table, n, 0);
+ __mempool_put_bulk_with_cache(mp, obj_table, n, cache,
+ !(mp->flags & MEMPOOL_F_SP_PUT));
+}
+
+/**
+ * Put one object in the mempool (multi-producers safe).
+ * Use a user-provided mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_mp_put_with_cache(struct rte_mempool *mp, void *obj,
+ struct rte_mempool_cache *cache)
+{
+ rte_mempool_mp_put_bulk_with_cache(mp, &obj, 1, cache);
+}
+
+/**
+ * Put one object back in the mempool (NOT multi-producers safe).
+ * Use a user-provided mempool cache.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_sp_put_with_cache(struct rte_mempool *mp, void *obj,
+ struct rte_mempool_cache *cache)
+{
+ rte_mempool_sp_put_bulk_with_cache(mp, &obj, 1, cache);
+}
+
+/**
+ * Put one object back in the mempool.
+ * Use a user-provided mempool cache.
+ *
+ * This function calls the multi-producer or the single-producer
+ * version depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj
+ * A pointer to the object to be added.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ */
+static inline void __attribute__((always_inline))
+rte_mempool_put_with_cache(struct rte_mempool *mp, void *obj,
+ struct rte_mempool_cache *cache)
+{
+ rte_mempool_put_bulk_with_cache(mp, &obj, 1, cache);
+}
+
+/**
* @internal Get several objects from the mempool; used internally.
* @param mp
* A pointer to the mempool structure.
@@ -935,6 +1085,8 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* A pointer to a table of void * pointers (objects).
* @param n
* The number of objects to get, must be strictly positive.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
* @param is_mc
* Mono-consumer (0) or multi-consumers (1).
* @return
@@ -942,29 +1094,26 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
* - <0: Error; code of ring dequeue function.
*/
static inline int __attribute__((always_inline))
-__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
- unsigned n, int is_mc)
+__mempool_get_bulk_with_cache(struct rte_mempool *mp, void **obj_table,
+ unsigned n, struct rte_mempool_cache *cache,
+ int is_mc)
{
int ret;
#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- struct rte_mempool_cache *cache;
uint32_t index, len;
void **cache_objs;
- unsigned lcore_id = rte_lcore_id();
- uint32_t cache_size = mp->cache_size;
- /* cache is not enabled or single consumer */
- if (unlikely(cache_size == 0 || is_mc == 0 ||
- n >= cache_size || lcore_id >= RTE_MAX_LCORE))
+ /* No cache provided or cache is not enabled or single consumer */
+ if (unlikely(cache == NULL || cache->size == 0 || is_mc == 0 ||
+ n >= cache->size))
goto ring_dequeue;
- cache = &mp->local_cache[lcore_id];
cache_objs = cache->objs;
/* Can this be satisfied from the cache? */
if (cache->len < n) {
/* No. Backfill the cache first, and then fill from it */
- uint32_t req = n + (cache_size - cache->len);
+ uint32_t req = n + (cache->size - cache->len);
/* How many do we require i.e. number to fill the cache + the request */
ret = rte_ring_mc_dequeue_bulk(mp->ring, &cache->objs[cache->len], req);
@@ -1009,6 +1158,34 @@ ring_dequeue:
}
/**
+ * @internal Get several objects from the mempool; used internally.
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects).
+ * @param n
+ * The number of objects to get, must be strictly positive.
+ * @param is_mc
+ * Mono-consumer (0) or multi-consumers (1).
+ * @return
+ * - >=0: Success; number of objects supplied.
+ * - <0: Error; code of ring dequeue function.
+ */
+static inline int __attribute__((always_inline))
+__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
+ unsigned n, int is_mc)
+{
+ struct rte_mempool_cache *cache = NULL;
+#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
+ /* Use the default per-lcore mempool cache if it is an EAL thread. */
+ unsigned lcore_id = rte_lcore_id();
+ if (lcore_id < RTE_MAX_LCORE)
+ cache = &mp->local_cache[lcore_id];
+#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
+ return __mempool_get_bulk_with_cache(mp, obj_table, n, cache, is_mc);
+}
+
+/**
* Get several objects from the mempool (multi-consumers safe).
*
* If cache is enabled, objects will be retrieved first from cache,
@@ -1169,11 +1346,198 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
}
/**
+ * Get several objects from the mempool (multi-consumers safe).
+ * Use a user-provided mempool cache.
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk_with_cache(struct rte_mempool *mp,
+ void **obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ int ret;
+ ret = __mempool_get_bulk_with_cache(mp, obj_table, n, cache, 1);
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool (NOT multi-consumers safe).
+ * Use a user-provided mempool cache.
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is
+ * retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get_bulk_with_cache(struct rte_mempool *mp,
+ void **obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ int ret;
+ ret = __mempool_get_bulk_with_cache(mp, obj_table, n, cache, 0);
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get several objects from the mempool.
+ * Use a user-provided mempool cache.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_table
+ * A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ * The number of objects to get from the mempool to obj_table.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get_bulk_with_cache(struct rte_mempool *mp,
+ void **obj_table, unsigned n,
+ struct rte_mempool_cache *cache)
+{
+ int ret;
+ ret = __mempool_get_bulk_with_cache(mp, obj_table, n, cache,
+ !(mp->flags & MEMPOOL_F_SC_GET));
+ if (ret == 0)
+ __mempool_check_cookies(mp, obj_table, n, 1);
+ return ret;
+}
+
+/**
+ * Get one object from the mempool (multi-consumers safe).
+ * Use a user-provided mempool cache.
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_mc_get_with_cache(struct rte_mempool *mp, void **obj_p,
+ struct rte_mempool_cache *cache)
+{
+ return rte_mempool_mc_get_bulk_with_cache(mp, obj_p, 1, cache);
+}
+
+/**
+ * Get one object from the mempool (NOT multi-consumers safe).
+ * Use a user-provided mempool cache.
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_sc_get_with_cache(struct rte_mempool *mp, void **obj_p,
+ struct rte_mempool_cache *cache)
+{
+ return rte_mempool_sc_get_bulk_with_cache(mp, obj_p, 1, cache);
+}
+
+/**
+ * Get one object from the mempool.
+ * Use a user-provided mempool cache.
+ *
+ * This function calls the multi-consumer or the single-consumer
+ * version, depending on the default behavior that was specified at
+ * mempool creation time (see flags).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if caches from other
+ * lcores are full.
+ *
+ * @param mp
+ * A pointer to the mempool structure.
+ * @param obj_p
+ * A pointer to a void * pointer (object) that will be filled.
+ * @param cache
+ * A pointer to a mempool cache structure. May be NULL if not needed.
+ * @return
+ * - 0: Success; objects taken.
+ * - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+static inline int __attribute__((always_inline))
+rte_mempool_get_with_cache(struct rte_mempool *mp, void **obj_p,
+ struct rte_mempool_cache *cache)
+{
+ return rte_mempool_get_bulk_with_cache(mp, obj_p, 1, cache);
+}
+
+/**
* Return the number of entries in the mempool.
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1192,7 +1556,7 @@ unsigned rte_mempool_count(const struct rte_mempool *mp);
*
* When cache is enabled, this function has to browse the length of
* all lcores, so it should not be used in a data path, but only for
- * debug purposes.
+ * debug purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1210,7 +1574,7 @@ rte_mempool_free_count(const struct rte_mempool *mp)
*
* When cache is enabled, this function has to browse the length of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1229,7 +1593,7 @@ rte_mempool_full(const struct rte_mempool *mp)
*
* When cache is enabled, this function has to browse the length of all
* lcores, so it should not be used in a data path, but only for debug
- * purposes.
+ * purposes. User-owned mempool caches are not accounted for.
*
* @param mp
* A pointer to the mempool structure.
@@ -1401,6 +1765,18 @@ ssize_t rte_mempool_xmem_usage(void *vaddr, uint32_t elt_num, size_t elt_sz,
void rte_mempool_walk(void (*func)(const struct rte_mempool *, void *arg),
void *arg);
+/**
+ * Create a user-owned mempool cache. This can be used by non-EAL threads
+ * to enable caching when they interact with a mempool.
+ *
+ * @param size
+ * The size of the mempool cache. See rte_mempool_create()'s cache_size
+ * parameter description for more information. The same limits and
+ *   considerations apply here too.
+ * @return
+ *   A pointer to the mempool cache on success, or NULL on error, in
+ *   which case rte_errno is set: EINVAL if size exceeds
+ *   RTE_MEMPOOL_CACHE_MAX_SIZE, ENOMEM if the allocation fails.
+ */
+struct rte_mempool_cache *
+rte_mempool_cache_create(uint32_t size);
+
#ifdef __cplusplus
}
#endif
--
1.9.1