[dpdk-dev] [PATCH v5 1/3] mempool: deprecate specific get and put functions

Lazaros Koromilas l at nofutznetworks.com
Wed Jun 29 01:47:36 CEST 2016


This commit introduces the API calls:

    rte_mempool_generic_put(mp, obj_table, n, is_mp)
    rte_mempool_generic_get(mp, obj_table, n, is_mc)

It deprecates the following API calls:

    rte_mempool_mp_put_bulk(mp, obj_table, n)
    rte_mempool_sp_put_bulk(mp, obj_table, n)
    rte_mempool_mp_put(mp, obj)
    rte_mempool_sp_put(mp, obj)
    rte_mempool_mc_get_bulk(mp, obj_table, n)
    rte_mempool_sc_get_bulk(mp, obj_table, n)
    rte_mempool_mc_get(mp, obj_p)
    rte_mempool_sc_get(mp, obj_p)

Cookie checks are now performed in a single place, inside the new generic
functions, instead of in each per-flavor wrapper.
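
For illustration, a minimal sketch of how a caller could migrate from the
deprecated variants to the generic calls (the function name and burst size
below are hypothetical, only the rte_mempool_generic_* signatures come from
this patch):

    #include <rte_mempool.h>

    #define BURST 32 /* hypothetical burst size */

    static int
    refill_and_drain(struct rte_mempool *mp)
    {
            void *objs[BURST];

            /* was rte_mempool_mc_get_bulk(mp, objs, BURST);
             * last argument is_mc = 1 selects the multi-consumer path */
            if (rte_mempool_generic_get(mp, objs, BURST, 1) < 0)
                    return -1;

            /* ... use the objects ... */

            /* was rte_mempool_sp_put_bulk(mp, objs, BURST);
             * last argument is_mp = 0 selects the single-producer path */
            rte_mempool_generic_put(mp, objs, BURST, 0);
            return 0;
    }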

Signed-off-by: Lazaros Koromilas <l at nofutznetworks.com>
Acked-by: Olivier Matz <olivier.matz at 6wind.com>
---
 app/test/test_mempool.c                    |  10 +--
 lib/librte_mempool/rte_mempool.h           | 115 ++++++++++++++++++++---------
 lib/librte_mempool/rte_mempool_version.map |   2 +
 3 files changed, 87 insertions(+), 40 deletions(-)

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index 31582d8..55c2cbc 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -338,7 +338,7 @@ static int test_mempool_single_producer(void)
 			printf("obj not owned by this mempool\n");
 			RET_ERR();
 		}
-		rte_mempool_sp_put(mp_spsc, obj);
+		rte_mempool_put(mp_spsc, obj);
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = NULL;
 		rte_spinlock_unlock(&scsp_spinlock);
@@ -371,7 +371,7 @@ static int test_mempool_single_consumer(void)
 		rte_spinlock_unlock(&scsp_spinlock);
 		if (i >= MAX_KEEP)
 			continue;
-		if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
+		if (rte_mempool_get(mp_spsc, &obj) < 0)
 			break;
 		rte_spinlock_lock(&scsp_spinlock);
 		scsp_obj_table[i] = obj;
@@ -477,13 +477,13 @@ test_mempool_basic_ex(struct rte_mempool *mp)
 	}
 
 	for (i = 0; i < MEMPOOL_SIZE; i ++) {
-		if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
+		if (rte_mempool_get(mp, &obj[i]) < 0) {
 			printf("test_mp_basic_ex fail to get object for [%u]\n",
 				i);
 			goto fail_mp_basic_ex;
 		}
 	}
-	if (rte_mempool_mc_get(mp, &err_obj) == 0) {
+	if (rte_mempool_get(mp, &err_obj) == 0) {
 		printf("test_mempool_basic_ex get an impossible obj\n");
 		goto fail_mp_basic_ex;
 	}
@@ -494,7 +494,7 @@ test_mempool_basic_ex(struct rte_mempool *mp)
 	}
 
 	for (i = 0; i < MEMPOOL_SIZE; i++)
-		rte_mempool_mp_put(mp, obj[i]);
+		rte_mempool_put(mp, obj[i]);
 
 	if (rte_mempool_full(mp) != 1) {
 		printf("test_mempool_basic_ex the mempool should be full\n");
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index 0a1777c..a48f46d 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -957,8 +957,8 @@ void rte_mempool_dump(FILE *f, struct rte_mempool *mp);
  *   Mono-producer (0) or multi-producers (1).
  */
 static inline void __attribute__((always_inline))
-__mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
-		    unsigned n, int is_mp)
+__mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+		      unsigned n, int is_mp)
 {
 	struct rte_mempool_cache *cache;
 	uint32_t index;
@@ -1016,7 +1016,7 @@ ring_enqueue:
 
 
 /**
- * Put several objects back in the mempool (multi-producers safe).
+ * Put several objects back in the mempool.
  *
  * @param mp
  *   A pointer to the mempool structure.
@@ -1024,16 +1024,37 @@ ring_enqueue:
  *   A pointer to a table of void * pointers (objects).
  * @param n
  *   The number of objects to add in the mempool from the obj_table.
+ * @param is_mp
+ *   Mono-producer (0) or multi-producers (1).
  */
 static inline void __attribute__((always_inline))
+rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table,
+			unsigned n, int is_mp)
+{
+	__mempool_check_cookies(mp, obj_table, n, 0);
+	__mempool_generic_put(mp, obj_table, n, is_mp);
+}
+
+/**
+ * @deprecated
+ * Put several objects back in the mempool (multi-producers safe).
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects).
+ * @param n
+ *   The number of objects to add in the mempool from the obj_table.
+ */
+__rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, 1);
+	rte_mempool_generic_put(mp, obj_table, n, 1);
 }
 
 /**
+ * @deprecated
  * Put several objects back in the mempool (NOT multi-producers safe).
  *
  * @param mp
@@ -1043,12 +1064,11 @@ rte_mempool_mp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param n
  *   The number of objects to add in the mempool from obj_table.
  */
-static inline void
+__rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_sp_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 			unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, 0);
+	rte_mempool_generic_put(mp, obj_table, n, 0);
 }
 
 /**
@@ -1069,11 +1089,12 @@ static inline void __attribute__((always_inline))
 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
 		     unsigned n)
 {
-	__mempool_check_cookies(mp, obj_table, n, 0);
-	__mempool_put_bulk(mp, obj_table, n, !(mp->flags & MEMPOOL_F_SP_PUT));
+	rte_mempool_generic_put(mp, obj_table, n,
+				!(mp->flags & MEMPOOL_F_SP_PUT));
 }
 
 /**
+ * @deprecated
  * Put one object in the mempool (multi-producers safe).
  *
  * @param mp
@@ -1081,13 +1102,14 @@ rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void __attribute__((always_inline))
+__rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_mp_put_bulk(mp, &obj, 1);
+	rte_mempool_generic_put(mp, &obj, 1, 1);
 }
 
 /**
+ * @deprecated
  * Put one object back in the mempool (NOT multi-producers safe).
  *
  * @param mp
@@ -1095,10 +1117,10 @@ rte_mempool_mp_put(struct rte_mempool *mp, void *obj)
  * @param obj
  *   A pointer to the object to be added.
  */
-static inline void __attribute__((always_inline))
+__rte_deprecated static inline void __attribute__((always_inline))
 rte_mempool_sp_put(struct rte_mempool *mp, void *obj)
 {
-	rte_mempool_sp_put_bulk(mp, &obj, 1);
+	rte_mempool_generic_put(mp, &obj, 1, 0);
 }
 
 /**
@@ -1134,8 +1156,8 @@ rte_mempool_put(struct rte_mempool *mp, void *obj)
  *   - <0: Error; code of ring dequeue function.
  */
 static inline int __attribute__((always_inline))
-__mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
-		   unsigned n, int is_mc)
+__mempool_generic_get(struct rte_mempool *mp, void **obj_table,
+		      unsigned n, int is_mc)
 {
 	int ret;
 	struct rte_mempool_cache *cache;
@@ -1197,7 +1219,7 @@ ring_dequeue:
 }
 
 /**
- * Get several objects from the mempool (multi-consumers safe).
+ * Get several objects from the mempool.
  *
  * If cache is enabled, objects will be retrieved first from cache,
  * subsequently from the common pool. Note that it can return -ENOENT when
@@ -1210,21 +1232,50 @@ ring_dequeue:
  *   A pointer to a table of void * pointers (objects) that will be filled.
  * @param n
  *   The number of objects to get from mempool to obj_table.
+ * @param is_mc
+ *   Mono-consumer (0) or multi-consumers (1).
  * @return
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
 static inline int __attribute__((always_inline))
-rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, unsigned n,
+			int is_mc)
 {
 	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n, 1);
+	ret = __mempool_generic_get(mp, obj_table, n, is_mc);
 	if (ret == 0)
 		__mempool_check_cookies(mp, obj_table, n, 1);
 	return ret;
 }
 
 /**
+ * @deprecated
+ * Get several objects from the mempool (multi-consumers safe).
+ *
+ * If cache is enabled, objects will be retrieved first from cache,
+ * subsequently from the common pool. Note that it can return -ENOENT when
+ * the local cache and common pool are empty, even if cache from other
+ * lcores are full.
+ *
+ * @param mp
+ *   A pointer to the mempool structure.
+ * @param obj_table
+ *   A pointer to a table of void * pointers (objects) that will be filled.
+ * @param n
+ *   The number of objects to get from mempool to obj_table.
+ * @return
+ *   - 0: Success; objects taken.
+ *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
+ */
+__rte_deprecated static inline int __attribute__((always_inline))
+rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
+{
+	return rte_mempool_generic_get(mp, obj_table, n, 1);
+}
+
+/**
+ * @deprecated
  * Get several objects from the mempool (NOT multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1243,14 +1294,10 @@ rte_mempool_mc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - -ENOENT: Not enough entries in the mempool; no object is
  *     retrieved.
  */
-static inline int __attribute__((always_inline))
+__rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n, 0);
-	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
-	return ret;
+	return rte_mempool_generic_get(mp, obj_table, n, 0);
 }
 
 /**
@@ -1278,15 +1325,12 @@ rte_mempool_sc_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 static inline int __attribute__((always_inline))
 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
 {
-	int ret;
-	ret = __mempool_get_bulk(mp, obj_table, n,
-				 !(mp->flags & MEMPOOL_F_SC_GET));
-	if (ret == 0)
-		__mempool_check_cookies(mp, obj_table, n, 1);
-	return ret;
+	return rte_mempool_generic_get(mp, obj_table, n,
+				       !(mp->flags & MEMPOOL_F_SC_GET));
 }
 
 /**
+ * @deprecated
  * Get one object from the mempool (multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1302,13 +1346,14 @@ rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned n)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+__rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_mc_get_bulk(mp, obj_p, 1);
+	return rte_mempool_generic_get(mp, obj_p, 1, 1);
 }
 
 /**
+ * @deprecated
  * Get one object from the mempool (NOT multi-consumers safe).
  *
  * If cache is enabled, objects will be retrieved first from cache,
@@ -1324,10 +1369,10 @@ rte_mempool_mc_get(struct rte_mempool *mp, void **obj_p)
  *   - 0: Success; objects taken.
  *   - -ENOENT: Not enough entries in the mempool; no object is retrieved.
  */
-static inline int __attribute__((always_inline))
+__rte_deprecated static inline int __attribute__((always_inline))
 rte_mempool_sc_get(struct rte_mempool *mp, void **obj_p)
 {
-	return rte_mempool_sc_get_bulk(mp, obj_p, 1);
+	return rte_mempool_generic_get(mp, obj_p, 1, 0);
 }
 
 /**
diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
index 9bcbf17..6d4fc4a 100644
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -22,6 +22,8 @@ DPDK_16.07 {
 	rte_mempool_check_cookies;
 	rte_mempool_create_empty;
 	rte_mempool_free;
+	rte_mempool_generic_get;
+	rte_mempool_generic_put;
 	rte_mempool_mem_iter;
 	rte_mempool_obj_iter;
 	rte_mempool_ops_table;
-- 
1.9.1


