[dpdk-dev] [PATCH v2] mempool: reduce rte_mempool structure size

Olivier MATZ olivier.matz at 6wind.com
Wed Feb 10 17:59:35 CET 2016


Hi Keith,

Thank you for adding the RTE_NEXT_ABI markers. I think this is the way
described in the process: your changes will be available in the next
release (16.04) for people compiling with RTE_NEXT_ABI=y, and in 16.07
without any option (I'm just surprised that RTE_NEXT_ABI=y is the
default in the configs...).

I think a deprecation notice should also be added by this commit, in
doc/guides/rel_notes/deprecation.rst.

Please also find comments below.

On 02/09/2016 06:30 PM, Keith Wiles wrote:

> diff --git a/config/defconfig_x86_64-native-linuxapp-gcc b/config/defconfig_x86_64-native-linuxapp-gcc
> index 60baf5b..02e9ace 100644
> --- a/config/defconfig_x86_64-native-linuxapp-gcc
> +++ b/config/defconfig_x86_64-native-linuxapp-gcc
> @@ -40,3 +40,8 @@ CONFIG_RTE_ARCH_64=y
>  
>  CONFIG_RTE_TOOLCHAIN="gcc"
>  CONFIG_RTE_TOOLCHAIN_GCC=y
> +CONFIG_RTE_BUILD_SHARED_LIB=y
> +CONFIG_RTE_NEXT_ABI=n
> +CONFIG_RTE_EAL_IGB_UIO=n
> +CONFIG_RTE_LIBRTE_KNI=n
> +CONFIG_RTE_KNI_KMOD=n

I think this should not be part of the patch.

> @@ -672,6 +704,24 @@ rte_mempool_count(const struct rte_mempool *mp)
>  static unsigned
>  rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
>  {
> +#ifdef RTE_NEXT_ABI
> +	unsigned lcore_id;
> +	unsigned count = 0;
> +	unsigned cache_count;
> +
> +	fprintf(f, "  cache infos:\n");
> +	fprintf(f, "    cache_size=%"PRIu32"\n", mp->cache_size);
> +	if (mp->cache_size == 0)
> +		return count;
> +
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		cache_count = mp->local_cache[lcore_id].len;
> +		fprintf(f, "    cache_count[%u]=%u\n", lcore_id, cache_count);
> +		count += cache_count;
> +	}
> +	fprintf(f, "    total_cache_count=%u\n", count);
> +	return count;
> +#else
>  #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
>  	unsigned lcore_id;
>  	unsigned count = 0;

I think in this case we could avoid duplicating the code, without making
it unclear, by using the proper #ifdefs:

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || defined(RTE_NEXT_ABI)
	/* common code */
#ifdef RTE_NEXT_ABI
	if (mp->cache_size == 0)
		return count;
#endif
	/* common code */
#else
...
#endif
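
For instance, for rte_mempool_dump_cache(), reusing the body from your
hunk above (just a sketch, untested; the #else part stays as it is
today):

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || defined(RTE_NEXT_ABI)
	unsigned lcore_id;
	unsigned count = 0;
	unsigned cache_count;

	fprintf(f, "  cache infos:\n");
	fprintf(f, "    cache_size=%"PRIu32"\n", mp->cache_size);
#ifdef RTE_NEXT_ABI
	/* skip the per-lcore dump when the pool has no cache (new ABI) */
	if (mp->cache_size == 0)
		return count;
#endif
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		cache_count = mp->local_cache[lcore_id].len;
		fprintf(f, "    cache_count[%u]=%u\n", lcore_id, cache_count);
		count += cache_count;
	}
	fprintf(f, "    total_cache_count=%u\n", count);
	return count;
#else
	...
#endif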


> @@ -755,6 +806,26 @@ mempool_audit_cookies(const struct rte_mempool *mp)
>  #define mempool_audit_cookies(mp) do {} while(0)
>  #endif
>  
> +#ifdef RTE_NEXT_ABI
> +/* check cookies before and after objects */
> +static void
> +mempool_audit_cache(const struct rte_mempool *mp)
> +{
> +	/* check cache size consistency */
> +	unsigned lcore_id;
> +
> +	if (mp->cache_size == 0)
> +		return;
> +
> +	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
> +		if (mp->local_cache[lcore_id].len > mp->cache_flushthresh) {
> +			RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
> +				lcore_id);
> +			rte_panic("MEMPOOL: invalid cache len\n");
> +		}
> +	}
> +}
> +#else

same here

> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index 6e2390a..fc9b595 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -95,6 +95,19 @@ struct rte_mempool_debug_stats {
>  } __rte_cache_aligned;
>  #endif
>  
> +#ifdef RTE_NEXT_ABI
> +/**
> + * A structure that stores a per-core object cache.
> + */
> +struct rte_mempool_cache {
> +	unsigned len; /**< Cache len */
> +	/*
> +	 * Cache is allocated to this size to allow it to overflow in certain
> +	 * cases to avoid needless emptying of cache.
> +	 */
> +	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
> +} __rte_cache_aligned;
> +#else

same here
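
If the structure definition is really the same in both branches,
extending the existing guard should be enough, e.g.:

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || defined(RTE_NEXT_ABI)
/**
 * A structure that stores a per-core object cache.
 */
struct rte_mempool_cache {
	unsigned len; /**< Cache len */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || RTE_NEXT_ABI */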



> @@ -755,19 +793,25 @@ static inline void __attribute__((always_inline))
>  __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
>  		    unsigned n, int is_mp)
>  {
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> +#endif /* RTE_NEXT_ABI */
>  	struct rte_mempool_cache *cache;
>  	uint32_t index;
>  	void **cache_objs;
>  	unsigned lcore_id = rte_lcore_id();
>  	uint32_t cache_size = mp->cache_size;
>  	uint32_t flushthresh = mp->cache_flushthresh;
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
> +#endif /* RTE_NEXT_ABI */

This looks strange, and I don't think it works as intended: an #endif is
matched with the innermost open conditional, not with the directive named
in its comment, so when RTE_NEXT_ABI is defined the first #ifndef skips
everything down to its matching #endif, declarations included. Why not
simply:

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || defined(RTE_NEXT_ABI)
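
i.e., keeping the declarations from your hunk above (just a sketch,
untested):

#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || defined(RTE_NEXT_ABI)
	struct rte_mempool_cache *cache;
	uint32_t index;
	void **cache_objs;
	unsigned lcore_id = rte_lcore_id();
	uint32_t cache_size = mp->cache_size;
	uint32_t flushthresh = mp->cache_flushthresh;
#endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 || RTE_NEXT_ABI */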

>  	/* increment stat now, adding in mempool always success */
>  	__MEMPOOL_STAT_ADD(mp, put, n);
>  
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> +#endif /* RTE_NEXT_ABI */
>  	/* cache is not enabled or single producer or non-EAL thread */
>  	if (unlikely(cache_size == 0 || is_mp == 0 ||
>  		     lcore_id >= RTE_MAX_LCORE))
> @@ -802,7 +846,9 @@ __mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table,
>  	return;
>  
>  ring_enqueue:
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
> +#endif /* RTE_NEXT_ABI */
>  
>  	/* push remaining objects in ring */
>  #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
> @@ -946,7 +992,9 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
>  		   unsigned n, int is_mc)
>  {
>  	int ret;
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
> +#endif /* RTE_NEXT_ABI */
>  	struct rte_mempool_cache *cache;
>  	uint32_t index, len;
>  	void **cache_objs;
> @@ -992,7 +1040,9 @@ __mempool_get_bulk(struct rte_mempool *mp, void **obj_table,
>  	return 0;
>  
>  ring_dequeue:
> +#ifndef RTE_NEXT_ABI	/* Note: ifndef */
>  #endif /* RTE_MEMPOOL_CACHE_MAX_SIZE > 0 */
> +#endif /* RTE_NEXT_ABI */
>  
>  	/* get remaining objects from ring */
>  	if (is_mc)

Same in those cases.



Regards,
Olivier

