[dpdk-dev] [PATCH v5 8/8] mempool: update range info to pool

santosh santosh.shukla at caviumnetworks.com
Thu Sep 7 10:56:49 CEST 2017


On Thursday 07 September 2017 02:00 PM, Olivier MATZ wrote:
> On Wed, Sep 06, 2017 at 04:58:34PM +0530, Santosh Shukla wrote:
>> HW pool managers, e.g. the Octeontx SoC, require software to program the
>> start and end addresses of the pool. Currently there is no such handle in
>> the external mempool API. Introduce an rte_mempool_update_range handle
>> which lets the HW pool manager know when the common layer selects a
>> hugepage: for each hugepage, its start/end address is passed to the HW
>> pool manager.
>>
>> Signed-off-by: Santosh Shukla <santosh.shukla at caviumnetworks.com>
>> Signed-off-by: Jerin Jacob <jerin.jacob at caviumnetworks.com>
>> ---
>>  lib/librte_mempool/rte_mempool.c           |  3 +++
>>  lib/librte_mempool/rte_mempool.h           | 22 ++++++++++++++++++++++
>>  lib/librte_mempool/rte_mempool_ops.c       | 13 +++++++++++++
>>  lib/librte_mempool/rte_mempool_version.map |  1 +
>>  4 files changed, 39 insertions(+)
>>
>> diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
>> index 38dab1067..65f17a7a7 100644
>> --- a/lib/librte_mempool/rte_mempool.c
>> +++ b/lib/librte_mempool/rte_mempool.c
>> @@ -363,6 +363,9 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
>>  	struct rte_mempool_memhdr *memhdr;
>>  	int ret;
>>  
>> +	/* update range info to mempool */
>> +	rte_mempool_ops_update_range(mp, vaddr, paddr, len);
>> +
>
> My understanding is that the 2 capability flags imply that the mempool
> is composed of only one memory area (rte_mempool_memhdr). Do you confirm?

yes.

> So in your case, you will be notified only once with the full range of
> the mempool. But if there are several memory areas, the function will
> be called each time.
>
> So I suggest to rename rte_mempool_ops_update_range() in
> rte_mempool_ops_register_memory_area(), which goal is to notify the mempool
> handler each time a new memory area is added.
>
> This should be properly explained in the API comments.
>
> I think this handler can return an error code (0 on success, negative on
> error). On error, rte_mempool_populate_phys() should fail.
>
I will rename it to rte_mempool_ops_register_memory_area() and change the
return type from void to int.
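
For clarity, here is a rough sketch of what the renamed typedef and wrapper
could look like with the int return (illustrative only, not the final patch;
it assumes the ops struct member is renamed to register_memory_area as well):

	/* Notify the mempool handler about a new memory area (sketch). */
	typedef int (*rte_mempool_ops_register_memory_area_t)(const struct rte_mempool *mp,
			char *vaddr, phys_addr_t paddr, size_t len);

	/* wrapper to notify a new memory area to the external mempool (sketch) */
	int
	rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
					     phys_addr_t paddr, size_t len)
	{
		struct rte_mempool_ops *ops;

		ops = rte_mempool_get_ops(mp->ops_index);

		/* no callback registered: report "not supported" to the caller */
		RTE_FUNC_PTR_OR_ERR_RET(ops->register_memory_area, -ENOTSUP);

		return ops->register_memory_area(mp, vaddr, paddr, len);
	}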


Proposed return semantics:
0  : success
<0 : failure, where
 - if the handler returns -ENOTSUP, that is treated as a valid case and the
   mempool layer does no error handling (see the call-site sketch below);
 - otherwise, rte_mempool_populate_phys() fails.
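
On the mempool side, the call in rte_mempool_populate_phys() could then look
roughly like this (a sketch, assuming the wrapper above):

	/* notify the handler about the new memory area */
	ret = rte_mempool_ops_register_memory_area(mp, vaddr, paddr, len);
	if (ret != -ENOTSUP && ret < 0)
		return ret;	/* real error: fail rte_mempool_populate_phys() */
	/* on -ENOTSUP the handler does not need the range info, so continue */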

Are you okay with this error return scheme? Please confirm.

Thanks.

>
>>  	/* create the internal ring if not already done */
>>  	if ((mp->flags & MEMPOOL_F_POOL_CREATED) == 0) {
>>  		ret = rte_mempool_ops_alloc(mp);
>> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
>> index 110ffb601..dfde31c35 100644
>> --- a/lib/librte_mempool/rte_mempool.h
>> +++ b/lib/librte_mempool/rte_mempool.h
>> @@ -405,6 +405,12 @@ typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
>>  typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
>>  		unsigned int *flags);
>>  
>> +/**
>> + * Update range info to mempool.
>> + */
>> +typedef void (*rte_mempool_update_range_t)(const struct rte_mempool *mp,
>> +		char *vaddr, phys_addr_t paddr, size_t len);
>> +
>>  /** Structure defining mempool operations structure */
>>  struct rte_mempool_ops {
>>  	char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */
>> @@ -417,6 +423,7 @@ struct rte_mempool_ops {
>>  	 * Get the pool capability
>>  	 */
>>  	rte_mempool_get_capabilities_t get_capabilities;
>> +	rte_mempool_update_range_t update_range; /**< Update range to mempool */
>>  } __rte_cache_aligned;
>>  
>>  #define RTE_MEMPOOL_MAX_OPS_IDX 16  /**< Max registered ops structs */
>> @@ -543,6 +550,21 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp);
>>  int
>>  rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
>>  					unsigned int *flags);
>> +/**
>> + * @internal wrapper for mempool_ops update_range callback.
>> + *
>> + * @param mp
>> + *   Pointer to the memory pool.
>> + * @param vaddr
>> + *   Pointer to the buffer virtual address
>> + * @param paddr
>> + *   Pointer to the buffer physical address
>> + * @param len
>> + *   Pool size
>> + */
>> +void
>> +rte_mempool_ops_update_range(const struct rte_mempool *mp,
>> +				char *vaddr, phys_addr_t paddr, size_t len);
>>  
>>  /**
>>   * @internal wrapper for mempool_ops free callback.
>> diff --git a/lib/librte_mempool/rte_mempool_ops.c b/lib/librte_mempool/rte_mempool_ops.c
>> index 9f605ae2d..549ade2d1 100644
>> --- a/lib/librte_mempool/rte_mempool_ops.c
>> +++ b/lib/librte_mempool/rte_mempool_ops.c
>> @@ -87,6 +87,7 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
>>  	ops->dequeue = h->dequeue;
>>  	ops->get_count = h->get_count;
>>  	ops->get_capabilities = h->get_capabilities;
>> +	ops->update_range = h->update_range;
>>  
>>  	rte_spinlock_unlock(&rte_mempool_ops_table.sl);
>>  
>> @@ -138,6 +139,18 @@ rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
>>  	return ops->get_capabilities(mp, flags);
>>  }
>>  
>> +/* wrapper to update range info to external mempool */
>> +void
>> +rte_mempool_ops_update_range(const struct rte_mempool *mp, char *vaddr,
>> +			     phys_addr_t paddr, size_t len)
>> +{
>> +	struct rte_mempool_ops *ops;
>> +
>> +	ops = rte_mempool_get_ops(mp->ops_index);
>> +	RTE_FUNC_PTR_OR_RET(ops->update_range);
>> +	ops->update_range(mp, vaddr, paddr, len);
>> +}
>> +
>>  /* sets mempool ops previously registered by rte_mempool_register_ops. */
>>  int
>>  rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name,
>> diff --git a/lib/librte_mempool/rte_mempool_version.map b/lib/librte_mempool/rte_mempool_version.map
>> index 3c3471507..2663001c3 100644
>> --- a/lib/librte_mempool/rte_mempool_version.map
>> +++ b/lib/librte_mempool/rte_mempool_version.map
>> @@ -46,5 +46,6 @@ DPDK_17.11 {
>>  	global:
>>  
>>  	rte_mempool_ops_get_capabilities;
>> +	rte_mempool_ops_update_range;
>>  
>>  } DPDK_16.07;
>> -- 
>> 2.11.0
>>


