--- a/drivers/mempool/octeontx/rte_mempool_octeontx.c
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -126,16 +126,6 @@ octeontx_fpavf_get_count(const struct rte_mempool *mp)
return octeontx_fpa_bufpool_free_count(pool);
}
-static int
-octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
-{
- RTE_SET_USED(mp);
- *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
- MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
- return 0;
-}
-
static ssize_t
octeontx_fpavf_calc_mem_size(const struct rte_mempool *mp,
uint32_t obj_num, uint32_t pg_shift,
@@ -208,7 +198,6 @@ static struct rte_mempool_ops octeontx_fpavf_ops = {
.enqueue = octeontx_fpavf_enqueue,
.dequeue = octeontx_fpavf_dequeue,
.get_count = octeontx_fpavf_get_count,
- .get_capabilities = octeontx_fpavf_get_capabilities,
.register_memory_area = octeontx_fpavf_register_memory_area,
.calc_mem_size = octeontx_fpavf_calc_mem_size,
.populate = octeontx_fpavf_populate,
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -237,15 +237,9 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
*/
static size_t
rte_mempool_xmem_size_int(uint32_t elt_num, size_t total_elt_sz,
- uint32_t pg_shift, unsigned int flags)
+ uint32_t pg_shift, __rte_unused unsigned int flags)
{
size_t obj_per_page, pg_num, pg_sz;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
if (total_elt_sz == 0)
return 0;
@@ -268,26 +262,15 @@ rte_mempool_calc_mem_size_def(const struct rte_mempool *mp,
size_t *min_chunk_size,
__rte_unused size_t *align)
{
- unsigned int mp_flags;
- int ret;
size_t total_elt_sz;
size_t mem_size;
- /* Get mempool capabilities */
- mp_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
mem_size = rte_mempool_xmem_size_int(obj_num, total_elt_sz, pg_shift,
- mp->flags | mp_flags);
+ mp->flags);
- if (mp_flags & MEMPOOL_F_CAPA_PHYS_CONTIG)
- *min_chunk_size = mem_size;
- else
- *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
+ *min_chunk_size = RTE_MAX((size_t)1 << pg_shift, total_elt_sz);
/* No extra align requirements by default */
@@ -312,18 +295,12 @@ rte_mempool_xmem_size(uint32_t elt_num, size_t total_elt_sz, uint32_t pg_shift,
ssize_t
rte_mempool_xmem_usage(__rte_unused void *vaddr, uint32_t elt_num,
size_t total_elt_sz, const rte_iova_t iova[], uint32_t pg_num,
- uint32_t pg_shift, unsigned int flags)
+ uint32_t pg_shift, __rte_unused unsigned int flags)
{
uint32_t elt_cnt = 0;
rte_iova_t start, end;
uint32_t iova_idx;
size_t pg_sz = (size_t)1 << pg_shift;
- unsigned int mask;
-
- mask = MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS | MEMPOOL_F_CAPA_PHYS_CONTIG;
- if ((flags & mask) == mask)
- /* alignment need one additional object */
- elt_num += 1;
/* if iova is NULL, assume contiguous memory */
if (iova == NULL) {
@@ -426,8 +403,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
void *opaque)
{
- unsigned total_elt_sz;
- unsigned int mp_cap_flags;
unsigned i = 0;
size_t off;
struct rte_mempool_memhdr *memhdr;
@@ -450,24 +425,6 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
if (mp->populated_size >= mp->size)
return -ENOSPC;
- total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
-
- /* Get mempool capabilities */
- mp_cap_flags = 0;
- ret = rte_mempool_ops_get_capabilities(mp, &mp_cap_flags);
- if ((ret < 0) && (ret != -ENOTSUP))
- return ret;
-
- /* Detect pool area has sufficient space for elements */
- if (mp_cap_flags & MEMPOOL_F_CAPA_PHYS_CONTIG) {
- if (len < total_elt_sz * mp->size) {
- RTE_LOG(ERR, MEMPOOL,
- "pool area %" PRIx64 " not enough\n",
- (uint64_t)len);
- return -ENOSPC;
- }
- }
-
memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
if (memhdr == NULL)
return -ENOMEM;
@@ -479,10 +436,7 @@ rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
memhdr->free_cb = free_cb;
memhdr->opaque = opaque;
- if (mp_cap_flags & MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS)
- /* align object start address to a multiple of total_elt_sz */
- off = total_elt_sz - ((uintptr_t)vaddr % total_elt_sz);
- else if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
+ if (mp->flags & MEMPOOL_F_NO_CACHE_ALIGN)
off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
else
off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_CACHE_LINE_SIZE) - vaddr;
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -274,24 +274,6 @@ struct rte_mempool {
#define MEMPOOL_F_SC_GET 0x0008 /**< Default get is "single-consumer".*/
#define MEMPOOL_F_POOL_CREATED 0x0010 /**< Internal: pool is created. */
#define MEMPOOL_F_NO_PHYS_CONTIG 0x0020 /**< Don't need physically contiguous objs. */
-/**
- * This capability flag is advertised by a mempool handler, if the whole
- * memory area containing the objects must be physically contiguous.
- * Note: This flag should not be passed by application.
- */
-#define MEMPOOL_F_CAPA_PHYS_CONTIG 0x0040
-/**
- * This capability flag is advertised by a mempool handler. Used for a case
- * where mempool driver wants object start address(vaddr) aligned to block
- * size(/ total element size).
- *
- * Note:
- * - This flag should not be passed by application.
- * Flag used for mempool driver only.
- * - Mempool driver must also set MEMPOOL_F_CAPA_PHYS_CONTIG flag along with
- * MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS.
- */
-#define MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS 0x0080
/**
* @internal When debug is enabled, store some statistics.
@@ -417,12 +399,6 @@ typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp,
typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp);
/**
- * Get the mempool capabilities.
- */
-typedef int (*rte_mempool_get_capabilities_t)(const struct rte_mempool *mp,
- unsigned int *flags);
-
-/**
* Notify new memory area to mempool.
*/
typedef int (*rte_mempool_ops_register_memory_area_t)
@@ -523,10 +499,6 @@ struct rte_mempool_ops {
rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */
rte_mempool_get_count get_count; /**< Get qty of available objs. */
/**
- * Get the mempool capabilities
- */
- rte_mempool_get_capabilities_t get_capabilities;
- /**
* Notify new memory area to mempool
*/
rte_mempool_ops_register_memory_area_t register_memory_area;
@@ -652,22 +624,6 @@ unsigned
rte_mempool_ops_get_count(const struct rte_mempool *mp);
/**
- * @internal wrapper for mempool_ops get_capabilities callback.
- *
- * @param mp [in]
- * Pointer to the memory pool.
- * @param flags [out]
- * Pointer to the mempool flags.
- * @return
- * - 0: Success; The mempool driver has advertised his pool capabilities in
- * flags param.
- * - -ENOTSUP - doesn't support get_capabilities ops (valid case).
- * - Otherwise, pool create fails.
- */
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags);
-/**
* @internal wrapper for mempool_ops register_memory_area callback.
* API to notify the mempool handler when a new memory area is added to pool.
*
--- a/lib/librte_mempool/rte_mempool_ops.c
+++ b/lib/librte_mempool/rte_mempool_ops.c
@@ -86,7 +86,6 @@ rte_mempool_register_ops(const struct rte_mempool_ops *h)
ops->enqueue = h->enqueue;
ops->dequeue = h->dequeue;
ops->get_count = h->get_count;
- ops->get_capabilities = h->get_capabilities;
ops->register_memory_area = h->register_memory_area;
ops->calc_mem_size = h->calc_mem_size;
ops->populate = h->populate;
@@ -128,19 +127,6 @@ rte_mempool_ops_get_count(const struct rte_mempool *mp)
return ops->get_count(mp);
}
-/* wrapper to get external mempool capabilities. */
-int
-rte_mempool_ops_get_capabilities(const struct rte_mempool *mp,
- unsigned int *flags)
-{
- struct rte_mempool_ops *ops;
-
- ops = rte_mempool_get_ops(mp->ops_index);
-
- RTE_FUNC_PTR_OR_ERR_RET(ops->get_capabilities, -ENOTSUP);
- return ops->get_capabilities(mp, flags);
-}
-
/* wrapper to notify new memory area to external mempool */
int
rte_mempool_ops_register_memory_area(const struct rte_mempool *mp, char *vaddr,
--- a/lib/librte_mempool/rte_mempool_version.map
+++ b/lib/librte_mempool/rte_mempool_version.map
@@ -45,7 +45,6 @@ DPDK_16.07 {
DPDK_17.11 {
global:
- rte_mempool_ops_get_capabilities;
rte_mempool_ops_register_memory_area;
rte_mempool_populate_iova;
rte_mempool_populate_iova_tab;