[dpdk-dev,v1] mempool/dpaa2: add DPAA2 hardware offloaded mempool

Message ID 1489754838-1455-2-git-send-email-hemant.agrawal@nxp.com
State Accepted
Delegated to: Ferruh Yigit
Headers show

Checks

Context Check Description
checkpatch success coding style OK
Intel-compilation fail apply patch file failure

Commit Message

Hemant Agrawal March 17, 2017, 12:47 p.m.
DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
QBMAN hardware block.
CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
is enabled.

This memory pool currently supports packet mbuf type blocks only.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 MAINTAINERS                                        |   1 +
 config/common_base                                 |   5 +
 config/defconfig_arm64-dpaa2-linuxapp-gcc          |   8 +
 drivers/Makefile                                   |   1 +
 drivers/bus/Makefile                               |   2 +
 drivers/mempool/Makefile                           |  40 +++
 drivers/mempool/dpaa2/Makefile                     |  72 ++++
 drivers/mempool/dpaa2/dpaa2_hw_mempool.c           | 374 +++++++++++++++++++++
 drivers/mempool/dpaa2/dpaa2_hw_mempool.h           |  91 +++++
 .../mempool/dpaa2/rte_mempool_dpaa2_version.map    |   8 +
 10 files changed, 602 insertions(+)
 create mode 100644 drivers/mempool/Makefile
 create mode 100644 drivers/mempool/dpaa2/Makefile
 create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.c
 create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.h
 create mode 100644 drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map

Comments

Jianbo Liu March 22, 2017, 6:09 a.m.
Hi Hemant,

The 03/17/2017 18:17, Hemant Agrawal wrote:
> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
> QBMAN hardware block.
> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
> is enabled.
>
> This memory pool currently supports packet mbuf type blocks only.

Do you plan to support multi-process for this hardware mempool?

>
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> ---
>  MAINTAINERS                                        |   1 +
>  config/common_base                                 |   5 +
>  config/defconfig_arm64-dpaa2-linuxapp-gcc          |   8 +
>  drivers/Makefile                                   |   1 +
>  drivers/bus/Makefile                               |   2 +
>  drivers/mempool/Makefile                           |  40 +++
>  drivers/mempool/dpaa2/Makefile                     |  72 ++++
>  drivers/mempool/dpaa2/dpaa2_hw_mempool.c           | 374 +++++++++++++++++++++
>  drivers/mempool/dpaa2/dpaa2_hw_mempool.h           |  91 +++++
>  .../mempool/dpaa2/rte_mempool_dpaa2_version.map    |   8 +
>  10 files changed, 602 insertions(+)
>  create mode 100644 drivers/mempool/Makefile
>  create mode 100644 drivers/mempool/dpaa2/Makefile
>  create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.c
>  create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.h
>  create mode 100644 drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
>
IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
Hemant Agrawal March 23, 2017, 4:57 p.m.
On 3/22/2017 11:39 AM, Jianbo Liu wrote:
> Hi Hemant,
>
> The 03/17/2017 18:17, Hemant Agrawal wrote:
>> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
>> QBMAN hardware block.
>> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
>> is enabled.
>>
>> This memory pool currently supports packet mbuf type blocks only.
>
> Do you plan to support multi-process for this hardware mempool?
>

No, currently we don't support multi-process. We plan to do it in the future.

>>
>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
>> ---
>>  MAINTAINERS                                        |   1 +
>>  config/common_base                                 |   5 +
>>  config/defconfig_arm64-dpaa2-linuxapp-gcc          |   8 +
>>  drivers/Makefile                                   |   1 +
>>  drivers/bus/Makefile                               |   2 +
>>  drivers/mempool/Makefile                           |  40 +++
>>  drivers/mempool/dpaa2/Makefile                     |  72 ++++
>>  drivers/mempool/dpaa2/dpaa2_hw_mempool.c           | 374 +++++++++++++++++++++
>>  drivers/mempool/dpaa2/dpaa2_hw_mempool.h           |  91 +++++
>>  .../mempool/dpaa2/rte_mempool_dpaa2_version.map    |   8 +
>>  10 files changed, 602 insertions(+)
>>  create mode 100644 drivers/mempool/Makefile
>>  create mode 100644 drivers/mempool/dpaa2/Makefile
>>  create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.c
>>  create mode 100644 drivers/mempool/dpaa2/dpaa2_hw_mempool.h
>>  create mode 100644 drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
>>
> IMPORTANT NOTICE: The contents of this email and any attachments are confidential and may also be privileged. If you are not the intended recipient, please notify the sender immediately and do not disclose the contents to any other person, use it for any purpose, or store or copy the information in any medium. Thank you.
>
Ferruh Yigit March 24, 2017, 2:57 p.m.
On 3/17/2017 12:47 PM, Hemant Agrawal wrote:
> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
> QBMAN hardware block.
> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
> is enabled.
> 
> This memory pool currently supports packet mbuf type blocks only.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Applied to dpdk-next-net/master, thanks.
Ferruh Yigit March 24, 2017, 3:59 p.m.
On 3/24/2017 2:57 PM, Ferruh Yigit wrote:
> On 3/17/2017 12:47 PM, Hemant Agrawal wrote:
>> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
>> QBMAN hardware block.
>> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
>> is enabled.
>>
>> This memory pool currently supports packet mbuf type blocks only.
>>
>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
> 
> Applied to dpdk-next-net/master, thanks.

Hi Olivier,

I get this to next-net, since dpaa2 net driver depends this one.

But were you planning any review on the code? Or is it good to go as it is?


Thanks,
ferruh
Olivier Matz March 24, 2017, 4:31 p.m.
Hi Ferruh,

On Fri, 24 Mar 2017 15:59:50 +0000, Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> On 3/24/2017 2:57 PM, Ferruh Yigit wrote:
> > On 3/17/2017 12:47 PM, Hemant Agrawal wrote:  
> >> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
> >> QBMAN hardware block.
> >> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
> >> is enabled.
> >>
> >> This memory pool currently supports packet mbuf type blocks only.
> >>
> >> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>  
> > 
> > Applied to dpdk-next-net/master, thanks.  
> 
> Hi Olivier,
> 
> I get this to next-net, since dpaa2 net driver depends this one.
> 
> But were you planning any review on the code? Or is it good to go as it is?

Yes, but I'm afraid I won't be able to do it today.

From high level, I'm still a little puzzled by the amount of references
to mbuf in a mempool handler code, which should theoretically handle any
kind of objects.

Is it planned to support other kind of objects?
Does this driver pass the mempool autotest?
Can the user be aware of these limitations?


Thanks,
Olivier
Ferruh Yigit March 24, 2017, 4:38 p.m.
On 3/24/2017 4:31 PM, Olivier Matz wrote:
> Hi Ferruh,
> 
> On Fri, 24 Mar 2017 15:59:50 +0000, Ferruh Yigit <ferruh.yigit@intel.com> wrote:
>> On 3/24/2017 2:57 PM, Ferruh Yigit wrote:
>>> On 3/17/2017 12:47 PM, Hemant Agrawal wrote:  
>>>> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
>>>> QBMAN hardware block.
>>>> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
>>>> is enabled.
>>>>
>>>> This memory pool currently supports packet mbuf type blocks only.
>>>>
>>>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>  
>>>
>>> Applied to dpdk-next-net/master, thanks.  
>>
>> Hi Olivier,
>>
>> I get this to next-net, since dpaa2 net driver depends this one.
>>
>> But were you planning any review on the code? Or is it good to go as it is?
> 
> Yes, but I'm afraid I won't be able to do it today.

OK, when you done your review, we can act according its result.

I just want to remind the dependency chain, dpaa2 net depends this
patch, and dpaa2 crypto depends net one.
An early integration from next-net required so that next-crypto can
finish its work before integration deadline.

Thanks,
ferruh

> 
> From high level, I'm still a little puzzled by the amount of references
> to mbuf in a mempool handler code, which should theorically handle any
> kind of objects.
> 
> Is it planned to support other kind of objects?
> Does this driver passes the mempool autotest?
> Can the user be aware of these limitations?
> 
> 
> Thanks,
> Olivier
>
Olivier Matz March 24, 2017, 4:42 p.m.
On Fri, 24 Mar 2017 16:38:04 +0000, Ferruh Yigit <ferruh.yigit@intel.com> wrote:
> On 3/24/2017 4:31 PM, Olivier Matz wrote:
> > Hi Ferruh,
> > 
> > On Fri, 24 Mar 2017 15:59:50 +0000, Ferruh Yigit <ferruh.yigit@intel.com> wrote:  
> >> On 3/24/2017 2:57 PM, Ferruh Yigit wrote:  
> >>> On 3/17/2017 12:47 PM, Hemant Agrawal wrote:    
> >>>> DPAA2 Hardware Mempool handlers allow enqueue/dequeue from NXP's
> >>>> QBMAN hardware block.
> >>>> CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS is set to 'dpaa2', if the pool
> >>>> is enabled.
> >>>>
> >>>> This memory pool currently supports packet mbuf type blocks only.
> >>>>
> >>>> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>    
> >>>
> >>> Applied to dpdk-next-net/master, thanks.    
> >>
> >> Hi Olivier,
> >>
> >> I get this to next-net, since dpaa2 net driver depends this one.
> >>
> >> But were you planning any review on the code? Or is it good to go as it is?  
> > 
> > Yes, but I'm afraid I won't be able to do it today.  
> 
> OK, when you done your review, we can act according its result.
> 
> I just want to remind the dependency chain, dpaa2 net depends this
> patch, and dpaa2 crypto depends net one.
> An early integration from next-net required so that next-crypto can
> finish its work before integration deadline.

Understood. Thanks.


> 
> Thanks,
> ferruh
> 
> > 
> > From high level, I'm still a little puzzled by the amount of references
> > to mbuf in a mempool handler code, which should theorically handle any
> > kind of objects.
> > 
> > Is it planned to support other kind of objects?
> > Does this driver passes the mempool autotest?
> > Can the user be aware of these limitations?
> > 
> > 
> > Thanks,
> > Olivier
> >   
>
Olivier Matz March 27, 2017, 4:30 p.m.
Hi Hemant,

On Fri, 24 Mar 2017 17:42:46 +0100, Olivier Matz <olivier.matz@6wind.com> wrote:
> > > From high level, I'm still a little puzzled by the amount of references
> > > to mbuf in a mempool handler code, which should theorically handle any
> > > kind of objects.
> > > 
> > > Is it planned to support other kind of objects?
> > > Does this driver passes the mempool autotest?
> > > Can the user be aware of these limitations?

Some more comments.

I think the mempool model as it is today in DPDK does not match your
driver model.

For instance, the fact that the hardware is able to return the mbuf in the
pool by itself makes me think that the mbuf rework patchset [1] can break
your driver. Especially this patch [2], that expects that m->refcnt=1,
m->nb_segs=1 and m->next=NULL when allocating from a pool.

- Can this handler can be used with another driver?
- Can your driver be used with another mempool handler?
- Is the dpaa driver the only driver that would take advantage of
  the mempool handler? Will it work with cloned mbufs?


Defining a flag like this in your private code should not be done:

   #define MEMPOOL_F_HW_PKT_POOL (1 << ((sizeof(int) * 8) - 1))

Nothing prevents to break it if someone touches the generic flags in
mempool. And hope that no other driver does the same :)

Maybe you can do the same without flag, for instance by checking if
(m->pool == pmd->pool)?



I think a new mempool handler should pass the mempool tests, or at least
we should add a new API that would describe the capabilities or something
like that (for instance: support mbuf pool only, support multiprocess).


To conclude, I'm quite reserved.
Well, all the code is in driver/, meaning it does not pollute the rest.


Regards,
Olivier

[1] http://dpdk.org/ml/archives/dev/2017-March/059693.html
[2] http://dpdk.org/dev/patchwork/patch/21602/
Hemant Agrawal March 28, 2017, 9:45 a.m.
Hi Olivier,

On 3/27/2017 10:00 PM, Olivier Matz wrote:
> Hi Hemant,
>
> On Fri, 24 Mar 2017 17:42:46 +0100, Olivier Matz <olivier.matz@6wind.com> wrote:
>>>> From high level, I'm still a little puzzled by the amount of references
>>>> to mbuf in a mempool handler code, which should theorically handle any
>>>> kind of objects.
>>>>
>>>> Is it planned to support other kind of objects?

We do have plans. However, we also have reservations about using hw
mempools for non-packet objects. They generally give an advantage when
working seamlessly with NICs for rx/tx of packets.

>>>> Does this driver passes the mempool autotest?

We have tested it internally by manually changing the mempool autotest
(mempool name from "stack" to "dpaa2"). We still need to figure out
how to pass the default pool name to the autotest.

>>>> Can the user be aware of these limitations?

That opens a new question: do we need documentation for
drivers/mempools as well?
Or, for the time being, should we add this to the NXP PMD driver limitations?

>
> Some more comments.
>
> I think the mempool model as it is today in DPDK does not match your
> driver model.
>
> For instance, the fact that the hardware is able return the mbuf in the
> pool by itself makes me think that the mbuf rework patchset [1] can break
> your driver. Especially this patch [2], that expects that m->refcnt=1,
> m->nb_segs=1 and m->next=NULL when allocating from a pool.
>
Yes! we will need to give a small patch, once your patch is applied.

> - Can this handler can be used with another driver?

NXP mempool is specific to NXP hw only. It is designed to work with
NXP DPAA2 type NICs. There is no limitation in using it with any other
PCI NIC connected to an NXP board. We have tested it with ixgbe (82599)
interworking with DPAA2 interfaces.

> - Can your driver be used with another mempool handler?
No, NXP DPAA2 PMD need NXP mempool only - at least for RX packets.
In TX, we can send non-NXP DPAA2 pool packets. (The HW will not free 
them autonomously, but TX confirm will be required.)

> - Is the dpaa driver the only driver that would take advantage of
>   the mempool handler? Will it work with cloned mbufs?
>
For now, the dpaa driver is the only user. We will be sending cloned-mbuf
support patches once the basic driver is upstream.

> Defining a flag like this in your private code should not be done:
>
>    #define MEMPOOL_F_HW_PKT_POOL (1 << ((sizeof(int) * 8) - 1))
>
> Nothing prevents to break it if someone touches the generic flags in
> mempool. And hope that no other driver does the same :)

Yes! I agree. We need to work with you to improve the overall hw mempool 
support infrastructure:

1. When transmitting packet, the HW need to differentiate between HW 
supported pool vs non-HW supported pool packets. (Application may choose 
to have multiple pools of different type).

2. Option to use a different default mempool when used with virtio-net 
in VM.  You shared your opinion & some possible ways a while back. Now, 
we are seeing hw mempools actually coming to DPDK. So, We need to 
re-start this discussion.

>
> Maybe you can do the same without flag, for instance by checking if
> (m->pool == pmd->pool)?

This may not work, if more than one instance of hw mempool is in use.

>
>
> I think a new mempool handler should pass the mempool tests, or at least
> we should add a new API that would describe the capabilities or something
> like that (for instance: support mbuf pool only, support multiprocess).
>
Let me start working on this ASAP. We will experiment and send some RFCs.

>
> To conclude, I'm quite reserved.
> Well, all the code is in driver/, meaning it does not pollute the rest.

Thanks and understood your concerns.

>
>
> Regards,
> Olivier
>
> [1] http://dpdk.org/ml/archives/dev/2017-March/059693.html
> [2] http://dpdk.org/dev/patchwork/patch/21602/
>

Patch hide | download patch | download mbox

diff --git a/MAINTAINERS b/MAINTAINERS
index e9b1ac1..229b919 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -352,6 +352,7 @@  F: doc/guides/nics/nfp.rst
 NXP dpaa2
 M: Hemant Agrawal <hemant.agrawal@nxp.com>
 F: drivers/bus/fslmc/
+F: drivers/mempool/dpaa2/
 
 QLogic bnx2x
 M: Harish Patil <harish.patil@cavium.com>
diff --git a/config/common_base b/config/common_base
index dfe5db2..1c3bbe0 100644
--- a/config/common_base
+++ b/config/common_base
@@ -292,6 +292,11 @@  CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER=n
 CONFIG_RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX=n
 
 #
+# Compile Support Libraries for NXP DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
+
+#
 # Compile NXP DPAA2 FSL-MC Bus
 #
 CONFIG_RTE_LIBRTE_FSLMC_BUS=n
diff --git a/config/defconfig_arm64-dpaa2-linuxapp-gcc b/config/defconfig_arm64-dpaa2-linuxapp-gcc
index 365ae5a..47a5eee 100644
--- a/config/defconfig_arm64-dpaa2-linuxapp-gcc
+++ b/config/defconfig_arm64-dpaa2-linuxapp-gcc
@@ -42,6 +42,14 @@  CONFIG_RTE_ARCH_ARM_TUNE="cortex-a57+fp+simd"
 CONFIG_RTE_MAX_LCORE=8
 CONFIG_RTE_MAX_NUMA_NODES=1
 
+CONFIG_RTE_PKTMBUF_HEADROOM=256
+
+#
+# Compile Support Libraries for DPAA2
+#
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL=n
+CONFIG_RTE_MBUF_DEFAULT_MEMPOOL_OPS="dpaa2"
+
 #
 # Compile NXP DPAA2 FSL-MC Bus
 #
diff --git a/drivers/Makefile b/drivers/Makefile
index e937449..88e1005 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -32,6 +32,7 @@ 
 include $(RTE_SDK)/mk/rte.vars.mk
 
 DIRS-y += bus
+DIRS-y += mempool
 DIRS-y += net
 DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += crypto
 
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 8f7864b..70fbe79 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -31,7 +31,9 @@ 
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
 CONFIG_RTE_LIBRTE_FSLMC_BUS = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
+endif
 
 DIRS-$(CONFIG_RTE_LIBRTE_FSLMC_BUS) += fslmc
 
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile
new file mode 100644
index 0000000..fb19049
--- /dev/null
+++ b/drivers/mempool/Makefile
@@ -0,0 +1,40 @@ 
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 NXP. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of NXP nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
+endif
+
+DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile
new file mode 100644
index 0000000..cc5f068
--- /dev/null
+++ b/drivers/mempool/dpaa2/Makefile
@@ -0,0 +1,72 @@ 
+#   BSD LICENSE
+#
+#   Copyright(c) 2016 NXP. All rights reserved.
+#   All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of NXP nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_dpaa2.a
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_PMD),y)
+CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL = $(CONFIG_RTE_LIBRTE_DPAA2_PMD)
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc
+CFLAGS += -I$(RTE_SDK)/drivers/bus/fslmc/qbman/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_mempool_dpaa2_version.map
+
+# Lbrary version
+LIBABIVER := 1
+
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += drivers/bus/fslmc
+
+LDLIBS += -lrte_bus_fslmc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
new file mode 100644
index 0000000..a8a530c
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c
@@ -0,0 +1,374 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <errno.h>
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_malloc.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+#include <rte_cycles.h>
+#include <rte_kvargs.h>
+#include <rte_dev.h>
+#include <rte_ethdev.h>
+
+#include <fslmc_logs.h>
+#include <mc/fsl_dpbp.h>
+#include <portal/dpaa2_hw_pvt.h>
+#include <portal/dpaa2_hw_dpio.h>
+#include "dpaa2_hw_mempool.h"
+
+struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+static struct dpaa2_bp_list *h_bp_list;
+
+static int
+rte_hw_mbuf_create_pool(struct rte_mempool *mp)
+{
+	struct dpaa2_bp_list *bp_list;
+	struct dpaa2_dpbp_dev *avail_dpbp;
+	struct dpbp_attr dpbp_attr;
+	uint32_t bpid;
+	int ret, p_ret;
+
+	avail_dpbp = dpaa2_alloc_dpbp_dev();
+
+	if (!avail_dpbp) {
+		PMD_DRV_LOG(ERR, "DPAA2 resources not available");
+		return -ENOENT;
+	}
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+			return ret;
+		}
+	}
+
+	ret = dpbp_enable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Resource enable failure with"
+			" err code: %d\n", ret);
+		return ret;
+	}
+
+	ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW,
+				  avail_dpbp->token, &dpbp_attr);
+	if (ret != 0) {
+		PMD_INIT_LOG(ERR, "Resource read failure with"
+			     " err code: %d\n", ret);
+		p_ret = ret;
+		ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW,
+				   avail_dpbp->token);
+		return p_ret;
+	}
+
+	/* Allocate the bp_list which will be added into global_bp_list */
+	bp_list = (struct dpaa2_bp_list *)malloc(sizeof(struct dpaa2_bp_list));
+	if (!bp_list) {
+		PMD_INIT_LOG(ERR, "No heap memory available");
+		return -ENOMEM;
+	}
+
+	/* Set parameters of buffer pool list */
+	bp_list->buf_pool.num_bufs = mp->size;
+	bp_list->buf_pool.size = mp->elt_size
+			- sizeof(struct rte_mbuf) - rte_pktmbuf_priv_size(mp);
+	bp_list->buf_pool.bpid = dpbp_attr.bpid;
+	bp_list->buf_pool.h_bpool_mem = NULL;
+	bp_list->buf_pool.mp = mp;
+	bp_list->buf_pool.dpbp_node = avail_dpbp;
+	bp_list->next = h_bp_list;
+
+	bpid = dpbp_attr.bpid;
+
+	rte_dpaa2_bpid_info[bpid].meta_data_size = sizeof(struct rte_mbuf)
+				+ rte_pktmbuf_priv_size(mp);
+	rte_dpaa2_bpid_info[bpid].bp_list = bp_list;
+	rte_dpaa2_bpid_info[bpid].bpid = bpid;
+
+	mp->pool_data = (void *)&rte_dpaa2_bpid_info[bpid];
+
+	PMD_INIT_LOG(DEBUG, "BP List created for bpid =%d", dpbp_attr.bpid);
+
+	h_bp_list = bp_list;
+	/* Identification for our offloaded pool_data structure
+	 */
+	mp->flags |= MEMPOOL_F_HW_PKT_POOL;
+	return 0;
+}
+
+static void
+rte_hw_mbuf_free_pool(struct rte_mempool *mp)
+{
+	struct dpaa2_bp_info *bpinfo;
+	struct dpaa2_bp_list *bp;
+	struct dpaa2_dpbp_dev *dpbp_node;
+
+	if (!mp->pool_data) {
+		PMD_DRV_LOG(ERR, "Not a valid dpaa22 pool");
+		return;
+	}
+
+	bpinfo = (struct dpaa2_bp_info *)mp->pool_data;
+	bp = bpinfo->bp_list;
+	dpbp_node = bp->buf_pool.dpbp_node;
+
+	dpbp_disable(&(dpbp_node->dpbp), CMD_PRI_LOW, dpbp_node->token);
+
+	if (h_bp_list == bp) {
+		h_bp_list = h_bp_list->next;
+	} else { /* if it is not the first node */
+		struct dpaa2_bp_list *prev = h_bp_list, *temp;
+		temp = h_bp_list->next;
+		while (temp) {
+			if (temp == bp) {
+				prev->next = temp->next;
+				free(bp);
+				break;
+			}
+			prev = temp;
+			temp = temp->next;
+		}
+	}
+
+	dpaa2_free_dpbp_dev(dpbp_node);
+}
+
+static void
+rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused,
+			void * const *obj_table,
+			uint32_t bpid,
+			uint32_t meta_data_size,
+			int count)
+{
+	struct qbman_release_desc releasedesc;
+	struct qbman_swp *swp;
+	int ret;
+	int i, n;
+	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret != 0) {
+			RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+			return;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	/* Create a release descriptor required for releasing
+	 * buffers into QBMAN
+	 */
+	qbman_release_desc_clear(&releasedesc);
+	qbman_release_desc_set_bpid(&releasedesc, bpid);
+
+	n = count % DPAA2_MBUF_MAX_ACQ_REL;
+	if (unlikely(!n))
+		goto aligned;
+
+	/* convert mbuf to buffers for the remainder */
+	for (i = 0; i < n ; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+		bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i])
+				+ meta_data_size;
+#else
+		bufs[i] = (uint64_t)obj_table[i] + meta_data_size;
+#endif
+	}
+
+	/* feed them to bman */
+	do {
+		ret = qbman_swp_release(swp, &releasedesc, bufs, n);
+	} while (ret == -EBUSY);
+
+aligned:
+	/* if there are more buffers to free */
+	while (n < count) {
+		/* convert mbuf to buffers */
+		for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) {
+#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
+			bufs[i] = (uint64_t)
+				  rte_mempool_virt2phy(pool, obj_table[n + i])
+				  + meta_data_size;
+#else
+			bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size;
+#endif
+		}
+
+		do {
+			ret = qbman_swp_release(swp, &releasedesc, bufs,
+						DPAA2_MBUF_MAX_ACQ_REL);
+		} while (ret == -EBUSY);
+		n += DPAA2_MBUF_MAX_ACQ_REL;
+	}
+}
+
+int
+rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+			  void **obj_table, unsigned int count)
+{
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+	static int alloc;
+#endif
+	struct qbman_swp *swp;
+	uint16_t bpid;
+	uint64_t bufs[DPAA2_MBUF_MAX_ACQ_REL];
+	int i, ret;
+	unsigned int n = 0;
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(pool);
+
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured\n");
+		return -ENOENT;
+	}
+
+	bpid = bp_info->bpid;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret != 0) {
+			RTE_LOG(ERR, PMD, "Failed to allocate IO portal");
+			return ret;
+		}
+	}
+	swp = DPAA2_PER_LCORE_PORTAL;
+
+	while (n < count) {
+		/* Acquire is all-or-nothing, so we drain in 7s,
+		 * then the remainder.
+		 */
+		if ((count - n) > DPAA2_MBUF_MAX_ACQ_REL) {
+			ret = qbman_swp_acquire(swp, bpid, bufs,
+						DPAA2_MBUF_MAX_ACQ_REL);
+		} else {
+			ret = qbman_swp_acquire(swp, bpid, bufs,
+						count - n);
+		}
+		/* In case of less than requested number of buffers available
+		 * in pool, qbman_swp_acquire returns 0
+		 */
+		if (ret <= 0) {
+			PMD_TX_LOG(ERR, "Buffer acquire failed with"
+				   " err code: %d", ret);
+			/* The API expect the exact number of requested bufs */
+			/* Releasing all buffers allocated */
+			rte_dpaa2_mbuf_release(pool, obj_table, bpid,
+					   bp_info->meta_data_size, n);
+			return ret;
+		}
+		/* assigning mbuf from the acquired objects */
+		for (i = 0; (i < ret) && bufs[i]; i++) {
+			DPAA2_MODIFY_IOVA_TO_VADDR(bufs[i], uint64_t);
+			obj_table[n] = (struct rte_mbuf *)
+				       (bufs[i] - bp_info->meta_data_size);
+			rte_mbuf_refcnt_set((struct rte_mbuf *)obj_table[n], 0);
+			PMD_TX_LOG(DEBUG, "Acquired %p address %p from BMAN",
+				   (void *)bufs[i], (void *)obj_table[n]);
+			n++;
+		}
+	}
+
+#ifdef RTE_LIBRTE_DPAA2_DEBUG_DRIVER
+	alloc += n;
+	PMD_TX_LOG(DEBUG, "Total = %d , req = %d done = %d",
+		   alloc, count, n);
+#endif
+	return 0;
+}
+
+static int
+rte_hw_mbuf_free_bulk(struct rte_mempool *pool,
+		  void * const *obj_table, unsigned int n)
+{
+	struct dpaa2_bp_info *bp_info;
+
+	bp_info = mempool_to_bpinfo(pool);
+	if (!(bp_info->bp_list)) {
+		RTE_LOG(ERR, PMD, "DPAA2 buffer pool not configured");
+		return -ENOENT;
+	}
+	rte_dpaa2_mbuf_release(pool, obj_table, bp_info->bpid,
+			   bp_info->meta_data_size, n);
+
+	return 0;
+}
+
+static unsigned int
+rte_hw_mbuf_get_count(const struct rte_mempool *mp)
+{
+	int ret;
+	unsigned int num_of_bufs = 0;
+	struct dpaa2_bp_info *bp_info;
+	struct dpaa2_dpbp_dev *dpbp_node;
+
+	if (!mp || !mp->pool_data) {
+		RTE_LOG(ERR, PMD, "Invalid mempool provided");
+		return 0;
+	}
+
+	bp_info = (struct dpaa2_bp_info *)mp->pool_data;
+	dpbp_node = bp_info->bp_list->buf_pool.dpbp_node;
+
+	ret = dpbp_get_num_free_bufs(&dpbp_node->dpbp, CMD_PRI_LOW,
+				     dpbp_node->token, &num_of_bufs);
+	if (ret) {
+		RTE_LOG(ERR, PMD, "Unable to obtain free buf count (err=%d)",
+			ret);
+		return 0;
+	}
+
+	RTE_LOG(DEBUG, PMD, "Free bufs = %u", num_of_bufs);
+
+	return num_of_bufs;
+}
+
+struct rte_mempool_ops dpaa2_mpool_ops = {
+	.name = "dpaa2",
+	.alloc = rte_hw_mbuf_create_pool,
+	.free = rte_hw_mbuf_free_pool,
+	.enqueue = rte_hw_mbuf_free_bulk,
+	.dequeue = rte_dpaa2_mbuf_alloc_bulk,
+	.get_count = rte_hw_mbuf_get_count,
+};
+
+MEMPOOL_REGISTER_OPS(dpaa2_mpool_ops);
diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
new file mode 100644
index 0000000..4f2fcd7
--- /dev/null
+++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h
@@ -0,0 +1,91 @@ 
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ *   Copyright (c) 2016 NXP. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Guard derived from this header's own name (dpaa2_hw_mempool.h).
+ * The previous guard _DPAA2_HW_DPBP_H_ belongs naturally to a
+ * dpaa2_hw_dpbp.h header and risks one header silently masking the
+ * other if both are ever included.
+ */
+#ifndef _DPAA2_HW_MEMPOOL_H_
+#define _DPAA2_HW_MEMPOOL_H_
+
+#define DPAA2_MAX_BUF_POOLS	8
+
+/* Static configuration for a single hardware buffer pool */
+struct buf_pool_cfg {
+	void *addr;
+	/**< The address from where DPAA2 will carve out the buffers */
+	phys_addr_t    phys_addr;
+	/**< Physical address of the memory provided in addr */
+	uint32_t num;
+	/**< Number of buffers */
+	uint32_t size;
+	/**< Size including headroom for each buffer */
+	uint16_t align;
+	/**< Buffer alignment (in bytes) */
+	uint16_t bpid;
+	/**< Autogenerated buffer pool ID for internal use */
+};
+
+/* Runtime state of a single hardware buffer pool */
+struct buf_pool {
+	uint32_t size; /**< Size of the Pool */
+	uint32_t num_bufs; /**< Number of buffers in Pool */
+	uint16_t bpid; /**< Pool ID, from pool configuration */
+	uint8_t *h_bpool_mem; /**< Internal context data */
+	struct rte_mempool *mp; /**< DPDK RTE EAL pool reference */
+	struct dpaa2_dpbp_dev *dpbp_node; /**< Hardware context */
+};
+
+/*!
+ * Buffer pool list configuration structure, one entry per buffer pool
+ * backing a mempool.
+ */
+struct dpaa2_bp_list_cfg {
+	struct buf_pool_cfg buf_pool; /* Configuration of each buffer pool*/
+};
+
+/* Node in the driver-global singly linked list of configured pools */
+struct dpaa2_bp_list {
+	struct dpaa2_bp_list *next;
+	struct rte_mempool *mp;
+	struct buf_pool buf_pool;
+};
+
+/* Per-BPID lookup record; indexed by hardware buffer pool ID */
+struct dpaa2_bp_info {
+	uint32_t meta_data_size;
+	/* offset between the mbuf address and the raw buffer address
+	 * handed to/returned by QBMAN */
+	uint32_t bpid;
+	struct dpaa2_bp_list *bp_list;
+};
+
+#define mempool_to_bpinfo(mp) ((struct dpaa2_bp_info *)(mp)->pool_data)
+#define mempool_to_bpid(mp) ((mempool_to_bpinfo(mp))->bpid)
+
+extern struct dpaa2_bp_info rte_dpaa2_bpid_info[MAX_BPID];
+
+/* Exported for the DPAA2 PMD: bulk-acquire buffers from the hardware pool */
+int rte_dpaa2_mbuf_alloc_bulk(struct rte_mempool *pool,
+		       void **obj_table, unsigned int count);
+
+#endif /* _DPAA2_HW_MEMPOOL_H_ */
diff --git a/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
new file mode 100644
index 0000000..a8aa685
--- /dev/null
+++ b/drivers/mempool/dpaa2/rte_mempool_dpaa2_version.map
@@ -0,0 +1,8 @@ 
+/* Symbols exported to other DPDK components (the DPAA2 PMD);
+ * everything else remains local to this shared object.
+ */
+DPDK_17.05 {
+	global:
+
+	rte_dpaa2_bpid_info;
+	rte_dpaa2_mbuf_alloc_bulk;
+
+	local: *;
+};