[dpdk-dev] kni: use bulk functions to allocate and free mbufs

Message ID 1483048216-2936-1-git-send-email-s.vyazmitinov@brain4net.com (mailing list archive)
State Superseded, archived

Checks

Context               Check     Description
ci/checkpatch         warning   coding style issues
ci/Intel compilation  fail      Compilation issues

Commit Message

Sergey Vyazmitinov Dec. 29, 2016, 9:50 p.m. UTC
  Optimized kni_allocate_mbufs and kni_free_mbufs by using mbuf bulk
functions. This can improve performance by more than a factor of two.

Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
---
 lib/librte_kni/rte_kni.c         | 44 +++++++++++++++++-----------------------
 lib/librte_kni/rte_kni_fifo.h    | 18 ++++++++++++++++
 lib/librte_mbuf/rte_mbuf.h       | 32 +++++++++++++++++++++++++++++
 lib/librte_mempool/rte_mempool.h |  6 ++++++
 4 files changed, 75 insertions(+), 25 deletions(-)
  

Comments

Ananyev, Konstantin Jan. 11, 2017, 10:39 a.m. UTC | #1
Hi Sergey,

...
> diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
> index 4476d75..707c300 100644
> --- a/lib/librte_mbuf/rte_mbuf.h
> +++ b/lib/librte_mbuf/rte_mbuf.h
> @@ -1261,6 +1261,38 @@ static inline void rte_pktmbuf_free(struct rte_mbuf *m)
>  }
> 
>  /**
> + * Free n packet mbufs back into their original mempool.
> + *
> + * Free each mbuf, and all of its segments in the case of chained buffers.
> + * Each segment is added back into its original mempool.
> + *
> + * @param mp
> + *   The mempool the packets were allocated from.
> + * @param mbufs
> + *   Array of packet mbufs to be freed.
> + * @param n
> + *   Number of packets.
> + */
> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> +		struct rte_mbuf **mbufs, unsigned n)
> +{
> +	struct rte_mbuf *mbuf, *m_next;
> +	unsigned i;
> +	for (i = 0; i < n; ++i) {
> +		mbuf = mbufs[i];
> +		__rte_mbuf_sanity_check(mbuf, 1);
> +
> +		mbuf = mbuf->next;
> +		while (mbuf != NULL) {
> +			m_next = mbuf->next;
> +			rte_pktmbuf_free_seg(mbuf);
> +			mbuf = m_next;
> +		}

I think you forgot to call __rte_pktmbuf_prefree_seg(mbufs[i]); somewhere here (see the sketch after the quoted patch below).
Konstantin

> +	}
> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> +}
> +
> +/**
>   * Creates a "clone" of the given packet mbuf.
>   *
>   * Walks through all segments of the given packet mbuf, and for each of them:
> diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
> index d315d42..e612a0a 100644
> --- a/lib/librte_mempool/rte_mempool.h
> +++ b/lib/librte_mempool/rte_mempool.h
> @@ -1497,6 +1497,12 @@ rte_mempool_get(struct rte_mempool *mp, void **obj_p)
>  	return rte_mempool_get_bulk(mp, obj_p, 1);
>  }
> 
> +static inline int __attribute__((always_inline))
> +rte_mempool_get_n(struct rte_mempool *mp, void **obj_p, int n)
> +{
> +	return rte_mempool_get_bulk(mp, obj_p, n);
> +}
> +
>  /**
>   * Return the number of entries in the mempool.
>   *
> --
> 2.7.4
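
To illustrate the point, here is a minimal sketch of the same loop with the
prefree check folded in (assuming, as the patch already does, that every mbuf
comes from the single mempool mp, and that __rte_pktmbuf_prefree_seg()
returns the mbuf only once its reference count drops to zero):

static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
		struct rte_mbuf **mbufs, unsigned n)
{
	struct rte_mbuf *m, *seg, *seg_next;
	unsigned i, n_free = 0;

	for (i = 0; i < n; ++i) {
		m = mbufs[i];
		__rte_mbuf_sanity_check(m, 1);

		/* free chained segments one by one, as before */
		seg = m->next;
		while (seg != NULL) {
			seg_next = seg->next;
			rte_pktmbuf_free_seg(seg);
			seg = seg_next;
		}

		/* the missing step: recycle the head segment only when
		 * its reference count actually drops to zero */
		if (__rte_pktmbuf_prefree_seg(m) != NULL) {
			m->next = NULL;
			mbufs[n_free++] = m;
		}
	}

	if (n_free > 0)
		rte_mempool_put_bulk(mp, (void * const *)mbufs, n_free);
}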
  
Stephen Hemminger Jan. 11, 2017, 4:17 p.m. UTC | #2
On Fri, 30 Dec 2016 04:50:16 +0700
Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:

>  /**
> + * Free n packet mbufs back into their original mempool.
> + *
> + * Free each mbuf, and all of its segments in the case of chained buffers.
> + * Each segment is added back into its original mempool.
> + *
> + * @param mp
> + *   The mempool the packets were allocated from.
> + * @param mbufs
> + *   Array of packet mbufs to be freed.
> + * @param n
> + *   Number of packets.
> + */
> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> +		struct rte_mbuf **mbufs, unsigned n)
> +{
> +	struct rte_mbuf *mbuf, *m_next;
> +	unsigned i;
> +	for (i = 0; i < n; ++i) {
> +		mbuf = mbufs[i];
> +		__rte_mbuf_sanity_check(mbuf, 1);
> +
> +		mbuf = mbuf->next;
> +		while (mbuf != NULL) {
> +			m_next = mbuf->next;
> +			rte_pktmbuf_free_seg(mbuf);
> +			mbuf = m_next;
> +		}
> +	}
> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> +}

The mbufs may come from different pools. You need to handle that.
  
Olivier Matz Jan. 11, 2017, 4:38 p.m. UTC | #3
On Wed, 11 Jan 2017 08:17:59 -0800, Stephen Hemminger
<stephen@networkplumber.org> wrote:
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
> >  /**
> > + * Free n packet mbufs back into their original mempool.
> > + *
> > + * Free each mbuf, and all of its segments in the case of chained
> > + * buffers. Each segment is added back into its original mempool.
> > + *
> > + * @param mp
> > + *   The mempool the packets were allocated from.
> > + * @param mbufs
> > + *   Array of packet mbufs to be freed.
> > + * @param n
> > + *   Number of packets.
> > + */
> > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > +		struct rte_mbuf **mbufs, unsigned n)
> > +{
> > +	struct rte_mbuf *mbuf, *m_next;
> > +	unsigned i;
> > +	for (i = 0; i < n; ++i) {
> > +		mbuf = mbufs[i];
> > +		__rte_mbuf_sanity_check(mbuf, 1);
> > +
> > +		mbuf = mbuf->next;
> > +		while (mbuf != NULL) {
> > +			m_next = mbuf->next;
> > +			rte_pktmbuf_free_seg(mbuf);
> > +			mbuf = m_next;
> > +		}
> > +	}
> > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > +}  
> 
> The mbufs may come from different pools. You need to handle that.

I have an implementation for that in an endless-work-in-progress
patchset:

 /**
+ * Free several mbuf segments.
+ *
+ * This function frees a table of mbufs, ensuring that each mbuf is
+ * returned into its original pool. It is the equivalent of calling
+ * rte_pktmbuf_free_seg() on each mbuf of the table.
+ *
+ * @param m_tab
+ *    Array of mbuf pointers.
+ * @param n
+ *    Array size.
+ */
+static inline void
+rte_pktmbuf_free_seg_bulk(struct rte_mbuf * const *m_tab, unsigned n)
+{
+       struct rte_mbuf *m;
+       struct rte_mbuf * const *start = NULL;
+       unsigned n_free = 0, i;
+       struct rte_mempool *free_pool = NULL;
+
+       for (i = 0; i < n; i++) {
+               m = m_tab[i];
+
+               if (__rte_pktmbuf_prefree_seg(m) == NULL) {
+                       if (n_free != 0)
+                               rte_mempool_put_bulk(free_pool,
+                                       (void * const *)start, n_free);
+
+                       free_pool = NULL;
+                       n_free = 0;
+                       continue;
+               }
+
+               if (unlikely(m->pool != free_pool)) {
+                       if (n_free != 0)
+                               rte_mempool_put_bulk(free_pool,
+                                       (void * const *)start, n_free);
+
+                       free_pool = m->pool;
+                       start = &m_tab[i];
+                       n_free = 1;
+               } else {
+                       n_free++;
+               }
+       }
+
+       if (n_free != 0)
+               rte_mempool_put_bulk(free_pool,
+                       (void * const *)start, n_free);
+}
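
To see the grouping at work (pool sequence made up for illustration): with
input pools [A, A, B, B, B, A] the loop above issues three bulk puts,
put(A, 2), put(B, 3), put(A, 1). The common case where every mbuf shares one
pool still collapses into a single rte_mempool_put_bulk() call, and mixed
input degrades gracefully, one put per run of consecutive same-pool mbufs.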


In the same patch, I also remove the tx_free_bulk_mbuf() functions
that do almost the same job in specific drivers. Unfortunately,
this patch needs to be rebased and better tested, so it's not ready yet.


Regards,
Olivier
  
Ferruh Yigit Jan. 11, 2017, 5 p.m. UTC | #4
Hi Sergey,

On 1/11/2017 4:17 PM, Stephen Hemminger wrote:
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
>>  /**
>> + * Free n packet mbufs back into their original mempool.
>> + *
>> + * Free each mbuf, and all of its segments in the case of chained buffers.
>> + * Each segment is added back into its original mempool.
>> + *
>> + * @param mp
>> + *   The mempool the packets were allocated from.
>> + * @param mbufs
>> + *   Array of packet mbufs to be freed.
>> + * @param n
>> + *   Number of packets.
>> + */
>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>> +		struct rte_mbuf **mbufs, unsigned n)
>> +{
>> +	struct rte_mbuf *mbuf, *m_next;
>> +	unsigned i;
>> +	for (i = 0; i < n; ++i) {
>> +		mbuf = mbufs[i];
>> +		__rte_mbuf_sanity_check(mbuf, 1);
>> +
>> +		mbuf = mbuf->next;
>> +		while (mbuf != NULL) {
>> +			m_next = mbuf->next;
>> +			rte_pktmbuf_free_seg(mbuf);
>> +			mbuf = m_next;
>> +		}
>> +	}
>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>> +}
> 
> The mbufs may come from different pools. You need to handle that.
> 

As Stephen pointed out, mbufs can be from different mempools. But we can
still benefit from the bulk allocation part of your patch; would you mind
splitting it into separate patches for alloc and free?

Thanks,
ferruh
  
Ananyev, Konstantin Jan. 11, 2017, 5:28 p.m. UTC | #5
> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> Sent: Wednesday, January 11, 2017 4:18 PM
> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On Fri, 30 Dec 2016 04:50:16 +0700
> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> 
> >  /**
> > + * Free n packet mbufs back into their original mempool.
> > + *
> > + * Free each mbuf, and all of its segments in the case of chained buffers.
> > + * Each segment is added back into its original mempool.
> > + *
> > + * @param mp
> > + *   The mempool the packets were allocated from.
> > + * @param mbufs
> > + *   Array of packet mbufs to be freed.
> > + * @param n
> > + *   Number of packets.
> > + */
> > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > +		struct rte_mbuf **mbufs, unsigned n)
> > +{
> > +	struct rte_mbuf *mbuf, *m_next;
> > +	unsigned i;
> > +	for (i = 0; i < n; ++i) {
> > +		mbuf = mbufs[i];
> > +		__rte_mbuf_sanity_check(mbuf, 1);
> > +
> > +		mbuf = mbuf->next;
> > +		while (mbuf != NULL) {
> > +			m_next = mbuf->next;
> > +			rte_pktmbuf_free_seg(mbuf);
> > +			mbuf = m_next;
> > +		}
> > +	}
> > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > +}
> 
> The mbufs may come from different pools. You need to handle that.

I suppose both situations are possible:
1) the user knows off-hand that all mbufs in the group are from the same mempool
2) the user can't guarantee that all mbufs in the group are from the same mempool.

As I understand that patch is for case 1) only.
For 2) it could be a separate function and separate patch.
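
Concretely, the split might look like this from the caller's side (a sketch;
the mixed-pool function name is hypothetical, and Olivier's
rte_pktmbuf_free_seg_bulk() above shows one way to implement it):

	/* case 1: caller guarantees all mbufs come from mempool mp */
	rte_pktmbuf_free_bulk(mp, mbufs, n);

	/* case 2: no such guarantee; honour each mbuf's own pool */
	rte_pktmbuf_free_bulk_mixed(mbufs, n);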

Konstantin
  
Stephen Hemminger Jan. 11, 2017, 5:35 p.m. UTC | #6
On Wed, 11 Jan 2017 17:28:21 +0000
"Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:

> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> > Sent: Wednesday, January 11, 2017 4:18 PM
> > To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> > Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> > Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> > 
> > On Fri, 30 Dec 2016 04:50:16 +0700
> > Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> >   
> > >  /**
> > > + * Free n packet mbufs back into their original mempool.
> > > + *
> > > + * Free each mbuf, and all of its segments in the case of chained buffers.
> > > + * Each segment is added back into its original mempool.
> > > + *
> > > + * @param mp
> > > + *   The mempool the packets were allocated from.
> > > + * @param mbufs
> > > + *   Array of packet mbufs to be freed.
> > > + * @param n
> > > + *   Number of packets.
> > > + */
> > > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > > +		struct rte_mbuf **mbufs, unsigned n)
> > > +{
> > > +	struct rte_mbuf *mbuf, *m_next;
> > > +	unsigned i;
> > > +	for (i = 0; i < n; ++i) {
> > > +		mbuf = mbufs[i];
> > > +		__rte_mbuf_sanity_check(mbuf, 1);
> > > +
> > > +		mbuf = mbuf->next;
> > > +		while (mbuf != NULL) {
> > > +			m_next = mbuf->next;
> > > +			rte_pktmbuf_free_seg(mbuf);
> > > +			mbuf = m_next;
> > > +		}
> > > +	}
> > > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > > +}  
> > 
> > The mbufs may come from different pools. You need to handle that.  
> 
> I suppose both situations are possible:
> 1) the user knows off-hand that all mbufs in the group are from the same mempool
> 2) the user can't guarantee that all mbufs in the group are from the same mempool.
> 
> As I understand that patch is for case 1) only.
> For 2) it could be a separate function and separate patch.
> 
> Konstantin
> 
> 

Please don't make unnecessary assumptions in pursuit of minor optimizations.
It is trivial to write a correct free bulk that handles pool changing.
Also the free_seg could be bulked as well.
  
Ananyev, Konstantin Jan. 11, 2017, 5:43 p.m. UTC | #7
> -----Original Message-----
> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> Sent: Wednesday, January 11, 2017 5:36 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On Wed, 11 Jan 2017 17:28:21 +0000
> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
> 
> > > -----Original Message-----
> > > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> > > Sent: Wednesday, January 11, 2017 4:18 PM
> > > To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> > > Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> > > Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> > >
> > > On Fri, 30 Dec 2016 04:50:16 +0700
> > > Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> > >
> > > >  /**
> > > > + * Free n packet mbufs back into their original mempool.
> > > > + *
> > > > + * Free each mbuf, and all of its segments in the case of chained buffers.
> > > > + * Each segment is added back into its original mempool.
> > > > + *
> > > > + * @param mp
> > > > + *   The mempool the packets were allocated from.
> > > > + * @param mbufs
> > > > + *   Array of packet mbufs to be freed.
> > > > + * @param n
> > > > + *   Number of packets.
> > > > + */
> > > > +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> > > > +		struct rte_mbuf **mbufs, unsigned n)
> > > > +{
> > > > +	struct rte_mbuf *mbuf, *m_next;
> > > > +	unsigned i;
> > > > +	for (i = 0; i < n; ++i) {
> > > > +		mbuf = mbufs[i];
> > > > +		__rte_mbuf_sanity_check(mbuf, 1);
> > > > +
> > > > +		mbuf = mbuf->next;
> > > > +		while (mbuf != NULL) {
> > > > +			m_next = mbuf->next;
> > > > +			rte_pktmbuf_free_seg(mbuf);
> > > > +			mbuf = m_next;
> > > > +		}
> > > > +	}
> > > > +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> > > > +}
> > >
> > > The mbufs may come from different pools. You need to handle that.
> >
> > I suppose both situations are possible:
> > 1) the user knows off-hand that all mbufs in the group are from the same mempool
> > 2) the user can't guarantee that all mbufs in the group are from the same mempool.
> >
> > As I understand that patch is for case 1) only.
> > For 2) it could be a separate function and separate patch.
> >
> > Konstantin
> >
> >
> 
> Please don't make unnecessary assumptions in pursuit of minor optimizations.

I don't suggest making *any* assumptions.
What I am saying is we can have 2 functions for two different cases.
Obviously we'll have to document it properly.
Konstantin

> It is trivial to write a correct free bulk that handles pool changing.
> Also the free_seg could be bulked as well.
  
Ferruh Yigit Jan. 11, 2017, 5:47 p.m. UTC | #8
On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
> 
> 
>> -----Original Message-----
>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>> Sent: Wednesday, January 11, 2017 5:36 PM
>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>
>> On Wed, 11 Jan 2017 17:28:21 +0000
>> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
>>
>>>> -----Original Message-----
>>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
>>>> Sent: Wednesday, January 11, 2017 4:18 PM
>>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
>>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>
>>>> On Fri, 30 Dec 2016 04:50:16 +0700
>>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
>>>>
>>>>>  /**
>>>>> + * Free n packet mbufs back into their original mempool.
>>>>> + *
>>>>> + * Free each mbuf, and all of its segments in the case of chained buffers.
>>>>> + * Each segment is added back into its original mempool.
>>>>> + *
>>>>> + * @param mp
>>>>> + *   The mempool the packets were allocated from.
>>>>> + * @param mbufs
>>>>> + *   Array of packet mbufs to be freed.
>>>>> + * @param n
>>>>> + *   Number of packets.
>>>>> + */
>>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>>>>> +		struct rte_mbuf **mbufs, unsigned n)
>>>>> +{
>>>>> +	struct rte_mbuf *mbuf, *m_next;
>>>>> +	unsigned i;
>>>>> +	for (i = 0; i < n; ++i) {
>>>>> +		mbuf = mbufs[i];
>>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
>>>>> +
>>>>> +		mbuf = mbuf->next;
>>>>> +		while (mbuf != NULL) {
>>>>> +			m_next = mbuf->next;
>>>>> +			rte_pktmbuf_free_seg(mbuf);
>>>>> +			mbuf = m_next;
>>>>> +		}
>>>>> +	}
>>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>>>>> +}
>>>>
>>>> The mbufs may come from different pools. You need to handle that.
>>>
>>> I suppose both situations are possible:
>>> 1) the user knows off-hand that all mbufs in the group are from the same mempool
>>> 2) the user can't guarantee that all mbufs in the group are from the same mempool.
>>>
>>> As I understand that patch is for case 1) only.
>>> For 2) it could be a separate function and separate patch.
>>>
>>> Konstantin
>>>
>>>
>>
>> Please don't make unnecessary assumptions in pursuit of minor optimizations.
> 
> I don't suggest making *any* assumptions.
> What I am saying is we can have 2 functions for two different cases.

kni_free_mbufs() is a static function. Even if the user knows whether all
mbufs are from the same pool or not, this information can't be passed to
the free function.

Of course this information can be passed via a new API, or as an update to
an existing API, but I think it is better to update the free function to
cover both cases instead of getting this information from the user.

> Obviously we'll have to document it properly.
> Konstantin
> 
>> It is trivial to write a correct free bulk that handles pool changing.
>> Also the free_seg could be bulked as well.
  
Ananyev, Konstantin Jan. 11, 2017, 6:25 p.m. UTC | #9
> -----Original Message-----
> From: Yigit, Ferruh
> Sent: Wednesday, January 11, 2017 5:48 PM
> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Stephen Hemminger <stephen@networkplumber.org>
> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; dev@dpdk.org
> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> 
> On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
> >
> >
> >> -----Original Message-----
> >> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
> >> Sent: Wednesday, January 11, 2017 5:36 PM
> >> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
> >> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>;
> dev@dpdk.org
> >> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>
> >> On Wed, 11 Jan 2017 17:28:21 +0000
> >> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
> >>
> >>>> -----Original Message-----
> >>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
> >>>> Sent: Wednesday, January 11, 2017 4:18 PM
> >>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> >>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
> >>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
> >>>>
> >>>> On Fri, 30 Dec 2016 04:50:16 +0700
> >>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
> >>>>
> >>>>>  /**
> >>>>> + * Free n packet mbufs back into their original mempool.
> >>>>> + *
> >>>>> + * Free each mbuf, and all of its segments in the case of chained buffers.
> >>>>> + * Each segment is added back into its original mempool.
> >>>>> + *
> >>>>> + * @param mp
> >>>>> + *   The mempool the packets were allocated from.
> >>>>> + * @param mbufs
> >>>>> + *   Array of packet mbufs to be freed.
> >>>>> + * @param n
> >>>>> + *   Number of packets.
> >>>>> + */
> >>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
> >>>>> +		struct rte_mbuf **mbufs, unsigned n)
> >>>>> +{
> >>>>> +	struct rte_mbuf *mbuf, *m_next;
> >>>>> +	unsigned i;
> >>>>> +	for (i = 0; i < n; ++i) {
> >>>>> +		mbuf = mbufs[i];
> >>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
> >>>>> +
> >>>>> +		mbuf = mbuf->next;
> >>>>> +		while (mbuf != NULL) {
> >>>>> +			m_next = mbuf->next;
> >>>>> +			rte_pktmbuf_free_seg(mbuf);
> >>>>> +			mbuf = m_next;
> >>>>> +		}
> >>>>> +	}
> >>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
> >>>>> +}
> >>>>
> >>>> The mbufs may come from different pools. You need to handle that.
> >>>
> >>> I suppose both situations are possible:
> >>> 1) the user knows off-hand that all mbufs in the group are from the same mempool
> >>> 2) the user can't guarantee that all mbufs in the group are from the same mempool.
> >>>
> >>> As I understand that patch is for case 1) only.
> >>> For 2) it could be a separate function and separate patch.
> >>>
> >>> Konstantin
> >>>
> >>>
> >>
> >> Please don't make unnecessary assumptions in pursuit of minor optimizations.
> >
> > I don't suggest making *any* assumptions.
> > What I am saying is we can have 2 functions for two different cases.
> 
> kni_free_mbufs() is a static function. Even if the user knows whether all
> mbufs are from the same pool or not, this information can't be passed to
> the free function.
> 
> Of course this information can be passed via a new API, or as an update to
> an existing API, but I think it is better to update the free function to
> cover both cases instead of getting this information from the user.

I suppose the misunderstanding came from the fact that kni_free_mbufs()
is modified to use rte_pktmbuf_free_bulk(mp, mbufs, n).
I am not talking about the kni part of the patch
(to be honest I didn't pay much attention to it).
What I am saying is that there are many situations when the user knows
off-hand that all mbufs in the group are from the same mempool, and such
a function will be useful too.
BTW, for my own curiosity, how could it happen with KNI that
kni_fifo_get() would return mbufs not from kni->pktmbuf_pool
(I am not really familiar with KNI and its use-cases)?
Konstantin

> 
> > Obviously we'll have to document it properly.
> > Konstantin
> >
> >> It is trivial to write a correct free bulk that handles pool changing.
> >> Also the free_seg could be bulked as well.
  
Ferruh Yigit Jan. 11, 2017, 6:41 p.m. UTC | #10
On 1/11/2017 6:25 PM, Ananyev, Konstantin wrote:
> 
> 
>> -----Original Message-----
>> From: Yigit, Ferruh
>> Sent: Wednesday, January 11, 2017 5:48 PM
>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>; Stephen Hemminger <stephen@networkplumber.org>
>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; dev@dpdk.org
>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>
>> On 1/11/2017 5:43 PM, Ananyev, Konstantin wrote:
>>>
>>>
>>>> -----Original Message-----
>>>> From: Stephen Hemminger [mailto:stephen@networkplumber.org]
>>>> Sent: Wednesday, January 11, 2017 5:36 PM
>>>> To: Ananyev, Konstantin <konstantin.ananyev@intel.com>
>>>> Cc: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>; olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>;
>> dev@dpdk.org
>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>
>>>> On Wed, 11 Jan 2017 17:28:21 +0000
>>>> "Ananyev, Konstantin" <konstantin.ananyev@intel.com> wrote:
>>>>
>>>>>> -----Original Message-----
>>>>>> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Stephen Hemminger
>>>>>> Sent: Wednesday, January 11, 2017 4:18 PM
>>>>>> To: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
>>>>>> Cc: olivier.matz@6wind.com; Yigit, Ferruh <ferruh.yigit@intel.com>; dev@dpdk.org
>>>>>> Subject: Re: [dpdk-dev] [PATCH] kni: use bulk functions to allocate and free mbufs
>>>>>>
>>>>>> On Fri, 30 Dec 2016 04:50:16 +0700
>>>>>> Sergey Vyazmitinov <s.vyazmitinov@brain4net.com> wrote:
>>>>>>
>>>>>>>  /**
>>>>>>> + * Free n packet mbufs back into their original mempool.
>>>>>>> + *
>>>>>>> + * Free each mbuf, and all of its segments in the case of chained buffers.
>>>>>>> + * Each segment is added back into its original mempool.
>>>>>>> + *
>>>>>>> + * @param mp
>>>>>>> + *   The mempool the packets were allocated from.
>>>>>>> + * @param mbufs
>>>>>>> + *   Array of packet mbufs to be freed.
>>>>>>> + * @param n
>>>>>>> + *   Number of packets.
>>>>>>> + */
>>>>>>> +static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
>>>>>>> +		struct rte_mbuf **mbufs, unsigned n)
>>>>>>> +{
>>>>>>> +	struct rte_mbuf *mbuf, *m_next;
>>>>>>> +	unsigned i;
>>>>>>> +	for (i = 0; i < n; ++i) {
>>>>>>> +		mbuf = mbufs[i];
>>>>>>> +		__rte_mbuf_sanity_check(mbuf, 1);
>>>>>>> +
>>>>>>> +		mbuf = mbuf->next;
>>>>>>> +		while (mbuf != NULL) {
>>>>>>> +			m_next = mbuf->next;
>>>>>>> +			rte_pktmbuf_free_seg(mbuf);
>>>>>>> +			mbuf = m_next;
>>>>>>> +		}
>>>>>>> +	}
>>>>>>> +	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
>>>>>>> +}
>>>>>>
>>>>>> The mbufs may come from different pools. You need to handle that.
>>>>>
>>>>> I suppose both situations are possible:
>>>>> 1) the user knows off-hand that all mbufs in the group are from the same mempool
>>>>> 2) the user can't guarantee that all mbufs in the group are from the same mempool.
>>>>>
>>>>> As I understand that patch is for case 1) only.
>>>>> For 2) it could be a separate function and separate patch.
>>>>>
>>>>> Konstantin
>>>>>
>>>>>
>>>>
>>>> Please don't make unnecessary assumptions in pursuit of minor optimizations.
>>>
>>> I don't suggest making *any* assumptions.
>>> What I am saying is we can have 2 functions for two different cases.
>>
>> kni_free_mbufs() is a static function. Even if the user knows whether all
>> mbufs are from the same pool or not, this information can't be passed to
>> the free function.
>>
>> Of course this information can be passed via a new API, or as an update to
>> an existing API, but I think it is better to update the free function to
>> cover both cases instead of getting this information from the user.
> 
> I suppose the misunderstanding came from the fact that kni_free_mbufs()
> is modified to use rte_pktmbuf_free_bulk(mp, mbufs, n).
> I am not talking about the kni part of the patch
> (to be honest I didn't pay much attention to it).
> What I am saying is that there are many situations when the user knows
> off-hand that all mbufs in the group are from the same mempool, and such
> a function will be useful too.

> BTW, for my own curiosity, how could it happen with KNI that
> kni_fifo_get() would return mbufs not from kni->pktmbuf_pool
> (I am not really familiar with KNI and its use-cases)?

It gets packets from the free queue:
kni_fifo_get(kni->free_q, ...)

A DPDK app may send an mbuf (from any pool, such as another port's mempool)
to the kernel; the kernel puts the buf back into kni->free_q when done with it.
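
For example (a sketch with made-up port and queue numbers), a forwarding app
can feed KNI mbufs that were allocated from an Ethernet port's RX pool rather
than from kni->pktmbuf_pool:

	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	unsigned nb_rx, nb_tx;

	/* mbufs come from port 0's RX mempool, not kni->pktmbuf_pool */
	nb_rx = rte_eth_rx_burst(0, 0, pkts, MAX_MBUF_BURST_NUM);

	/* hand them to the kernel; once the kernel is done with them
	 * they show up on kni->free_q for kni_free_mbufs() to release */
	nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);

	/* anything the fifo could not take is freed immediately */
	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);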

> Konstantin
> 
>>
>>> Obviously we'll have to document it properly.
>>> Konstantin
>>>
>>>> It is trivial to write a correct free bulk that handles pool changing.
>>>> Also the free_seg could be bulked as well.
>
  

Patch

diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
index a80cefd..cb4cfa6 100644
--- a/lib/librte_kni/rte_kni.c
+++ b/lib/librte_kni/rte_kni.c
@@ -590,22 +590,21 @@  rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
 static void
 kni_free_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
+	unsigned freeing;
 	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
 
-	ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
-	if (likely(ret > 0)) {
-		for (i = 0; i < ret; i++)
-			rte_pktmbuf_free(pkts[i]);
+	freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+	if (likely(freeing > 0)) {
+		rte_pktmbuf_free_bulk(kni->pktmbuf_pool, pkts, freeing);
 	}
 }
 
 static void
 kni_allocate_mbufs(struct rte_kni *kni)
 {
-	int i, ret;
-	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
-	void *phys[MAX_MBUF_BURST_NUM];
+	unsigned count, allocated, put;
+	struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+	void *phys[KNI_FIFO_COUNT_MAX];
 
 	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
 			 offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,23 @@  kni_allocate_mbufs(struct rte_kni *kni)
 		return;
 	}
 
-	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
-		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
-		if (unlikely(pkts[i] == NULL)) {
-			/* Out of memory */
-			RTE_LOG(ERR, KNI, "Out of memory\n");
-			break;
-		}
-		phys[i] = va2pa(pkts[i]);
-	}
+	/* Calculate alloc queue free space */
+	count = kni_fifo_free_count(kni->alloc_q);
 
-	/* No pkt mbuf alocated */
-	if (i <= 0)
-		return;
+	/* Get buffers from mempool */
+	allocated = rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count);
+	for (unsigned i = 0; i < allocated; i++)
+		phys[i] = va2pa(pkts[i]);
 
-	ret = kni_fifo_put(kni->alloc_q, phys, i);
+	/* Put buffers into alloc queue */
+	put = kni_fifo_put(kni->alloc_q, (void **)phys, allocated);
 
 	/* Check if any mbufs not put into alloc_q, and then free them */
-	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
-		int j;
-
-		for (j = ret; j < i; j++)
+	if (unlikely(put < allocated)) {
+		for (unsigned j = put; j < allocated; j++) {
+			RTE_LOG(ERR, KNI, "Free allocated buffer\n");
 			rte_pktmbuf_free(pkts[j]);
+		}
 	}
 }
 
diff --git a/lib/librte_kni/rte_kni_fifo.h b/lib/librte_kni/rte_kni_fifo.h
index 8cb8587..361ddb0 100644
--- a/lib/librte_kni/rte_kni_fifo.h
+++ b/lib/librte_kni/rte_kni_fifo.h
@@ -91,3 +91,21 @@  kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
 	fifo->read = new_read;
 	return i;
 }
+
+/**
+ * Get the number of elements in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the number of available elements in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+	return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}
diff --git a/lib/librte_mbuf/rte_mbuf.h b/lib/librte_mbuf/rte_mbuf.h
index 4476d75..707c300 100644
--- a/lib/librte_mbuf/rte_mbuf.h
+++ b/lib/librte_mbuf/rte_mbuf.h
@@ -1261,6 +1261,38 @@  static inline void rte_pktmbuf_free(struct rte_mbuf *m)
 }
 
 /**
+ * Free n packet mbufs back into their original mempool.
+ *
+ * Free each mbuf, and all of its segments in the case of chained buffers.
+ * Each segment is added back into its original mempool.
+ *
+ * @param mp
+ *   The mempool the packets were allocated from.
+ * @param mbufs
+ *   Array of packet mbufs to be freed.
+ * @param n
+ *   Number of packets.
+ */
+static inline void rte_pktmbuf_free_bulk(struct rte_mempool *mp,
+		struct rte_mbuf **mbufs, unsigned n)
+{
+	struct rte_mbuf *mbuf, *m_next;
+	unsigned i;
+	for (i = 0; i < n; ++i) {
+		mbuf = mbufs[i];
+		__rte_mbuf_sanity_check(mbuf, 1);
+
+		mbuf = mbuf->next;
+		while (mbuf != NULL) {
+			m_next = mbuf->next;
+			rte_pktmbuf_free_seg(mbuf);
+			mbuf = m_next;
+		}
+	}
+	rte_mempool_put_bulk(mp, (void * const *)mbufs, n);
+}
+
+/**
  * Creates a "clone" of the given packet mbuf.
  *
  * Walks through all segments of the given packet mbuf, and for each of them:
diff --git a/lib/librte_mempool/rte_mempool.h b/lib/librte_mempool/rte_mempool.h
index d315d42..e612a0a 100644
--- a/lib/librte_mempool/rte_mempool.h
+++ b/lib/librte_mempool/rte_mempool.h
@@ -1497,6 +1497,12 @@  rte_mempool_get(struct rte_mempool *mp, void **obj_p)
 	return rte_mempool_get_bulk(mp, obj_p, 1);
 }
 
+static inline int __attribute__((always_inline))
+rte_mempool_get_n(struct rte_mempool *mp, void **obj_p, int n)
+{
+	return rte_mempool_get_bulk(mp, obj_p, n);
+}
+
 /**
  * Return the number of entries in the mempool.
  *
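
For reference, the kni_fifo_count() and kni_fifo_free_count() helpers added
above rely on fifo->len being a power of two, so the masked subtraction wraps
correctly even after the write index has wrapped past the read index. A
worked example with assumed values:

	/* len = 8, read = 6, write = 2 (write has wrapped around):
	 *   kni_fifo_count:      (8 + 2 - 6) & 7 = 4 entries in use
	 *   kni_fifo_free_count: (6 - 2 - 1) & 7 = 3 slots free
	 * one slot always stays empty so a full fifo can be told apart
	 * from an empty one */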