[dpdk-dev,v4,2/2] kni: Use bulk functions to allocate and free mbufs
Checks
Commit Message
Optimized kni_allocate_mbufs and kni_free_mbufs by using mbuf bulk
functions. This can improve performance by more than a factor of two.
Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
---
lib/librte_kni/rte_kni.c | 46 +++++++++++++++++++++----------------------
lib/librte_kni/rte_kni_fifo.h | 18 +++++++++++++++++
2 files changed, 40 insertions(+), 24 deletions(-)
Comments
2017-01-18 11:28 GMT+01:00 Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>:
> Optimized kni_allocate_mbufs and kni_free_mbufs by using mbuf bulk
> functions. This can improve performance more than two times.
>
> Signed-off-by: Sergey Vyazmitinov <s.vyazmitinov@brain4net.com>
> ---
> lib/librte_kni/rte_kni.c | 46 +++++++++++++++++++++----------------------
> lib/librte_kni/rte_kni_fifo.h | 18 +++++++++++++++++
> 2 files changed, 40 insertions(+), 24 deletions(-)
>
> diff --git a/lib/librte_kni/rte_kni.c b/lib/librte_kni/rte_kni.c
> index a80cefd..ad746ad 100644
> --- a/lib/librte_kni/rte_kni.c
> +++ b/lib/librte_kni/rte_kni.c
[...]
> + /* Check if any mbufs not put into alloc_q, and then free them */
> + if (unlikely(put < count)) {
> + for (unsigned int j = put; j < count; j++) {
> + RTE_LOG(ERR, KNI, "Free allocated buffer\n");
> rte_pktmbuf_free(pkts[j]);
> + }
This could also use bulk free, even if only to shave two lines of code.
Best Regards,
Michał Mirosław
@@ -590,22 +590,21 @@ rte_kni_rx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
static void
kni_free_mbufs(struct rte_kni *kni)
{
- int i, ret;
+ unsigned int freeing;
struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
- ret = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
- if (likely(ret > 0)) {
- for (i = 0; i < ret; i++)
- rte_pktmbuf_free(pkts[i]);
+ freeing = kni_fifo_get(kni->free_q, (void **)pkts, MAX_MBUF_BURST_NUM);
+ if (likely(freeing > 0)) {
+ rte_pktmbuf_free_bulk(pkts, freeing);
}
}
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
- int i, ret;
- struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
- void *phys[MAX_MBUF_BURST_NUM];
+ unsigned int count, put;
+ struct rte_mbuf *pkts[KNI_FIFO_COUNT_MAX];
+ void *phys[KNI_FIFO_COUNT_MAX];
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
offsetof(struct rte_kni_mbuf, pool));
@@ -628,28 +627,27 @@ kni_allocate_mbufs(struct rte_kni *kni)
return;
}
- for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
- pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
- if (unlikely(pkts[i] == NULL)) {
- /* Out of memory */
- RTE_LOG(ERR, KNI, "Out of memory\n");
- break;
- }
- phys[i] = va2pa(pkts[i]);
- }
+ /* Calculate alloc queue free space */
+ count = kni_fifo_free_count(kni->alloc_q);
- /* No pkt mbuf alocated */
- if (i <= 0)
+ /* Get buffers from mempool */
+ if (rte_pktmbuf_alloc_bulk(kni->pktmbuf_pool, pkts, count) != 0) {
+ RTE_LOG(ERR, KNI, "Can`t allocate %d mbufs\n", count);
return;
+ }
- ret = kni_fifo_put(kni->alloc_q, phys, i);
+ for (unsigned int i = 0; i < count; i++)
+ phys[i] = va2pa(pkts[i]);
- /* Check if any mbufs not put into alloc_q, and then free them */
- if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
- int j;
+ /* Put buffers into alloc queue */
+ put = kni_fifo_put(kni->alloc_q, (void **)phys, count);
- for (j = ret; j < i; j++)
+ /* Check if any mbufs not put into alloc_q, and then free them */
+ if (unlikely(put < count)) {
+ for (unsigned int j = put; j < count; j++) {
+ RTE_LOG(ERR, KNI, "Free allocated buffer\n");
rte_pktmbuf_free(pkts[j]);
+ }
}
}
@@ -91,3 +91,21 @@ kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
fifo->read = new_read;
return i;
}
+
+/**
+ * Get the num of elements in the fifo
+ */
+static inline unsigned
+kni_fifo_count(struct rte_kni_fifo *fifo)
+{
+ return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
+}
+
+/**
+ * Get the num of available elements in the fifo
+ */
+static inline unsigned
+kni_fifo_free_count(struct rte_kni_fifo *fifo)
+{
+ return (fifo->read - fifo->write - 1) & (fifo->len - 1);
+}