[dpdk-dev] [PATCH 08/10] event/octeontx: add option to use fpavf as chunk pool

Pavan Nikhilesh pbhagavatula at caviumnetworks.com
Fri Feb 16 22:36:58 CET 2018


Add a compile-time configuration option to make TIMvf use the Octeontx
FPAvf pool manager as its chunk pool.
When FPAvf is used as the pool manager, the TIMvf hardware automatically
frees expired chunks back to FPAvf via the pool's gpool id.

Signed-off-by: Pavan Nikhilesh <pbhagavatula at caviumnetworks.com>
---
 config/common_base                    |  1 +
 drivers/event/octeontx/timvf_evdev.c  | 23 +++++++++++++++++++++++
 drivers/event/octeontx/timvf_evdev.h  |  3 +++
 drivers/event/octeontx/timvf_worker.h | 35 +++++++++++++++++++++++++++++++++++
 4 files changed, 62 insertions(+)

diff --git a/config/common_base b/config/common_base
index ad03cf433..00010de92 100644
--- a/config/common_base
+++ b/config/common_base
@@ -562,6 +562,7 @@ CONFIG_RTE_LIBRTE_PMD_SW_EVENTDEV=y
 # Compile PMD for octeontx sso event device
 #
 CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=y
+CONFIG_RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF=n
 
 #
 # Compile PMD for OPDL event device
diff --git a/drivers/event/octeontx/timvf_evdev.c b/drivers/event/octeontx/timvf_evdev.c
index ffdfbb387..386eaa08f 100644
--- a/drivers/event/octeontx/timvf_evdev.c
+++ b/drivers/event/octeontx/timvf_evdev.c
@@ -162,10 +162,27 @@ timvf_ring_start(const struct rte_event_timer_adapter *adptr)
 		1ull << 48 |
 		1ull << 47 |
 		1ull << 44 |
+#ifndef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
+		1ull << 43 |
+#endif
 		(timr->meta.nb_bkts - 1);
 
 	rctrl.rctrl2 = (uint64_t)(TIM_CHUNK_SIZE / 16) << 40;
 
+#ifdef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
+	uintptr_t pool;
+	pool = (uintptr_t)((struct rte_mempool *)
+			timr->meta.chunk_pool)->pool_id;
+	ret = octeontx_fpa_bufpool_gpool(pool);
+	if (ret < 0) {
+		timvf_log_dbg("Unable to get gaura id");
+		ret = -ENOMEM;
+		goto error;
+	}
+	timvf_write64((uint64_t)ret,
+			(uint8_t *)timr->vbar0 + TIM_VRING_AURA);
+#endif
+
 	timvf_write64((uint64_t)timr->meta.bkt,
 			(uint8_t *)timr->vbar0 + TIM_VRING_BASE);
 	if (timvf_ring_conf_set(&rctrl, timr->tim_ring_id)) {
@@ -296,9 +313,15 @@ timvf_ring_create(struct rte_event_timer_adapter *adptr)
 		return -ENOMEM;
 	}
 
+#ifdef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
+	ret = rte_mempool_set_ops_byname(timr->meta.chunk_pool,
+			"octeontx_fpavf", NULL);
+	timvf_log_dbg("Giving back chunks to fpa gaura : %d", ret);
+#else
 	ret = rte_mempool_set_ops_byname(timr->meta.chunk_pool,
 			RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
 	timvf_log_dbg("Not giving back chunks to fpa");
+#endif
 
 	if (ret != 0) {
 		timvf_log_err("Unable to set chunkpool ops.");
diff --git a/drivers/event/octeontx/timvf_evdev.h b/drivers/event/octeontx/timvf_evdev.h
index 5e526a36a..02bd99a34 100644
--- a/drivers/event/octeontx/timvf_evdev.h
+++ b/drivers/event/octeontx/timvf_evdev.h
@@ -24,6 +24,9 @@
 #include <rte_reciprocal.h>
 
 #include <octeontx_mbox.h>
+#ifdef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
+#include <octeontx_fpavf.h>
+#endif
 
 #define timvf_log(level, fmt, args...) \
 	rte_log(RTE_LOG_ ## level, otx_logtype_timvf, \
diff --git a/drivers/event/octeontx/timvf_worker.h b/drivers/event/octeontx/timvf_worker.h
index 320eb6ac1..c3f37372a 100644
--- a/drivers/event/octeontx/timvf_worker.h
+++ b/drivers/event/octeontx/timvf_worker.h
@@ -144,6 +144,7 @@ timr_bkt_clr_nent(struct tim_mem_bucket *bktp)
 	return __atomic_and_fetch((uint64_t *)&bktp->w1, v, __ATOMIC_ACQ_REL);
 }
 
+#ifndef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
 static inline __hot struct tim_mem_entry*
 timr_clr_bkt(struct timvf_ring *timr, struct tim_mem_bucket *bkt)
 {
@@ -159,6 +160,7 @@ timr_clr_bkt(struct timvf_ring *timr, struct tim_mem_bucket *bkt)
 	}
 	return (struct tim_mem_entry *)bkt->first_chunk;
 }
+#endif
 
 /* Burst mode functions */
 static inline int __hot
@@ -241,7 +243,16 @@ timvf_add_entry_brst(struct timvf_ring *timr, const uint16_t rel_bkt,
 				bkt->first_chunk = (uint64_t) chunk;
 			}
 		} else {
+#ifndef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
 			chunk = timr_clr_bkt(timr, bkt);
+#else
+			if (unlikely(rte_mempool_get(timr->meta.chunk_pool,
+							(void **)&chunk))) {
+				timr_bkt_set_rem(bkt, 0);
+				tim[index]->state = RTE_EVENT_TIMER_ERROR;
+				return -ENOMEM;
+			}
+#endif
 			bkt->first_chunk = (uint64_t) chunk;
 		}
 		*(uint64_t *)(chunk + nb_chunk_slots) = 0;
@@ -355,7 +366,18 @@ timvf_add_entry_sp(struct timvf_ring *timr, const uint32_t rel_bkt,
 			}
 			*(uint64_t *)(chunk + nb_chunk_slots) = 0;
 		} else {
+#ifndef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
 			chunk = timr_clr_bkt(timr, bkt);
+#else
+			if (unlikely(rte_mempool_get(timr->meta.chunk_pool,
+							(void **)&chunk))) {
+				timr_bkt_set_rem(bkt, 0);
+				tim->impl_opaque[0] =
+					tim->impl_opaque[1] = 0;
+				tim->state = RTE_EVENT_TIMER_ERROR;
+				return -ENOMEM;
+			}
+#endif
 			*(uint64_t *)(chunk + nb_chunk_slots) = 0;
 			bkt->first_chunk = (uint64_t) chunk;
 		}
@@ -438,7 +460,20 @@ timvf_add_entry_mp(struct timvf_ring *timr, const uint32_t rel_bkt,
 				}
 				*(uint64_t *)(chunk + nb_chunk_slots) = 0;
 			} else {
+#ifndef RTE_PMD_OCTEONTX_TIMVF_USE_FPAVF
 				chunk = timr_clr_bkt(timr, bkt);
+#else
+				if (unlikely(rte_mempool_get(
+							timr->meta.chunk_pool,
+							(void **)&chunk))) {
+					timr_bkt_set_rem(bkt, 0);
+					timr_bkt_dec_lock(bkt);
+					tim->impl_opaque[0] =
+						tim->impl_opaque[1] = 0;
+					tim->state = RTE_EVENT_TIMER_ERROR;
+					return -ENOMEM;
+				}
+#endif
 				*(uint64_t *)(chunk + nb_chunk_slots) = 0;
 				bkt->first_chunk = (uint64_t) chunk;
 			}
-- 
2.16.1



More information about the dev mailing list