[PATCH v1 7/7] event/cnxk: support DMA event functions

Amit Prakash Shukla amitprakashs at marvell.com
Tue Sep 19 15:42:22 CEST 2023


Added support for assigning DMA driver callbacks to the eventdev
enqueue and dequeue paths. The change also defines the DMA adapter
capabilities function. A short usage sketch follows the diffstat
below.

Signed-off-by: Amit Prakash Shukla <amitprakashs at marvell.com>
---
 drivers/event/cnxk/cn10k_eventdev.c | 20 ++++++++++++++++++++
 drivers/event/cnxk/cn10k_worker.h   |  3 +++
 drivers/event/cnxk/cn9k_eventdev.c  | 17 +++++++++++++++++
 drivers/event/cnxk/cn9k_worker.h    |  3 +++
 drivers/event/cnxk/meson.build      |  3 +--
 5 files changed, 44 insertions(+), 2 deletions(-)
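
Notes (ignored by git-am): a minimal application-side sketch of the
new path, for reviewers. It assumes the rte_event_dma_adapter API
introduced earlier in this series; evdev_id, dma_dev_id, port_id and
op are placeholder variables, and error handling is omitted.

	uint32_t caps = 0;

	/* Resolves to cn10k/cn9k_dma_adapter_caps_get() through the
	 * dma_adapter_caps_get eventdev op added below.
	 */
	rte_event_dma_adapter_caps_get(evdev_id, dma_dev_id, &caps);

	if (caps & RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD) {
		struct rte_event ev = {
			.event_type = RTE_EVENT_TYPE_DMADEV,
			/* op points to a pre-filled
			 * struct rte_event_dma_adapter_op.
			 */
			.event_ptr = op,
		};

		/* Dispatches through event_dev->dma_enqueue, i.e.
		 * cn10k/cn9k_dma_adapter_enqueue() from this series.
		 */
		rte_event_dma_adapter_enqueue(evdev_id, port_id, &ev, 1);
	}

On completion, cn10k/cn9k_sso_hws_post_process() match
RTE_EVENT_TYPE_DMADEV in the event tag and call
cnxk_dma_adapter_dequeue() to hand the finished op back to the
application.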

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index c5d4be0474..9bb8b8ff01 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -8,6 +8,9 @@
 #include "cn10k_cryptodev_ops.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
+
+#include <rte_dmadev_pmd.h>
 
 #define CN10K_SET_EVDEV_DEQ_OP(dev, deq_op, deq_ops)                           \
 	deq_op = deq_ops[dev->rx_offloads & (NIX_RX_OFFLOAD_MAX - 1)]
@@ -469,6 +472,8 @@ cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 	else
 		event_dev->ca_enqueue = cn10k_cpt_sg_ver1_crypto_adapter_enqueue;
 
+	event_dev->dma_enqueue = cn10k_dma_adapter_enqueue;
+
 	if (dev->tx_offloads & NIX_TX_MULTI_SEG_F)
 		CN10K_SET_EVDEV_ENQ_OP(dev, event_dev->txa_enqueue, sso_hws_tx_adptr_enq_seg);
 	else
@@ -978,6 +983,19 @@ cn10k_crypto_adapter_vec_limits(const struct rte_eventdev *event_dev,
 	return 0;
 }
 
+static int
+cn10k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			   const struct rte_dma_dev *dma_dev, uint32_t *caps)
+{
+	RTE_SET_USED(dma_dev);
+
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn10k", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
 static struct eventdev_ops cn10k_sso_dev_ops = {
 	.dev_infos_get = cn10k_sso_info_get,
 	.dev_configure = cn10k_sso_dev_configure,
@@ -1017,6 +1035,8 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_del = cn10k_crypto_adapter_qp_del,
 	.crypto_adapter_vector_limits_get = cn10k_crypto_adapter_vec_limits,
 
+	.dma_adapter_caps_get = cn10k_dma_adapter_caps_get,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn10k_worker.h b/drivers/event/cnxk/cn10k_worker.h
index e71ab3c523..3d35fcb657 100644
--- a/drivers/event/cnxk/cn10k_worker.h
+++ b/drivers/event/cnxk/cn10k_worker.h
@@ -7,6 +7,7 @@
 
 #include <rte_eventdev.h>
 #include "cn10k_cryptodev_event_dp.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn10k_rx.h"
 #include "cnxk_worker.h"
 #include "cn10k_eventdev.h"
@@ -226,6 +227,8 @@ cn10k_sso_hws_post_process(struct cn10k_sso_hws *ws, uint64_t *u64,
 		/* Mark vector mempool object as get */
 		RTE_MEMPOOL_CHECK_COOKIES(rte_mempool_from_obj((void *)u64[1]),
 					  (void **)&u64[1], 1, 1);
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index f77a9d7085..980932bd12 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -510,6 +510,8 @@ cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
 					      sso_hws_dual_tx_adptr_enq);
 	}
 
+	event_dev->dma_enqueue = cn9k_dma_adapter_enqueue;
+
 	event_dev->txa_enqueue_same_dest = event_dev->txa_enqueue;
 	rte_mb();
 #else
@@ -991,6 +993,19 @@ cn9k_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
 				 cn9k_sso_set_priv_mem);
 }
 
+static int
+cn9k_dma_adapter_caps_get(const struct rte_eventdev *event_dev,
+			  const struct rte_dma_dev *dma_dev, uint32_t *caps)
+{
+	RTE_SET_USED(dma_dev);
+
+	CNXK_VALID_DEV_OR_ERR_RET(event_dev->dev, "event_cn9k", EINVAL);
+
+	*caps = RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD;
+
+	return 0;
+}
+
 static struct eventdev_ops cn9k_sso_dev_ops = {
 	.dev_infos_get = cn9k_sso_info_get,
 	.dev_configure = cn9k_sso_dev_configure,
@@ -1027,6 +1042,8 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.crypto_adapter_queue_pair_add = cn9k_crypto_adapter_qp_add,
 	.crypto_adapter_queue_pair_del = cn9k_crypto_adapter_qp_del,
 
+	.dma_adapter_caps_get = cn9k_dma_adapter_caps_get,
+
 	.xstats_get = cnxk_sso_xstats_get,
 	.xstats_reset = cnxk_sso_xstats_reset,
 	.xstats_get_names = cnxk_sso_xstats_get_names,
diff --git a/drivers/event/cnxk/cn9k_worker.h b/drivers/event/cnxk/cn9k_worker.h
index 9ddab095ac..6ac6fffc86 100644
--- a/drivers/event/cnxk/cn9k_worker.h
+++ b/drivers/event/cnxk/cn9k_worker.h
@@ -11,6 +11,7 @@
 #include "cnxk_ethdev.h"
 #include "cnxk_eventdev.h"
 #include "cnxk_worker.h"
+#include "cnxk_dma_event_dp.h"
 #include "cn9k_cryptodev_ops.h"
 
 #include "cn9k_ethdev.h"
@@ -214,6 +215,8 @@ cn9k_sso_hws_post_process(uint64_t *u64, uint64_t mbuf, const uint32_t flags,
 		if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
 			cn9k_sso_process_tstamp(u64[1], mbuf, tstamp[port]);
 		u64[1] = mbuf;
+	} else if (CNXK_EVENT_TYPE_FROM_TAG(u64[0]) == RTE_EVENT_TYPE_DMADEV) {
+		u64[1] = cnxk_dma_adapter_dequeue(u64[1]);
 	}
 }
 
diff --git a/drivers/event/cnxk/meson.build b/drivers/event/cnxk/meson.build
index 51f1be8848..649419d5d3 100644
--- a/drivers/event/cnxk/meson.build
+++ b/drivers/event/cnxk/meson.build
@@ -314,8 +314,7 @@ foreach flag: extra_flags
     endif
 endforeach
 
-deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk']
-
+deps += ['bus_pci', 'common_cnxk', 'net_cnxk', 'crypto_cnxk', 'dma_cnxk']
 require_iova_in_mbuf = false
 
 annotate_locks = false
-- 
2.25.1