[v2,1/6] event/octeontx2: add event eth Rx adapter support
Commit Message
From: Pavan Nikhilesh <pbhagavatula@marvell.com>
Add event eth Rx adapter capabilities, queue add and delete functions.
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
---
drivers/event/octeontx2/Makefile | 4 +-
drivers/event/octeontx2/meson.build | 2 +-
drivers/event/octeontx2/otx2_evdev.c | 4 +
drivers/event/octeontx2/otx2_evdev.h | 15 ++
drivers/event/octeontx2/otx2_evdev_adptr.c | 254 +++++++++++++++++++++
5 files changed, 276 insertions(+), 3 deletions(-)
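
For context, a minimal application-side sketch (editor's illustration, not part of the patch) of how the ops added here are reached through the public Rx adapter API. The adapter id 0, event queue 0 and the port_conf argument are placeholder assumptions:

#include <string.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

static int
setup_rx_adapter(uint8_t evdev_id, uint16_t eth_port_id,
		 struct rte_event_port_conf *port_conf)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf;
	uint32_t caps;
	int rc;

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;			/* target event queue (SSO group) */
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
	qconf.ev.event_type = RTE_EVENT_TYPE_ETHDEV;

	/* Resolves to otx2_sso_rx_adapter_caps_get(); net_octeontx2 ports
	 * report RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT.
	 */
	rc = rte_event_eth_rx_adapter_caps_get(evdev_id, eth_port_id, &caps);
	if (rc)
		return rc;

	rc = rte_event_eth_rx_adapter_create(0, evdev_id, port_conf);
	if (rc)
		return rc;

	/* rx_queue_id = -1 adds every Rx queue of the port and ends up in
	 * otx2_sso_rx_adapter_queue_add() below.
	 */
	rc = rte_event_eth_rx_adapter_queue_add(0, eth_port_id, -1, &qconf);
	if (rc)
		return rc;

	return rte_event_eth_rx_adapter_start(0);
}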
Comments
> -----Original Message-----
> From: pbhagavatula@marvell.com <pbhagavatula@marvell.com>
> Sent: Monday, June 3, 2019 4:04 PM
> To: Jerin Jacob Kollanukkaran <jerinj@marvell.com>; Pavan Nikhilesh
> Bhagavatula <pbhagavatula@marvell.com>
> Cc: dev@dpdk.org
> Subject: [dpdk-dev] [PATCH v2 1/6] event/octeontx2: add event eth Rx adapter
> support
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> Add event eth Rx adapter capabilities, queue add and delete functions.
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
> Signed-off-by: Jerin Jacob <jerinj@marvell.com>
> ---
> drivers/event/octeontx2/Makefile | 4 +-
> drivers/event/octeontx2/meson.build | 2 +-
> drivers/event/octeontx2/otx2_evdev.c | 4 +
> drivers/event/octeontx2/otx2_evdev.h | 15 ++
> drivers/event/octeontx2/otx2_evdev_adptr.c | 254 +++++++++++++++++++++
> 5 files changed, 276 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/event/octeontx2/Makefile
> b/drivers/event/octeontx2/Makefile
> index d01da6b11..20d7c2fee 100644
> --- a/drivers/event/octeontx2/Makefile
> +++ b/drivers/event/octeontx2/Makefile
> @@ -40,7 +40,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c
> SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c
>
> LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs
> -LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf
> -LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
> +LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf -lrte_ethdev
> +LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2 -lrte_pmd_octeontx2
As Ferruh suggested, we will remove the rte_pmd_octeontx2 dependency.
>
> include $(RTE_SDK)/mk/rte.lib.mk
> diff --git a/drivers/event/octeontx2/meson.build
> b/drivers/event/octeontx2/meson.build
> index bdb5beed6..e94bc5944 100644
> --- a/drivers/event/octeontx2/meson.build
> +++ b/drivers/event/octeontx2/meson.build
> @@ -26,4 +26,4 @@ foreach flag: extra_flags
> endif
> endforeach
>
> -deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2']
> +deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
As Ferruh suggested, we will remove the rte_pmd_octeontx2 dependency.
> diff --git a/drivers/event/octeontx2/otx2_evdev.c
> b/drivers/event/octeontx2/otx2_evdev.c
> index 534ac4a6b..2ddc007f3 100644
> --- a/drivers/event/octeontx2/otx2_evdev.c
> +++ b/drivers/event/octeontx2/otx2_evdev.c
> @@ -1118,6 +1118,10 @@ static struct rte_eventdev_ops otx2_sso_ops = {
> .port_unlink = otx2_sso_port_unlink,
> .timeout_ticks = otx2_sso_timeout_ticks,
>
> + .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
> + .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
> + .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
> +
> .timer_adapter_caps_get = otx2_tim_caps_get,
>
> .xstats_get = otx2_sso_xstats_get,
> diff --git a/drivers/event/octeontx2/otx2_evdev.h
> b/drivers/event/octeontx2/otx2_evdev.h
> index eeb65f03f..d1e99b9d9 100644
> --- a/drivers/event/octeontx2/otx2_evdev.h
> +++ b/drivers/event/octeontx2/otx2_evdev.h
> @@ -6,9 +6,12 @@
> #define __OTX2_EVDEV_H__
>
> #include <rte_eventdev.h>
> +#include <rte_eventdev_pmd.h>
> +#include <rte_event_eth_rx_adapter.h>
>
> #include "otx2_common.h"
> #include "otx2_dev.h"
> +#include "otx2_ethdev.h"
> #include "otx2_mempool.h"
>
> #define EVENTDEV_NAME_OCTEONTX2_PMD otx2_eventdev
> @@ -234,6 +237,18 @@ void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
> uint32_t event_type);
> int sso_xae_reconfigure(struct rte_eventdev *event_dev);
> void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
> +
> +int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
> + const struct rte_eth_dev *eth_dev,
> + uint32_t *caps);
> +int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
> + const struct rte_eth_dev *eth_dev,
> + int32_t rx_queue_id,
> + const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
> +int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
> + const struct rte_eth_dev *eth_dev,
> + int32_t rx_queue_id);
> +
> /* Clean up API's */
> typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
> void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
> diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c
> b/drivers/event/octeontx2/otx2_evdev_adptr.c
> index 810722f89..1aef864fe 100644
> --- a/drivers/event/octeontx2/otx2_evdev_adptr.c
> +++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
> @@ -4,6 +4,197 @@
>
> #include "otx2_evdev.h"
>
> +int
> +otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
> + const struct rte_eth_dev *eth_dev, uint32_t *caps)
> +{
> + int rc;
> +
> + RTE_SET_USED(event_dev);
> + rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
> + if (rc)
> + *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
> + else
> + *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
> +
> + return 0;
> +}
> +
> +static inline int
> +sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
> + uint16_t eth_port_id)
> +{
> + struct otx2_mbox *mbox = dev->mbox;
> + struct nix_aq_enq_req *aq;
> + int rc;
> +
> + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
> + aq->qidx = qid;
> + aq->ctype = NIX_AQ_CTYPE_CQ;
> + aq->op = NIX_AQ_INSTOP_WRITE;
> +
> + aq->cq.ena = 0;
> + aq->cq.caching = 0;
> +
> + otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
> + aq->cq_mask.ena = ~(aq->cq_mask.ena);
> + aq->cq_mask.caching = ~(aq->cq_mask.caching);
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0) {
> + otx2_err("failed to disable cq context");
s/failed/Failed
> + goto fail;
> + }
> +
> + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
> + aq->qidx = qid;
> + aq->ctype = NIX_AQ_CTYPE_RQ;
> + aq->op = NIX_AQ_INSTOP_WRITE;
> +
> + aq->rq.sso_ena = 1;
> + aq->rq.sso_tt = tt;
> + aq->rq.sso_grp = ggrp;
> + aq->rq.ena_wqwd = 1;
> + /* Mbuf Header generation :
> + * > FIRST_SKIP is a superset of WQE_SKIP, don't modify first skip as
> + * it already has data related to mbuf size, headroom, private area.
> + * > Using WQE_SKIP we can directly assign
> + * mbuf = wqe - sizeof(struct mbuf);
> + * so that the mbuf header will not have unpredicted values while headroom
> + * and private data start at the beginning of wqe_data.
> + */
> + aq->rq.wqe_skip = 1;
> + aq->rq.wqe_caching = 1;
> + aq->rq.spb_ena = 0;
> + aq->rq.flow_tagw = 20; /* 20-bits */
> +
> + /* Flow Tag calculation :
> + *
> + * rq_tag <31:24> = good/bad_tag<8:0>;
> + * rq_tag <23:0> = [ltag]
> + *
> + * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; <31:20>
> + * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
> + *
> + * Setup :
> + * ltag<23:0> = (eth_port_id & 0xF) << 20;
> + * good/bad_tag<8:0> =
> + * ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
> + *
> + * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
> + * <27:20> (eth_port_id) | <20:0> [TAG]
> + */
> +
> + aq->rq.ltag = (eth_port_id & 0xF) << 20;
> + aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
> + (RTE_EVENT_TYPE_ETHDEV << 4);
> + aq->rq.bad_utag = aq->rq.good_utag;
> +
> + aq->rq.ena = 1;
> + aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
> + aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
> +
> + otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
> + /* mask the bits to write. */
> + aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
> + aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
> + aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
> + aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
> + aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
> + aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
> + aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
> + aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
> + aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
> + aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
> + aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
> + aq->rq_mask.ena = ~(aq->rq_mask.ena);
> + aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
> + aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0) {
> + otx2_err("failed to init rx adapter context");
s/failed/Failed
> + goto fail;
> + }
> +
> + return 0;
> +fail:
> + return rc;
> +}
> +
> +static inline int
> +sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
> +{
> + struct otx2_mbox *mbox = dev->mbox;
> + struct nix_aq_enq_req *aq;
> + int rc;
> +
> + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
> + aq->qidx = qid;
> + aq->ctype = NIX_AQ_CTYPE_CQ;
> + aq->op = NIX_AQ_INSTOP_INIT;
> +
> + aq->cq.ena = 1;
> + aq->cq.caching = 1;
> +
> + otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
> + aq->cq_mask.ena = ~(aq->cq_mask.ena);
> + aq->cq_mask.caching = ~(aq->cq_mask.caching);
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0) {
> + otx2_err("failed to init cq context");
s/failed/Failed
> + goto fail;
> + }
> +
> + aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
> + aq->qidx = qid;
> + aq->ctype = NIX_AQ_CTYPE_RQ;
> + aq->op = NIX_AQ_INSTOP_WRITE;
> +
> + aq->rq.sso_ena = 0;
> + aq->rq.sso_tt = SSO_TT_UNTAGGED;
> + aq->rq.sso_grp = 0;
> + aq->rq.ena_wqwd = 0;
> + aq->rq.wqe_caching = 0;
> + aq->rq.wqe_skip = 0;
> + aq->rq.spb_ena = 0;
> + aq->rq.flow_tagw = 0x20;
> + aq->rq.ltag = 0;
> + aq->rq.good_utag = 0;
> + aq->rq.bad_utag = 0;
> + aq->rq.ena = 1;
> + aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
> + aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
> +
> + otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
> + /* mask the bits to write. */
> + aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
> + aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
> + aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
> + aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
> + aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
> + aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
> + aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
> + aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
> + aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
> + aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
> + aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
> + aq->rq_mask.ena = ~(aq->rq_mask.ena);
> + aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
> + aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
> +
> + rc = otx2_mbox_process(mbox);
> + if (rc < 0) {
> + otx2_err("failed to clear rx adapter context");
s/failed/Failed
> + goto fail;
> + }
> +
> + return 0;
> +fail:
> + return rc;
> +}
> +
> void
> sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
> {
> @@ -17,3 +208,66 @@ sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
> break;
> }
> }
> 2.21.0
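
To make the flow-tag layout documented in sso_rxq_enable() easier to follow, here is a small editor-added C sketch that recomputes the same composition in software. It is illustrative only; the hardware performs this on getwork, and the example port id and flow tag below are made up:

#include <stdint.h>
#include <stdio.h>

#include <rte_eventdev.h>	/* RTE_EVENT_TYPE_ETHDEV */

/* Mirror of the tag composition described in the sso_rxq_enable() comment. */
static uint32_t
example_rx_tag(uint16_t eth_port_id, uint32_t hw_flow_tag)
{
	const uint32_t flow_tagw = 20;				/* aq->rq.flow_tagw */
	const uint32_t flow_tag_mask = (1u << flow_tagw) - 1;	/* tag<19:0> */
	uint32_t ltag = (eth_port_id & 0xF) << 20;		/* aq->rq.ltag */
	uint32_t utag = ((eth_port_id >> 4) & 0xF) |
			(RTE_EVENT_TYPE_ETHDEV << 4);		/* aq->rq.good_utag */
	uint32_t rq_tag = (utag << 24) | ltag;			/* rq_tag<31:24> | rq_tag<23:0> */

	/* tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag) */
	return (~flow_tag_mask & rq_tag) | (flow_tag_mask & hw_flow_tag);
}

int
main(void)
{
	/* Hypothetical port 3 with an RSS flow tag of 0x12345. */
	printf("tag = 0x%08x\n", example_rx_tag(3, 0x12345));
	return 0;
}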
@@ -40,7 +40,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c
LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs
-LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf
-LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
+LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf -lrte_ethdev
+LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2 -lrte_pmd_octeontx2
include $(RTE_SDK)/mk/rte.lib.mk
@@ -26,4 +26,4 @@ foreach flag: extra_flags
endif
endforeach
-deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2']
+deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
@@ -1118,6 +1118,10 @@ static struct rte_eventdev_ops otx2_sso_ops = {
.port_unlink = otx2_sso_port_unlink,
.timeout_ticks = otx2_sso_timeout_ticks,
+ .eth_rx_adapter_caps_get = otx2_sso_rx_adapter_caps_get,
+ .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
+ .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
+
.timer_adapter_caps_get = otx2_tim_caps_get,
.xstats_get = otx2_sso_xstats_get,
@@ -6,9 +6,12 @@
#define __OTX2_EVDEV_H__
#include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_event_eth_rx_adapter.h>
#include "otx2_common.h"
#include "otx2_dev.h"
+#include "otx2_ethdev.h"
#include "otx2_mempool.h"
#define EVENTDEV_NAME_OCTEONTX2_PMD otx2_eventdev
@@ -234,6 +237,18 @@ void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
uint32_t event_type);
int sso_xae_reconfigure(struct rte_eventdev *event_dev);
void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
+
+int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ uint32_t *caps);
+int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id);
+
/* Clean up API's */
typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
@@ -4,6 +4,197 @@
#include "otx2_evdev.h"
+int
+otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+ int rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+ else
+ *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+ return 0;
+}
+
+static inline int
+sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
+ uint16_t eth_port_id)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->cq.ena = 0;
+ aq->cq.caching = 0;
+
+ otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+ aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("failed to disable cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->rq.sso_ena = 1;
+ aq->rq.sso_tt = tt;
+ aq->rq.sso_grp = ggrp;
+ aq->rq.ena_wqwd = 1;
+ /* Mbuf Header generation :
+ * > FIRST_SKIP is a superset of WQE_SKIP, don't modify first skip as
+ * it already has data related to mbuf size, headroom, private area.
+ * > Using WQE_SKIP we can directly assign
+ * mbuf = wqe - sizeof(struct mbuf);
+ * so that the mbuf header will not have unpredicted values while headroom
+ * and private data start at the beginning of wqe_data.
+ */
+ aq->rq.wqe_skip = 1;
+ aq->rq.wqe_caching = 1;
+ aq->rq.spb_ena = 0;
+ aq->rq.flow_tagw = 20; /* 20-bits */
+
+ /* Flow Tag calculation :
+ *
+ * rq_tag <31:24> = good/bad_tag<8:0>;
+ * rq_tag <23:0> = [ltag]
+ *
+ * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; <31:20>
+ * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
+ *
+ * Setup :
+ * ltag<23:0> = (eth_port_id & 0xF) << 20;
+ * good/bad_tag<8:0> =
+ * ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
+ *
+ * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
+ * <27:20> (eth_port_id) | <20:0> [TAG]
+ */
+
+ aq->rq.ltag = (eth_port_id & 0xF) << 20;
+ aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
+ (RTE_EVENT_TYPE_ETHDEV << 4);
+ aq->rq.bad_utag = aq->rq.good_utag;
+
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+ otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+ /* mask the bits to write. */
+ aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
+ aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
+ aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
+ aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
+ aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
+ aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
+ aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
+ aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
+ aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
+ aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
+ aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+ aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
+ aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("failed to init rx adapter context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
+static inline int
+sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
+{
+ struct otx2_mbox *mbox = dev->mbox;
+ struct nix_aq_enq_req *aq;
+ int rc;
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_CQ;
+ aq->op = NIX_AQ_INSTOP_INIT;
+
+ aq->cq.ena = 1;
+ aq->cq.caching = 1;
+
+ otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+ aq->cq_mask.ena = ~(aq->cq_mask.ena);
+ aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("failed to init cq context");
+ goto fail;
+ }
+
+ aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+ aq->qidx = qid;
+ aq->ctype = NIX_AQ_CTYPE_RQ;
+ aq->op = NIX_AQ_INSTOP_WRITE;
+
+ aq->rq.sso_ena = 0;
+ aq->rq.sso_tt = SSO_TT_UNTAGGED;
+ aq->rq.sso_grp = 0;
+ aq->rq.ena_wqwd = 0;
+ aq->rq.wqe_caching = 0;
+ aq->rq.wqe_skip = 0;
+ aq->rq.spb_ena = 0;
+ aq->rq.flow_tagw = 0x20;
+ aq->rq.ltag = 0;
+ aq->rq.good_utag = 0;
+ aq->rq.bad_utag = 0;
+ aq->rq.ena = 1;
+ aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+ aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+ otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+ /* mask the bits to write. */
+ aq->rq_mask.sso_ena = ~(aq->rq_mask.sso_ena);
+ aq->rq_mask.sso_tt = ~(aq->rq_mask.sso_tt);
+ aq->rq_mask.sso_grp = ~(aq->rq_mask.sso_grp);
+ aq->rq_mask.ena_wqwd = ~(aq->rq_mask.ena_wqwd);
+ aq->rq_mask.wqe_caching = ~(aq->rq_mask.wqe_caching);
+ aq->rq_mask.wqe_skip = ~(aq->rq_mask.wqe_skip);
+ aq->rq_mask.spb_ena = ~(aq->rq_mask.spb_ena);
+ aq->rq_mask.flow_tagw = ~(aq->rq_mask.flow_tagw);
+ aq->rq_mask.ltag = ~(aq->rq_mask.ltag);
+ aq->rq_mask.good_utag = ~(aq->rq_mask.good_utag);
+ aq->rq_mask.bad_utag = ~(aq->rq_mask.bad_utag);
+ aq->rq_mask.ena = ~(aq->rq_mask.ena);
+ aq->rq_mask.pb_caching = ~(aq->rq_mask.pb_caching);
+ aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+ rc = otx2_mbox_process(mbox);
+ if (rc < 0) {
+ otx2_err("failed to clear rx adapter context");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ return rc;
+}
+
void
sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
{
@@ -17,3 +208,66 @@ sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
break;
}
}
+
+int
+otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+ uint16_t port = eth_dev->data->port_id;
+ int i, rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ return -EINVAL;
+
+ if (rx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
+ rc |= sso_rxq_enable(otx2_eth_dev, i,
+ queue_conf->ev.sched_type,
+ queue_conf->ev.queue_id, port);
+ }
+ } else {
+ rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
+ queue_conf->ev.sched_type,
+ queue_conf->ev.queue_id, port);
+ }
+
+ if (rc < 0) {
+ otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
+ queue_conf->ev.queue_id);
+ return rc;
+ }
+
+ return 0;
+}
+
+int
+otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+ const struct rte_eth_dev *eth_dev,
+ int32_t rx_queue_id)
+{
+ struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+ int i, rc;
+
+ RTE_SET_USED(event_dev);
+ rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+ if (rc)
+ return -EINVAL;
+
+ if (rx_queue_id < 0) {
+ for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
+ rc = sso_rxq_disable(dev, i);
+ } else {
+ rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+ }
+
+ if (rc < 0)
+ otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
+ eth_dev->data->port_id, rx_queue_id);
+
+ return rc;
+}
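
A matching editor-added teardown sketch, with adapter id 0 again a placeholder assumption; passing rx_queue_id = -1 exercises the all-queues loop in otx2_sso_rx_adapter_queue_del() above:

#include <rte_event_eth_rx_adapter.h>

static int
teardown_rx_adapter(uint16_t eth_port_id)
{
	int rc;

	/* Disables SSO delivery on every Rx queue of the port via
	 * otx2_sso_rx_adapter_queue_del() -> sso_rxq_disable().
	 */
	rc = rte_event_eth_rx_adapter_queue_del(0, eth_port_id, -1);
	if (rc)
		return rc;

	return rte_event_eth_rx_adapter_stop(0);
}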