[dpdk-dev] [PATCH 10/13] net/sfc: implement EF10 native Tx datapath

Andrew Rybchenko arybchenko at solarflare.com
Thu Mar 2 08:07:16 CET 2017


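Implement a native Tx datapath for EF10 (SFN7xxx, SFN8xxx) adapters
which builds Tx descriptors and pushes the doorbell directly, without
going through libefx on the hot path. Multi-segment mbufs are
supported; VLAN insertion and TSO are not supported yet.

A datapath Tx event handler (qtx_ev) is added and used to handle Tx
events during queue flush. The tx_datapath device parameter gets a new
ef10 value to select the datapath explicitly.
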
Signed-off-by: Andrew Rybchenko <arybchenko at solarflare.com>
---
 doc/guides/nics/sfc_efx.rst   |   5 +-
 drivers/net/sfc/Makefile      |   1 +
 drivers/net/sfc/sfc_dp_tx.h   |  17 ++
 drivers/net/sfc/sfc_ef10_tx.c | 439 ++++++++++++++++++++++++++++++++++++++++++
 drivers/net/sfc/sfc_ethdev.c  |   1 +
 drivers/net/sfc/sfc_ev.c      |  15 +-
 drivers/net/sfc/sfc_kvargs.h  |   3 +-
 drivers/net/sfc/sfc_tx.c      |   5 +
 8 files changed, 483 insertions(+), 3 deletions(-)
 create mode 100644 drivers/net/sfc/sfc_ef10_tx.c

diff --git a/doc/guides/nics/sfc_efx.rst b/doc/guides/nics/sfc_efx.rst
index e864ccc..ed0a59f 100644
--- a/doc/guides/nics/sfc_efx.rst
+++ b/doc/guides/nics/sfc_efx.rst
@@ -191,13 +191,16 @@ boolean parameters value.
   more efficient than libefx-based and provides richer packet type
   classification, but lacks Rx scatter support.
 
-- ``tx_datapath`` [auto|efx] (default **auto**)
+- ``tx_datapath`` [auto|efx|ef10] (default **auto**)
 
   Choose transmit datapath implementation.
   **auto** allows the driver itself to make a choice based on firmware
   features available and required by the datapath implementation.
   **efx** chooses libefx-based datapath which supports VLAN insertion
   (full-feature firmware variant only), TSO and multi-segment mbufs.
+  **ef10** chooses EF10 (SFN7xxx, SFN8xxx) native datapath which is
+  more efficient than libefx-based, but does not support VLAN insertion
+  or TSO yet.
 
 - ``perf_profile`` [auto|throughput|low-latency] (default **throughput**)
 
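For reference, sfc_efx per-device parameters are passed as PCI whitelist
device arguments, so the new datapath can be requested explicitly with
e.g. -w 02:00.0,tx_datapath=ef10 (the PCI address here is just an
example).
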
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index 3c15722..bb7dcb2 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -92,6 +92,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tx.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_tso.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_dp.c
 SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_rx.c
+SRCS-$(CONFIG_RTE_LIBRTE_SFC_EFX_PMD) += sfc_ef10_tx.c
 
 VPATH += $(SRCDIR)/base
 
diff --git a/drivers/net/sfc/sfc_dp_tx.h b/drivers/net/sfc/sfc_dp_tx.h
index b6b7084..8c74428 100644
--- a/drivers/net/sfc/sfc_dp_tx.h
+++ b/drivers/net/sfc/sfc_dp_tx.h
@@ -74,6 +74,16 @@ struct sfc_dp_tx_qcreate_args {
 	unsigned int		flags;
 	/** Tx queue size */
 	unsigned int		txq_entries;
+	/** DMA-mapped Tx descriptor ring */
+	void			*txq_hw_ring;
+	/** Associated event queue size */
+	unsigned int		evq_entries;
+	/** Hardware event ring */
+	void			*evq_hw_ring;
+	/** The queue index in hardware (required to push the right doorbell) */
+	unsigned int		hw_index;
+	/** Virtual address of the memory-mapped BAR to push Tx doorbell */
+	volatile void		*mem_bar;
 };
 
 /**
@@ -115,6 +125,11 @@ typedef void (sfc_dp_tx_qstop_t)(struct sfc_dp_txq *dp_txq,
 				 unsigned int *evq_read_ptr);
 
 /**
+ * Transmit event handler used during queue flush only.
+ */
+typedef bool (sfc_dp_tx_qtx_ev_t)(struct sfc_dp_txq *dp_txq, unsigned int id);
+
+/**
  * Transmit queue function called after the queue flush.
  */
 typedef void (sfc_dp_tx_qreap_t)(struct sfc_dp_txq *dp_txq);
@@ -130,6 +145,7 @@ struct sfc_dp_tx {
 	sfc_dp_tx_qdestroy_t		*qdestroy;
 	sfc_dp_tx_qstart_t		*qstart;
 	sfc_dp_tx_qstop_t		*qstop;
+	sfc_dp_tx_qtx_ev_t		*qtx_ev;
 	sfc_dp_tx_qreap_t		*qreap;
 	eth_tx_burst_t			pkt_burst;
 };
@@ -151,6 +167,7 @@ struct sfc_dp_tx {
 }
 
 extern struct sfc_dp_tx sfc_efx_tx;
+extern struct sfc_dp_tx sfc_ef10_tx;
 
 #ifdef __cplusplus
 }
diff --git a/drivers/net/sfc/sfc_ef10_tx.c b/drivers/net/sfc/sfc_ef10_tx.c
new file mode 100644
index 0000000..8718961
--- /dev/null
+++ b/drivers/net/sfc/sfc_ef10_tx.c
@@ -0,0 +1,439 @@
+/*-
+ * Copyright (c) 2016 Solarflare Communications Inc.
+ * All rights reserved.
+ *
+ * This software was jointly developed between OKTET Labs (under contract
+ * for Solarflare) and Solarflare Communications, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *    this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ *    this list of conditions and the following disclaimer in the documentation
+ *    and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdbool.h>
+
+#include <rte_mbuf.h>
+#include <rte_io.h>
+
+#include "efx.h"
+#include "efx_types.h"
+#include "efx_regs.h"
+#include "efx_regs_ef10.h"
+
+#include "sfc_dp_tx.h"
+#include "sfc_tweak.h"
+#include "sfc_kvargs.h"
+
+/** Maximum length of the mbuf segment data */
+#define SFC_MBUF_SEG_LEN_MAX \
+	((1u << (8 * sizeof(((struct rte_mbuf *)0)->data_len))) - 1)
+
+/** Maximum length of the DMA descriptor data */
+#define SFC_EF10_TX_DMA_DESC_LEN_MAX \
+	((1u << ESF_DZ_TX_KER_BYTE_CNT_WIDTH) - 1)
+
+/** Maximum number of DMA descriptors per mbuf segment */
+#define SFC_EF10_TX_MBUF_SEG_DESCS_MAX \
+	SFC_DIV_ROUND_UP(SFC_MBUF_SEG_LEN_MAX, \
+			 SFC_EF10_TX_DMA_DESC_LEN_MAX)
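
For reference, with a 16-bit mbuf data_len and assuming the EF10 Tx
descriptor byte count field is 14 bits wide (the width named by
ESF_DZ_TX_KER_BYTE_CNT_WIDTH), the limits above work out to:

	SFC_MBUF_SEG_LEN_MAX           = (1 << 16) - 1 = 65535
	SFC_EF10_TX_DMA_DESC_LEN_MAX   = (1 << 14) - 1 = 16383
	SFC_EF10_TX_MBUF_SEG_DESCS_MAX = ceil(65535 / 16383) = 5

The transmit loop below emits exactly one descriptor per segment and
asserts that each segment fits into a single descriptor, so the
multiplier only makes the free descriptor space check conservative.
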
+
+struct sfc_ef10_tx_sw_desc {
+	struct rte_mbuf			*mbuf;
+};
+
+struct sfc_ef10_txq {
+	unsigned int			flags;
+#define SFC_EF10_TXQ_STARTED		0x1
+#define SFC_EF10_TXQ_NOT_RUNNING	0x2
+#define SFC_EF10_TXQ_EXCEPTION		0x4
+
+	unsigned int			ptr_mask;
+	unsigned int			added;
+	unsigned int			completed;
+	unsigned int			free_thresh;
+	unsigned int			evq_read_ptr;
+	struct sfc_ef10_tx_sw_desc	*sw_ring;
+	efx_qword_t			*txq_hw_ring;
+	volatile void			*doorbell;
+	volatile efx_qword_t		*evq_hw_ring;
+
+	/* Datapath transmit queue anchor */
+	struct sfc_dp_txq		dp;
+	void				*ctrl;
+	sfc_dp_exception_t		*exception;
+};
+
+static inline struct sfc_ef10_txq *
+sfc_ef10_txq_by_dp_txq(struct sfc_dp_txq *dp_txq)
+{
+	return container_of(dp_txq, struct sfc_ef10_txq, dp);
+}
+
+static bool
+sfc_ef10_tx_get_event(volatile efx_qword_t *evq_hw_ring,
+		      unsigned int *read_ptr, const unsigned int ptr_mask,
+		      unsigned int *flags, efx_qword_t *tx_ev)
+{
+	if (unlikely(*flags & SFC_EF10_TXQ_EXCEPTION))
+		return false;
+
+	*tx_ev = evq_hw_ring[*read_ptr & ptr_mask];
+
+	if (tx_ev->eq_u64[0] == UINT64_MAX)
+		return false;
+
+	if (unlikely(EFX_QWORD_FIELD(*tx_ev, FSF_AZ_EV_CODE) !=
+		     FSE_AZ_EV_CODE_TX_EV)) {
+		/* Do not move read_ptr so that the event is kept
+		 * for exception handling
+		 */
+		*flags |= SFC_EF10_TXQ_EXCEPTION;
+		return false;
+	}
+
+	++(*read_ptr);
+	return true;
+}
+
+static void
+sfc_ef10_tx_reap(struct sfc_ef10_txq *txq)
+{
+	volatile efx_qword_t * const evq_hw_ring = txq->evq_hw_ring;
+	unsigned int old_read_ptr = txq->evq_read_ptr;
+	unsigned int evq_read_ptr = old_read_ptr;
+	const unsigned int ptr_mask = txq->ptr_mask;
+	unsigned int completed = txq->completed;
+	unsigned int pending = completed;
+	const unsigned int curr_done = pending - 1;
+	unsigned int anew_done = curr_done;
+	efx_qword_t tx_ev;
+
+	while (sfc_ef10_tx_get_event(evq_hw_ring, &evq_read_ptr,
+				     ptr_mask, &txq->flags, &tx_ev)) {
+		if (EFX_TEST_QWORD_BIT(tx_ev, ESF_DZ_TX_DROP_EVENT_LBN))
+			continue;
+
+		/* Update the latest done descriptor */
+		anew_done = EFX_QWORD_FIELD(tx_ev, ESF_DZ_TX_DESCR_INDX);
+	}
+	pending += (anew_done - curr_done) & ptr_mask;
+
+	if (pending != completed) {
+		do {
+			struct sfc_ef10_tx_sw_desc *txd;
+
+			txd = &txq->sw_ring[completed & ptr_mask];
+
+			if (txd->mbuf != NULL) {
+				rte_pktmbuf_free(txd->mbuf);
+				txd->mbuf = NULL;
+			}
+		} while (++completed != pending);
+
+		txq->completed = completed;
+	}
+
+	if (old_read_ptr != evq_read_ptr) {
+		do {
+			EFX_SET_QWORD(evq_hw_ring[old_read_ptr & ptr_mask]);
+		} while (++old_read_ptr != evq_read_ptr);
+
+		txq->evq_read_ptr = evq_read_ptr;
+	}
+}
+
+static void
+sfc_ef10_tx_qdesc_dma_create(phys_addr_t addr, uint16_t size, bool eop,
+			     efx_qword_t *edp)
+{
+	EFX_POPULATE_QWORD_4(*edp,
+			     ESF_DZ_TX_KER_TYPE, 0,
+			     ESF_DZ_TX_KER_CONT, !eop,
+			     ESF_DZ_TX_KER_BYTE_CNT, size,
+			     ESF_DZ_TX_KER_BUF_ADDR, addr);
+}
+
+static inline void
+sfc_ef10_tx_qpush(struct sfc_ef10_txq *txq, unsigned int added,
+		  unsigned int pushed)
+{
+	efx_qword_t desc;
+	efx_oword_t oword;
+
+	/*
+	 * This improves performance by pushing a Tx descriptor at the same
+	 * time as the doorbell. The descriptor must also be added to the
+	 * TxQ, so that it can be used if the hardware decides not to use
+	 * the pushed descriptor.
+	 */
+	desc.eq_u64[0] = txq->txq_hw_ring[pushed & txq->ptr_mask].eq_u64[0];
+	EFX_POPULATE_OWORD_3(oword,
+		ERF_DZ_TX_DESC_WPTR, added & txq->ptr_mask,
+		ERF_DZ_TX_DESC_HWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_1),
+		ERF_DZ_TX_DESC_LWORD, EFX_QWORD_FIELD(desc, EFX_DWORD_0));
+
+	/* Make sure that all descriptor updates (Tx and event) reach memory */
+	rte_wmb();
+
+	/* DMA sync to device is not required */
+
+	*(volatile __m128i *)txq->doorbell = oword.eo_u128[0];
+	rte_io_wmb();
+}
+
+static uint16_t
+sfc_ef10_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct sfc_ef10_txq * const txq = sfc_ef10_txq_by_dp_txq(tx_queue);
+	unsigned int ptr_mask;
+	unsigned int added;
+	unsigned int dma_desc_space;
+	bool reap_done;
+	struct rte_mbuf **pktp;
+	struct rte_mbuf **pktp_end;
+
+	/* Exception handling may restart the TxQ, so cache nothing before it */
+	if (unlikely(txq->flags &
+		     (SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION))) {
+		if (txq->flags & SFC_EF10_TXQ_EXCEPTION)
+			txq->exception(txq->ctrl);
+		if (txq->flags & SFC_EF10_TXQ_NOT_RUNNING)
+			return 0;
+	}
+
+	ptr_mask = txq->ptr_mask;
+	added = txq->added;
+	dma_desc_space = EFX_TXQ_LIMIT(ptr_mask + 1) -
+			 (added - txq->completed);
+
+	reap_done = (dma_desc_space < txq->free_thresh);
+	if (reap_done) {
+		sfc_ef10_tx_reap(txq);
+		dma_desc_space = EFX_TXQ_LIMIT(ptr_mask + 1) -
+				 (added - txq->completed);
+	}
+
+	for (pktp = &tx_pkts[0], pktp_end = &tx_pkts[nb_pkts];
+	     pktp != pktp_end;
+	     ++pktp) {
+		struct rte_mbuf *m_seg = *pktp;
+		unsigned int pkt_start = added;
+		uint32_t pkt_len;
+
+		if (likely(pktp + 1 != pktp_end))
+			rte_mbuf_prefetch_part1(pktp[1]);
+
+		if (m_seg->nb_segs * SFC_EF10_TX_MBUF_SEG_DESCS_MAX >
+		    dma_desc_space) {
+			if (reap_done)
+				break;
+
+			sfc_ef10_tx_reap(txq);
+			reap_done = true;
+			dma_desc_space = EFX_TXQ_LIMIT(ptr_mask + 1) -
+				(added - txq->completed);
+			if (m_seg->nb_segs * SFC_EF10_TX_MBUF_SEG_DESCS_MAX >
+			    dma_desc_space)
+				break;
+		}
+
+		pkt_len = m_seg->pkt_len;
+		do {
+			phys_addr_t seg_addr = rte_mbuf_data_dma_addr(m_seg);
+			unsigned int seg_len = rte_pktmbuf_data_len(m_seg);
+
+			SFC_ASSERT(seg_len <= SFC_EF10_TX_DMA_DESC_LEN_MAX);
+
+			pkt_len -= seg_len;
+
+			sfc_ef10_tx_qdesc_dma_create(seg_addr,
+				seg_len, (pkt_len == 0),
+				&txq->txq_hw_ring[added & ptr_mask]);
+			++added;
+
+		} while ((m_seg = m_seg->next) != NULL);
+
+		dma_desc_space -= (added - pkt_start);
+
+		/* Assign mbuf to the last used desc */
+		txq->sw_ring[(added - 1) & ptr_mask].mbuf = *pktp;
+	}
+
+	if (likely(added != txq->added)) {
+		sfc_ef10_tx_qpush(txq, added, txq->added);
+		txq->added = added;
+	}
+
+#if SFC_TX_XMIT_PKTS_REAP_AT_LEAST_ONCE
+	if (!reap_done)
+		sfc_ef10_tx_reap(txq);
+#endif
+
+	return pktp - &tx_pkts[0];
+}
+
+static void *
+sfc_ef10_txq_get_ctrl(struct sfc_dp_txq *dp_txq)
+{
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+	return txq->ctrl;
+}
+
+static const struct sfc_dp_txq_ops sfc_ef10_txq_ops = {
+	.get_ctrl	= sfc_ef10_txq_get_ctrl,
+};
+
+static sfc_dp_tx_qcreate_t sfc_ef10_tx_qcreate;
+static int
+sfc_ef10_tx_qcreate(void *ctrl, sfc_dp_exception_t *exception, int socket_id,
+		    const struct sfc_dp_tx_qcreate_args *args,
+		    struct sfc_dp_txq **dp_txqp)
+{
+	struct sfc_ef10_txq *txq;
+	int rc;
+
+	rc = EINVAL;
+	if (args->txq_entries != args->evq_entries)
+		goto fail_bad_args;
+
+	rc = ENOMEM;
+	txq = rte_zmalloc_socket("sfc-ef10-txq", sizeof(*txq),
+				 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq == NULL)
+		goto fail_txq_alloc;
+
+	rc = ENOMEM;
+	txq->sw_ring = rte_calloc_socket("sfc-ef10-txq-sw_ring",
+					 args->txq_entries,
+					 sizeof(*txq->sw_ring),
+					 RTE_CACHE_LINE_SIZE, socket_id);
+	if (txq->sw_ring == NULL)
+		goto fail_sw_ring_alloc;
+
+	txq->flags = SFC_EF10_TXQ_NOT_RUNNING;
+	txq->ptr_mask = args->txq_entries - 1;
+	txq->free_thresh = args->free_thresh;
+	txq->txq_hw_ring = args->txq_hw_ring;
+	txq->doorbell = (volatile uint8_t *)args->mem_bar +
+			ER_DZ_TX_DESC_UPD_REG_OFST +
+			args->hw_index * ER_DZ_TX_DESC_UPD_REG_STEP;
+	txq->evq_hw_ring = args->evq_hw_ring;
+
+	txq->dp.ops = &sfc_ef10_txq_ops;
+	txq->ctrl = ctrl;
+	txq->exception = exception;
+
+	*dp_txqp = &txq->dp;
+	return 0;
+
+fail_sw_ring_alloc:
+	rte_free(txq);
+
+fail_txq_alloc:
+fail_bad_args:
+	return rc;
+}
+
+static sfc_dp_tx_qdestroy_t sfc_ef10_tx_qdestroy;
+static void
+sfc_ef10_tx_qdestroy(struct sfc_dp_txq *dp_txq)
+{
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+	rte_free(txq->sw_ring);
+	rte_free(txq);
+}
+
+static sfc_dp_tx_qstart_t sfc_ef10_tx_qstart;
+static int
+sfc_ef10_tx_qstart(struct sfc_dp_txq *dp_txq, unsigned int evq_read_ptr,
+		   unsigned int txq_desc_index)
+{
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+	txq->evq_read_ptr = evq_read_ptr;
+	txq->added = txq->completed = txq_desc_index;
+
+	txq->flags |= SFC_EF10_TXQ_STARTED;
+	txq->flags &= ~(SFC_EF10_TXQ_NOT_RUNNING | SFC_EF10_TXQ_EXCEPTION);
+
+	return 0;
+}
+
+static sfc_dp_tx_qstop_t sfc_ef10_tx_qstop;
+static void
+sfc_ef10_tx_qstop(struct sfc_dp_txq *dp_txq, unsigned int *evq_read_ptr)
+{
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+	txq->flags |= SFC_EF10_TXQ_NOT_RUNNING;
+
+	*evq_read_ptr = txq->evq_read_ptr;
+}
+
+static sfc_dp_tx_qtx_ev_t sfc_ef10_tx_qtx_ev;
+static bool
+sfc_ef10_tx_qtx_ev(struct sfc_dp_txq *dp_txq, __rte_unused unsigned int id)
+{
+	__rte_unused struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+
+	SFC_ASSERT(txq->flags & SFC_EF10_TXQ_NOT_RUNNING);
+
+	/*
+	 * It is safe to ignore Tx events here since all mbufs are
+	 * reaped on queue purge anyway.
+	 */
+
+	return false;
+}
+
+static sfc_dp_tx_qreap_t sfc_ef10_tx_qreap;
+static void
+sfc_ef10_tx_qreap(struct sfc_dp_txq *dp_txq)
+{
+	struct sfc_ef10_txq *txq = sfc_ef10_txq_by_dp_txq(dp_txq);
+	unsigned int txds;
+
+	for (txds = 0; txds <= txq->ptr_mask; ++txds) {
+		if (txq->sw_ring[txds].mbuf != NULL) {
+			rte_pktmbuf_free(txq->sw_ring[txds].mbuf);
+			txq->sw_ring[txds].mbuf = NULL;
+		}
+	}
+
+	txq->flags &= ~SFC_EF10_TXQ_STARTED;
+}
+
+struct sfc_dp_tx sfc_ef10_tx = {
+	.dp = {
+		.name		= SFC_KVARG_DATAPATH_EF10,
+		.type		= SFC_DP_TX,
+		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
+	},
+	.features		= 0,
+	.qcreate		= sfc_ef10_tx_qcreate,
+	.qdestroy		= sfc_ef10_tx_qdestroy,
+	.qstart			= sfc_ef10_tx_qstart,
+	.qtx_ev			= sfc_ef10_tx_qtx_ev,
+	.qstop			= sfc_ef10_tx_qstop,
+	.qreap			= sfc_ef10_tx_qreap,
+	.pkt_burst		= sfc_ef10_xmit_pkts,
+};
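
The completion accounting in sfc_ef10_tx_reap() relies on free-running
counters plus a masked subtraction against the ring-relative descriptor
index reported by hardware. A minimal standalone sketch of that
arithmetic (not driver code; ring size and counter values are
hypothetical):

	#include <assert.h>
	#include <stdio.h>

	int
	main(void)
	{
		const unsigned int ptr_mask = 8 - 1;	/* hypothetical 8-entry ring */
		unsigned int completed = 14;		/* free-running counter, ring index 6 */
		unsigned int pending = completed;
		const unsigned int curr_done = pending - 1;	/* 13, ring index 5 */
		unsigned int anew_done = 1;	/* ring-relative index from a Tx event */

		/* Done pointer walks 5 -> 6, 7, 0, 1: four newly completed entries */
		pending += (anew_done - curr_done) & ptr_mask;
		assert(pending - completed == 4);
		printf("newly completed: %u\n", pending - completed);
		return 0;
	}

The masked subtraction works even though curr_done is free-running and
anew_done is ring-relative, since only the low bits participate.
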
diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 9cf8624..168c965 100644
--- a/drivers/net/sfc/sfc_ethdev.c
+++ b/drivers/net/sfc/sfc_ethdev.c
@@ -1334,6 +1334,7 @@
 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
 		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
 
+		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
 		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
 	}
 }
diff --git a/drivers/net/sfc/sfc_ev.c b/drivers/net/sfc/sfc_ev.c
index 04e923f..5e7c619 100644
--- a/drivers/net/sfc/sfc_ev.c
+++ b/drivers/net/sfc/sfc_ev.c
@@ -210,6 +210,19 @@
 }
 
 static boolean_t
+sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
+{
+	struct sfc_evq *evq = arg;
+	struct sfc_dp_txq *dp_txq;
+
+	dp_txq = evq->dp_txq;
+	SFC_ASSERT(dp_txq != NULL);
+
+	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
+	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
+}
+
+static boolean_t
 sfc_ev_exception(void *arg, __rte_unused uint32_t code,
 		 __rte_unused uint32_t data)
 {
@@ -466,7 +479,7 @@
 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
 	.eec_initialized	= sfc_ev_initialized,
 	.eec_rx			= sfc_ev_nop_rx,
-	.eec_tx			= sfc_ev_nop_tx,
+	.eec_tx			= sfc_ev_dp_tx,
 	.eec_exception		= sfc_ev_exception,
 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
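
With this, Tx events on event queues bound to a datapath Tx queue are
dispatched to the datapath-specific qtx_ev handler. For the EF10
datapath this path is only exercised during queue flush, where
sfc_ef10_tx_qtx_ev() deliberately ignores the events since mbufs are
reaped on queue purge.
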
diff --git a/drivers/net/sfc/sfc_kvargs.h b/drivers/net/sfc/sfc_kvargs.h
index 14f46db..68cca4f 100644
--- a/drivers/net/sfc/sfc_kvargs.h
+++ b/drivers/net/sfc/sfc_kvargs.h
@@ -62,7 +62,8 @@
 
 #define SFC_KVARG_TX_DATAPATH		"tx_datapath"
 #define SFC_KVARG_VALUES_TX_DATAPATH \
-	"[" SFC_KVARG_DATAPATH_EFX "]"
+	"[" SFC_KVARG_DATAPATH_EFX "|" \
+	    SFC_KVARG_DATAPATH_EF10 "]"
 
 struct sfc_adapter;
 
diff --git a/drivers/net/sfc/sfc_tx.c b/drivers/net/sfc/sfc_tx.c
index 1dc97be..d3d5ecc 100644
--- a/drivers/net/sfc/sfc_tx.c
+++ b/drivers/net/sfc/sfc_tx.c
@@ -195,6 +195,11 @@
 	args.free_thresh = txq->free_thresh;
 	args.flags = tx_conf->txq_flags;
 	args.txq_entries = txq_info->entries;
+	args.txq_hw_ring = txq->mem.esm_base;
+	args.evq_entries = txq_info->entries;
+	args.evq_hw_ring = evq->mem.esm_base;
+	args.hw_index = txq->hw_index;
+	args.mem_bar = sa->mem_bar.esb_base;
 
 	rc = sa->dp_tx->qcreate(txq, sfc_tx_dp_exception, socket_id, &args,
 				&txq->dp);
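
Note that the event queue is sized to match the Tx queue here
(args.evq_entries = txq_info->entries), which satisfies the
txq_entries == evq_entries check in sfc_ef10_tx_qcreate().
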
-- 
1.8.2.3


