[dpdk-dev] [PATCH 17/23] bnxt: add support for LRO
Ajit Khaparde
ajit.khaparde at broadcom.com
Thu May 18 03:58:07 CEST 2017
This patch adds support to enable and disable LRO.
To support this feature, the driver creates an aggregation ring.
When the hardware starts doing LRO, it sends a tpa_start completion.
When the driver receives a tpa_end completion, it indicates that the
LRO chaining is complete.
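The RX completion handler dispatches on the completion type; condensed, the
flow this patch adds to bnxt_rx_pkt() looks like this (sketch only, names as
in the patch):

    cmp_type = CMP_TYPE(rxcmp);
    if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
            /* HW opened an aggregation: park the first mbuf per agg_id */
            bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
                           (struct rx_tpa_start_cmpl_hi *)rxcmp1);
            /* nothing is returned to the application yet */
    } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
            /* HW closed the aggregation: chain agg buffers, return mbuf */
            mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
                                (struct rx_tpa_end_cmpl *)rxcmp,
                                (struct rx_tpa_end_cmpl_hi *)rxcmp1);
    }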
Signed-off-by: Steeven Li <steeven.li at broadcom.com>
Signed-off-by: Ajit Khaparde <ajit.khaparde at broadcom.com>
---
drivers/net/bnxt/bnxt_ethdev.c | 7 +
drivers/net/bnxt/bnxt_hwrm.c | 44 ++-
drivers/net/bnxt/bnxt_hwrm.h | 2 +
drivers/net/bnxt/bnxt_ring.c | 32 +-
drivers/net/bnxt/bnxt_ring.h | 4 +-
drivers/net/bnxt/bnxt_rxq.c | 12 +
drivers/net/bnxt/bnxt_rxq.h | 2 +
drivers/net/bnxt/bnxt_rxr.c | 315 ++++++++++++----
drivers/net/bnxt/bnxt_rxr.h | 40 ++
drivers/net/bnxt/hsi_struct_def_dpdk.h | 647 ++++++++++++++++++++++++++++++++-
10 files changed, 1019 insertions(+), 86 deletions(-)
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index 5f22091..7fafd02 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -286,6 +286,13 @@ static int bnxt_init_chip(struct bnxt *bp)
goto err_out;
}
}
+
+ bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1);
+ else
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 0);
}
rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0], 0, NULL);
if (rc) {
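(For reference, an application opts into this path through the standard
ethdev configuration. A minimal sketch against the rte_eth_rxmode of this
release; port_id and the queue counts are placeholders:)

    #include <rte_ethdev.h>

    static int configure_with_lro(uint8_t port_id)
    {
            struct rte_eth_conf conf = { .rxmode = { .enable_lro = 1 } };

            /* ends up driving bnxt_hwrm_vnic_tpa_cfg(bp, vnic, 1) above */
            return rte_eth_dev_configure(port_id, 1 /* rxq */, 1 /* txq */,
                                         &conf);
    }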
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index 136365c..bea855e 100644
--- a/drivers/net/bnxt/bnxt_hwrm.c
+++ b/drivers/net/bnxt/bnxt_hwrm.c
@@ -1148,19 +1148,15 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
HWRM_PREP(req, VNIC_PLCMODES_CFG, -1, resp);
req.flags = rte_cpu_to_le_32(
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_REGULAR_PLACEMENT |
HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV4 | //TODO
-// HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_HDS_IPV6);
+
req.enables = rte_cpu_to_le_32(
HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);
-// HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID);
size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
size -= RTE_PKTMBUF_HEADROOM;
req.jumbo_thresh = rte_cpu_to_le_16(size);
-// req.hds_threshold = rte_cpu_to_le_16(size);
req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
@@ -1170,6 +1166,41 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
return rc;
}
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable)
+{
+ int rc = 0;
+ struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
+ struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;
+
+ HWRM_PREP(req, VNIC_TPA_CFG, -1, resp);
+
+ if (enable) {
+ req.enables = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
+ HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
+ req.flags = rte_cpu_to_le_32(
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
+ HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
+ req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+ req.max_agg_segs = rte_cpu_to_le_16(5);
+ req.max_aggs =
+ rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
+ req.min_agg_len = rte_cpu_to_le_32(512);
+ }
+
+ rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
+
+ HWRM_CHECK_RESULT;
+
+ return rc;
+}
+
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
struct hwrm_func_cfg_input req = {0};
@@ -1562,6 +1593,9 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
bnxt_clear_hwrm_vnic_filters(bp, vnic);
bnxt_hwrm_vnic_ctx_free(bp, vnic);
+
+ bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
+
bnxt_hwrm_vnic_free(bp, vnic);
}
/* Ring resources */
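Note on the constants programmed in bnxt_hwrm_vnic_tpa_cfg(): per the hsi
definitions added later in this patch, max_agg_segs and max_aggs are
log2-encoded, so the request decodes as follows (illustrative comment, not
part of the diff):

    /* req.max_agg_segs = 5                  -> 1 << 5 = 32 TCP segments
     *                                          per aggregation
     * req.max_aggs = ..._MAX_AGGS_MAX (0x7) -> 1 << 7 = 128 aggregation
     *                                          contexts, the spec maximum
     * req.min_agg_len = 512                 -> no aggregation context is
     *                                          opened for payloads < 512B
     */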
diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h
index 9478d12..2d707a3 100644
--- a/drivers/net/bnxt/bnxt_hwrm.h
+++ b/drivers/net/bnxt/bnxt_hwrm.h
@@ -101,6 +101,8 @@ int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool enable);
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp);
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp);
diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c
index 3eed354..dd68a3f 100644
--- a/drivers/net/bnxt/bnxt_ring.c
+++ b/drivers/net/bnxt/bnxt_ring.c
@@ -31,6 +31,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bitmap.h>
#include <rte_memzone.h>
#include <unistd.h>
@@ -137,11 +138,23 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size *
sizeof(struct rx_prod_pkt_bd)) : 0;
- int total_alloc_len = rx_ring_start + rx_ring_len;
- int ag_ring_start = 0;
+ int ag_ring_start = rx_ring_start + rx_ring_len;
+ int ag_ring_len = rx_ring_len * AGG_RING_SIZE_FACTOR;
- ag_ring_start = rx_ring_start + rx_ring_len;
- total_alloc_len = ag_ring_start + rx_ring_len * AGG_RING_SIZE_FACTOR;
+ int ag_bitmap_start = ag_ring_start + ag_ring_len;
+ int ag_bitmap_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(rte_bitmap_get_memory_footprint(
+ rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR)) : 0;
+
+ int tpa_info_start = ag_bitmap_start + ag_bitmap_len;
+ int tpa_info_len = rx_ring_info ?
+ RTE_CACHE_LINE_ROUNDUP(BNXT_TPA_MAX *
+ sizeof(struct bnxt_tpa_info)) : 0;
+
+ int total_alloc_len = tpa_info_start;
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ total_alloc_len += tpa_info_len;
snprintf(mz_name, RTE_MEMZONE_NAMESIZE,
"bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain,
@@ -230,6 +243,17 @@ int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx,
rx_ring_info->ag_buf_ring =
(struct bnxt_sw_rx_bd *)rx_ring->vmem;
}
+
+ rx_ring_info->ag_bitmap =
+ rte_bitmap_init(rx_ring_info->rx_ring_struct->ring_size *
+ AGG_RING_SIZE_FACTOR, (uint8_t *)mz->addr +
+ ag_bitmap_start, ag_bitmap_len);
+
+ /* TPA info */
+ if (bp->eth_dev->data->dev_conf.rxmode.enable_lro)
+ rx_ring_info->tpa_info =
+ ((struct bnxt_tpa_info *)((char *)mz->addr +
+ tpa_info_start));
}
cp_ring->bd = ((char *)mz->addr + cp_ring_start);
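The enlarged per-queue memzone is carved up by the offsets computed above;
schematically (illustrative comment, sizes are the cache-line-rounded
lengths from this hunk):

    /*
     * rx_ring_start   : RX descriptor ring
     * ag_ring_start   : AGG descriptor ring
     *                   (rx_ring_len * AGG_RING_SIZE_FACTOR)
     * ag_bitmap_start : rte_bitmap footprint for
     *                   ring_size * AGG_RING_SIZE_FACTOR bits
     * tpa_info_start  : BNXT_TPA_MAX * sizeof(struct bnxt_tpa_info),
     *                   allocated only when rxmode.enable_lro is set
     */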
diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h
index b5bd287..6d1eb58 100644
--- a/drivers/net/bnxt/bnxt_ring.h
+++ b/drivers/net/bnxt/bnxt_ring.h
@@ -57,8 +57,8 @@
#define DEFAULT_RX_RING_SIZE 256
#define DEFAULT_TX_RING_SIZE 256
-#define MAX_TPA 128
-#define AGG_RING_SIZE_FACTOR 2
+#define BNXT_TPA_MAX 64
+#define AGG_RING_SIZE_FACTOR 2
/* These assume 4k pages */
#define MAX_RX_DESC_CNT (8 * 1024)
diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index 5a5def5..b0bbed1 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -214,6 +214,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
{
struct bnxt_sw_rx_bd *sw_ring;
+ struct bnxt_tpa_info *tpa_info;
uint16_t i;
if (rxq) {
@@ -236,6 +237,17 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
}
}
}
+
+ /* Free up mbufs in TPA */
+ tpa_info = rxq->rx_ring->tpa_info;
+ if (tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ if (tpa_info[i].mbuf) {
+ rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+ tpa_info[i].mbuf = NULL;
+ }
+ }
+ }
}
}
diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h
index 0695214..01aaa00 100644
--- a/drivers/net/bnxt/bnxt_rxq.h
+++ b/drivers/net/bnxt/bnxt_rxq.h
@@ -59,6 +59,8 @@ struct bnxt_rx_queue {
uint32_t rx_buf_use_size; /* useable size */
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_cp_ring_info *cp_ring;
+
+ struct bnxt_tpa_info *rx_tpa;
};
void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq);
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 12869a4..de86161 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -34,6 +34,7 @@
#include <inttypes.h>
#include <stdbool.h>
+#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -102,24 +103,28 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
return 0;
}
-#ifdef BNXT_DEBUG
-static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
+static inline void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr,
struct rte_mbuf *mbuf)
{
- uint16_t prod = rxr->rx_prod;
+ uint16_t prod = RING_NEXT(rxr->rx_ring_struct, rxr->rx_prod);
struct bnxt_sw_rx_bd *prod_rx_buf;
- struct rx_prod_pkt_bd *prod_bd, *cons_bd;
+ struct rx_prod_pkt_bd *prod_bd;
prod_rx_buf = &rxr->rx_buf_ring[prod];
+ RTE_ASSERT(prod_rx_buf->mbuf == NULL);
+ RTE_ASSERT(mbuf != NULL);
+
prod_rx_buf->mbuf = mbuf;
prod_bd = &rxr->rx_desc_ring[prod];
- cons_bd = &rxr->rx_desc_ring[cons];
- prod_bd->addr = cons_bd->addr;
+ prod_bd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(mbuf));
+
+ rxr->rx_prod = prod;
}
+#ifdef BNXT_DEBUG
static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
struct rte_mbuf *mbuf)
{
@@ -138,7 +143,192 @@ static void bnxt_reuse_ag_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons,
}
#endif
-static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
+static inline
+struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
+ uint16_t cons)
+{
+ struct bnxt_sw_rx_bd *cons_rx_buf;
+ struct rte_mbuf *mbuf;
+
+ cons_rx_buf = &rxr->rx_buf_ring[cons];
+ RTE_ASSERT(cons_rx_buf->mbuf != NULL);
+ mbuf = cons_rx_buf->mbuf;
+ cons_rx_buf->mbuf = NULL;
+ return mbuf;
+}
+
+static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
+ struct rx_tpa_start_cmpl *tpa_start,
+ struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = rte_le_to_cpu_32(tpa_start->agg_id &
+ RX_TPA_START_CMPL_AGG_ID_MASK) >> RX_TPA_START_CMPL_AGG_ID_SFT;
+ uint16_t data_cons;
+ struct bnxt_tpa_info *tpa_info;
+ struct rte_mbuf *mbuf;
+
+ data_cons = tpa_start->opaque;
+ tpa_info = &rxr->tpa_info[agg_id];
+
+ mbuf = bnxt_consume_rx_buf(rxr, data_cons);
+
+ bnxt_reuse_rx_mbuf(rxr, tpa_info->mbuf);
+
+ tpa_info->mbuf = mbuf;
+ tpa_info->len = rte_le_to_cpu_32(tpa_start->len);
+
+ mbuf->nb_segs = 1;
+ mbuf->next = NULL;
+ mbuf->pkt_len = rte_le_to_cpu_32(tpa_start->len);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = rxq->port_id;
+ mbuf->ol_flags = PKT_RX_LRO;
+ if (likely(tpa_start->flags_type &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+ mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+ mbuf->ol_flags |= PKT_RX_RSS_HASH;
+ } else {
+ mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ }
+ if (tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+ mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+ mbuf->ol_flags |= PKT_RX_VLAN_PKT;
+ }
+ if (likely(tpa_start1->flags2 &
+ rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+
+ /* recycle next mbuf */
+ data_cons = RING_NEXT(rxr->rx_ring_struct, data_cons);
+ bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+}
+
+static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
+ uint8_t agg_bufs, uint32_t raw_cp_cons)
+{
+ uint16_t last_cp_cons;
+ struct rx_pkt_cmpl *agg_cmpl;
+
+ raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
+ last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
+ agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
+ return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+}
+
+/* TPA consumes agg buffers out of order; refill only the contiguous run of freed slots */
+static int bnxt_prod_ag_mbuf(struct bnxt_rx_queue *rxq)
+{
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t next = RING_NEXT(rxr->ag_ring_struct, rxr->ag_prod);
+
+ /* TODO batch allocation for better performance */
+ while (rte_bitmap_get(rxr->ag_bitmap, next)) {
+ if (unlikely(bnxt_alloc_ag_data(rxq, rxr, next))) {
+ RTE_LOG(ERR, PMD,
+ "agg mbuf alloc failed: prod=0x%x\n", next);
+ break;
+ }
+ rte_bitmap_clear(rxr->ag_bitmap, next);
+ rxr->ag_prod = next;
+ next = RING_NEXT(rxr->ag_ring_struct, next);
+ }
+
+ return 0;
+}
+
+static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
+ struct rte_mbuf *mbuf, uint32_t *tmp_raw_cons,
+ uint8_t agg_buf)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ int i;
+ uint16_t cp_cons, ag_cons;
+ struct rx_pkt_cmpl *rxcmp;
+ struct rte_mbuf *last = mbuf;
+
+ for (i = 0; i < agg_buf; i++) {
+ struct bnxt_sw_rx_bd *ag_buf;
+ struct rte_mbuf *ag_mbuf;
+ *tmp_raw_cons = NEXT_RAW_CMP(*tmp_raw_cons);
+ cp_cons = RING_CMP(cpr->cp_ring_struct, *tmp_raw_cons);
+ rxcmp = (struct rx_pkt_cmpl *)
+ &cpr->cp_desc_ring[cp_cons];
+
+#ifdef BNXT_DEBUG
+ bnxt_dump_cmpl(cp_cons, rxcmp);
+#endif
+
+ ag_cons = rxcmp->opaque;
+ RTE_ASSERT(ag_cons <= rxr->ag_ring_struct->ring_mask);
+ ag_buf = &rxr->ag_buf_ring[ag_cons];
+ ag_mbuf = ag_buf->mbuf;
+ RTE_ASSERT(ag_mbuf != NULL);
+
+ ag_mbuf->data_len = rte_le_to_cpu_16(rxcmp->len);
+
+ mbuf->nb_segs++;
+ mbuf->pkt_len += ag_mbuf->data_len;
+
+ last->next = ag_mbuf;
+ last = ag_mbuf;
+
+ ag_buf->mbuf = NULL;
+
+ /*
+ * Aggregation buffers are consumed out of order by the TPA module,
+ * so use a bitmap to track freed slots that still need to be
+ * refilled and handed back to the NIC.
+ */
+ rte_bitmap_set(rxr->ag_bitmap, ag_cons);
+ }
+ bnxt_prod_ag_mbuf(rxq);
+ return 0;
+}
+
+static inline struct rte_mbuf *bnxt_tpa_end(
+ struct bnxt_rx_queue *rxq,
+ uint32_t *raw_cp_cons,
+ struct rx_tpa_end_cmpl *tpa_end,
+ struct rx_tpa_end_cmpl_hi *tpa_end1 __rte_unused)
+{
+ struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
+ struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint8_t agg_id = (tpa_end->agg_id & RX_TPA_END_CMPL_AGG_ID_MASK)
+ >> RX_TPA_END_CMPL_AGG_ID_SFT;
+ struct rte_mbuf *mbuf;
+ uint8_t agg_bufs;
+ struct bnxt_tpa_info *tpa_info;
+
+ tpa_info = &rxr->tpa_info[agg_id];
+ mbuf = tpa_info->mbuf;
+ RTE_ASSERT(mbuf != NULL);
+
+ rte_prefetch0(mbuf);
+ agg_bufs = (rte_le_to_cpu_32(tpa_end->agg_bufs_v1) &
+ RX_TPA_END_CMPL_AGG_BUFS_MASK) >> RX_TPA_END_CMPL_AGG_BUFS_SFT;
+ if (agg_bufs) {
+ if (!bnxt_agg_bufs_valid(cpr, agg_bufs, *raw_cp_cons))
+ return NULL;
+ bnxt_rx_pages(rxq, mbuf, raw_cp_cons, agg_bufs);
+ }
+ mbuf->l4_len = tpa_end->payload_offset;
+
+ struct rte_mbuf *new_data = __bnxt_alloc_rx_data(rxq->mb_pool);
+ RTE_ASSERT(new_data != NULL);
+ if (!new_data)
+ return NULL;
+ tpa_info->mbuf = new_data;
+
+ return mbuf;
+}
+
+static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
{
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
@@ -148,12 +338,13 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
uint32_t tmp_raw_cons = *raw_cons;
uint16_t cons, prod, cp_cons =
RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- uint16_t ag_cons, ag_prod = rxr->ag_prod;
- struct bnxt_sw_rx_bd *rx_buf;
+#ifdef BNXT_DEBUG
+ uint16_t ag_cons;
+#endif
struct rte_mbuf *mbuf;
int rc = 0;
- uint8_t i;
uint8_t agg_buf = 0;
+ uint16_t cmp_type;
rxcmp = (struct rx_pkt_cmpl *)
&cpr->cp_desc_ring[cp_cons];
@@ -165,12 +356,34 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
return -EBUSY;
+ cmp_type = CMP_TYPE(rxcmp);
+ if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_START) {
+ bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
+ (struct rx_tpa_start_cmpl_hi *)rxcmp1);
+ rc = -EINVAL; /* Continue w/o new mbuf */
+ goto next_rx;
+ } else if (cmp_type == RX_PKT_CMPL_TYPE_RX_L2_TPA_END) {
+ mbuf = bnxt_tpa_end(rxq, &tmp_raw_cons,
+ (struct rx_tpa_end_cmpl *)rxcmp,
+ (struct rx_tpa_end_cmpl_hi *)rxcmp1);
+ if (unlikely(!mbuf))
+ return -EBUSY;
+ *rx_pkt = mbuf;
+ goto next_rx;
+ } else if (cmp_type != RX_PKT_CMPL_TYPE_RX_L2) {
+ rc = -EINVAL;
+ goto next_rx;
+ }
+
+ agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
+ >> RX_PKT_CMPL_AGG_BUFS_SFT;
+ if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
+ return -EBUSY;
+
prod = rxr->rx_prod;
- /* EW - GRO deferred to phase 3 */
cons = rxcmp->opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- mbuf = rx_buf->mbuf;
+ mbuf = bnxt_consume_rx_buf(rxr, cons);
rte_prefetch0(mbuf);
if (mbuf == NULL)
@@ -190,61 +403,8 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
- agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
- >> RX_PKT_CMPL_AGG_BUFS_SFT;
- if (agg_buf) {
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons + agg_buf);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- if (!CMP_VALID(rxcmp, tmp_raw_cons + agg_buf,
- cpr->cp_ring_struct))
- return -EBUSY;
- RTE_LOG(DEBUG, PMD, "JUMBO Frame %d. %x, agg_buf %x,\n",
- mbuf->pkt_len, rxcmp->agg_bufs_v1, agg_buf);
- }
-
- for (i = 0; i < agg_buf; i++) {
- struct bnxt_sw_rx_bd *ag_buf;
- struct rte_mbuf *ag_mbuf;
- tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- rxcmp = (struct rx_pkt_cmpl *)
- &cpr->cp_desc_ring[cp_cons];
- ag_cons = rxcmp->opaque;
- ag_buf = &rxr->ag_buf_ring[ag_cons];
- ag_mbuf = ag_buf->mbuf;
- ag_mbuf->nb_segs = 1;
- ag_mbuf->data_len = rxcmp->len;
-
- mbuf->nb_segs++;
- mbuf->pkt_len += ag_mbuf->data_len;
- if (mbuf->next == NULL) {
- mbuf->next = ag_mbuf;
- } else {
- struct rte_mbuf *temp_mbuf = mbuf;
-
- while (temp_mbuf->next != NULL)
- temp_mbuf = temp_mbuf->next;
- temp_mbuf->next = ag_mbuf;
- }
- ag_buf->mbuf = NULL;
-
- ag_prod = RING_NEXT(rxr->ag_ring_struct, ag_prod);
- if (bnxt_alloc_ag_data(rxq, rxr, ag_prod)) {
- RTE_LOG(ERR, PMD,
- "agg mbuf alloc failed: prod=0x%x\n",
- ag_prod);
- rc = -ENOMEM;
- }
- rxr->ag_prod = ag_prod;
-
-#ifdef BNXT_DEBUG
- if (!CMP_VALID((struct cmpl_base *)
- &cpr->cp_desc_ring[cp_cons], tmp_raw_cons,
- cpr->cp_ring_struct))
- return -EBUSY;
-#endif
- }
+ if (agg_buf)
+ bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf);
if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
mbuf->vlan_tci = rxcmp1->metadata &
@@ -254,7 +414,6 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
mbuf->ol_flags |= PKT_RX_VLAN_PKT;
}
- rx_buf->mbuf = NULL;
#ifdef BNXT_DEBUG
if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
/* Re-install the mbuf back to the rx ring */
@@ -294,10 +453,8 @@ static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
*/
*rx_pkt = mbuf;
-#ifdef BNXT_DEBUG
+
next_rx:
-#endif
- //rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod);
*raw_cons = tmp_raw_cons;
@@ -333,7 +490,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
if (likely(!rc))
nb_rx_pkts++;
- else if (rc == -EBUSY) /* partial completion */
+ if (rc == -EBUSY) /* partial completion */
break;
}
raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -341,6 +498,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
break;
}
+ cpr->cp_raw_cons = raw_cons;
if (prod == rxr->rx_prod && ag_prod == rxr->ag_prod) {
/*
* For PMD, there is no need to keep on pushing to REARM
@@ -348,7 +506,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
*/
return nb_rx_pkts;
}
- cpr->cp_raw_cons = raw_cons;
B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
B_RX_DB(rxr->rx_doorbell, rxr->rx_prod);
@@ -516,6 +673,16 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
rxr->ag_prod = prod;
prod = RING_NEXT(rxr->ag_ring_struct, prod);
}
+
+ if (rxr->tpa_info) {
+ for (i = 0; i < BNXT_TPA_MAX; i++) {
+ rxr->tpa_info[i].mbuf =
+ __bnxt_alloc_rx_data(rxq->mb_pool);
+ if (!rxr->tpa_info[i].mbuf)
+ return -ENOMEM;
+ }
+ }
+
RTE_LOG(DEBUG, PMD, "bnxt_init_one_rx_ring AGG Done!\n");
return 0;
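The out-of-order refill relies only on the stock rte_bitmap API. Condensed
into one illustrative function (the helper name and standalone framing are
placeholders; the calls mirror bnxt_rx_pages() and bnxt_prod_ag_mbuf()):

    #include <rte_bitmap.h>

    /* mem must be cache-line aligned and at least
     * rte_bitmap_get_memory_footprint(nbits) bytes;
     * nbits = ring_size * AGG_RING_SIZE_FACTOR */
    static void ag_slot_lifecycle(uint8_t *mem, uint32_t nbits, uint32_t slot)
    {
            struct rte_bitmap *bmp = rte_bitmap_init(nbits, mem,
                            rte_bitmap_get_memory_footprint(nbits));

            /* bnxt_rx_pages(): TPA frees agg slots out of order; mark it */
            rte_bitmap_set(bmp, slot);

            /* bnxt_prod_ag_mbuf(): walk forward from ag_prod, refill only
             * marked slots, clear the mark, then advance the producer */
            if (rte_bitmap_get(bmp, slot)) {
                    /* ... bnxt_alloc_ag_data(rxq, rxr, slot) ... */
                    rte_bitmap_clear(bmp, slot);
            }
    }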
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index e104fbd..f8d6dc8 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -37,6 +37,39 @@
#define B_RX_DB(db, prod) \
(*(uint32_t *)db = (DB_KEY_RX | prod))
+#define BNXT_TPA_L4_SIZE(x) \
+ ({ \
+ typeof(x) hdr_info = (x); \
+ ((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32; \
+ })
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
+
+enum pkt_hash_types {
+ PKT_HASH_TYPE_NONE, /* Undefined type */
+ PKT_HASH_TYPE_L2, /* Input: src_MAC, dest_MAC */
+ PKT_HASH_TYPE_L3, /* Input: src_IP, dst_IP */
+ PKT_HASH_TYPE_L4, /* Input: src_IP, dst_IP, src_port, dst_port */
+};
+
+struct bnxt_tpa_info {
+ struct rte_mbuf *mbuf;
+ uint16_t len;
+ unsigned short gso_type;
+ uint32_t flags2;
+ uint32_t metadata;
+ enum pkt_hash_types hash_type;
+ uint32_t rss_hash;
+ uint32_t hdr_info;
+};
+
struct bnxt_sw_rx_bd {
struct rte_mbuf *mbuf; /* data associated with RX descriptor */
};
@@ -57,6 +90,13 @@ struct bnxt_rx_ring_info {
struct bnxt_ring *rx_ring_struct;
struct bnxt_ring *ag_ring_struct;
+
+ /*
+ * TPA returns agg buffers out of order, so track freed slots in a
+ * bitmap until they can be refilled and given back to the NIC.
+ */
+ struct rte_bitmap *ag_bitmap;
+
+ struct bnxt_tpa_info *tpa_info;
};
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
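For illustration, the BNXT_TPA_*_OFF macros above unpack the packed offsets
word delivered in rx_tpa_start_cmpl_hi (the helper name below is a
placeholder, not driver code):

    /* hdr_info is the CPU-endian value of
     * inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset */
    static inline void tpa_unpack_offsets(uint32_t hdr_info)
    {
            uint16_t outer_l3 = BNXT_TPA_OUTER_L3_OFF(hdr_info); /* bits 8:0 */
            uint16_t inner_l2 = BNXT_TPA_INNER_L2_OFF(hdr_info); /* bits 17:9 */
            uint16_t inner_l3 = BNXT_TPA_INNER_L3_OFF(hdr_info); /* bits 26:18 */
            /* bits 31:27 hold the inner L4 header size;
             * BNXT_TPA_L4_SIZE() maps an all-zero field to 32 bytes */
            (void)outer_l3; (void)inner_l2; (void)inner_l3;
    }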
diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h
index 935cb90..987ff53 100644
--- a/drivers/net/bnxt/hsi_struct_def_dpdk.h
+++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h
@@ -821,7 +821,9 @@ struct rx_pkt_cmpl {
* RX L2 completion: Completion of an L2 RX
* packet. Length = 32B
*/
- #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2 UINT32_C(0x11)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_START UINT32_C(0x13)
+ #define RX_PKT_CMPL_TYPE_RX_L2_TPA_END UINT32_C(0x15)
/*
* When this bit is '1', it indicates a packet that has an error
* of some type. Type of error is indicated in error_flags.
@@ -1229,6 +1231,473 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_REORDER_SFT 0
} __attribute__((packed));
+/* RX L2 TPA Start Completion Record (32 bytes, split into two 16-byte structs) */
+struct rx_tpa_start_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_START_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_START_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA Start Completion: Completion at the
+ * beginning of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_START_CMPL_TYPE_RX_TPA_START UINT32_C(0x13)
+ /* This bit will always be '0' for TPA start completions. */
+ #define RX_TPA_START_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_JUMBO \
+ (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_START_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_START_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* This bit is '1' if the RSS field in this completion is valid. */
+ #define RX_TPA_START_CMPL_FLAGS_RSS_VALID UINT32_C(0x400)
+ /* unused is 1 b */
+ #define RX_TPA_START_CMPL_FLAGS_UNUSED UINT32_C(0x800)
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was.
+ */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_SFT 12
+ /* TCP Packet: Indicates that the packet was IP and TCP. */
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12)
+ #define RX_TPA_START_CMPL_FLAGS_ITYPE_LAST \
+ RX_TPA_START_CMPL_FLAGS_ITYPE_TCP
+ #define RX_TPA_START_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_START_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value indicates the amount of packet data written to the
+ * buffer the opaque field in this completion corresponds to.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t v1;
+ /* unused1 is 7 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V1 UINT32_C(0x1)
+ /* unused1 is 7 b */
+ uint8_t rss_hash_type;
+ /*
+ * This is the RSS hash type for the packet. The value is packed
+ * {tuple_extract_op[1:0],rss_profile_id[4:0],tuple_extract_op[2]}.
+ * The value of tuple_extract_op provides the information about
+ * what fields the hash was computed on. * 0: The RSS hash was
+ * computed over source IP address, destination IP address,
+ * source port, and destination port of inner IP and TCP or UDP
+ * headers. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 1: The RSS hash was computed over
+ * source IP address and destination IP address of inner IP
+ * header. Note: For non-tunneled packets, the packet headers
+ * are considered inner packet headers for the RSS hash
+ * computation purpose. * 2: The RSS hash was computed over
+ * source IP address, destination IP address, source port, and
+ * destination port of IP and TCP or UDP headers of outer tunnel
+ * headers. Note: For non-tunneled packets, this value is not
+ * applicable. * 3: The RSS hash was computed over source IP
+ * address and destination IP address of IP header of outer
+ * tunnel headers. Note: For non-tunneled packets, this value is
+ * not applicable. Note that 4-tuples values listed above are
+ * applicable for layer 4 protocols supported and enabled for
+ * RSS in the hardware, HWRM firmware, and drivers. For example,
+ * if RSS hash is supported and enabled for TCP traffic only,
+ * then the values of tuple_extract_op corresponding to 4-tuples
+ * are only valid for TCP traffic.
+ */
+ uint16_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 9 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_START_CMPL_AGG_ID_MASK UINT32_C(0xfe00)
+ #define RX_TPA_START_CMPL_AGG_ID_SFT 9
+ uint32_t rss_hash;
+ /*
+ * This value is the RSS hash value calculated for the packet
+ * based on the mode bits and key value in the VNIC.
+ */
+} __attribute__((packed));
+
+/* last 16 bytes of RX L2 TPA Start Completion Record */
+struct rx_tpa_start_cmpl_hi {
+ uint32_t flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the sum passed for
+ * all segments included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was calculated for the
+ * tunnel packet and that the sum passed for all segments
+ * included in the aggregation.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+ /* This value indicates what format the metadata field is. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Value is zero. */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The metadata field contains the VLAN tag and
+ * TPID value. - metadata[11:0] contains the
+ * vlan VID value. - metadata[12] contains the
+ * vlan DE value. - metadata[15:13] contains the
+ * vlan PRI value. - metadata[31:16] contains
+ * the vlan TPID value.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4)
+ #define RX_TPA_START_CMPL_FLAGS2_META_FORMAT_LAST \
+ RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN
+ /*
+ * This field indicates the IP type for the inner-most IP
+ * header. A value of '0' indicates IPv4. A value of '1'
+ * indicates IPv6.
+ */
+ #define RX_TPA_START_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
+ uint32_t metadata;
+ /*
+ * This is data from the CFA block as indicated by the
+ * meta_format field.
+ */
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define RX_TPA_START_CMPL_METADATA_VID_MASK UINT32_C(0xfff)
+ #define RX_TPA_START_CMPL_METADATA_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define RX_TPA_START_CMPL_METADATA_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_TPA_START_CMPL_METADATA_PRI_MASK UINT32_C(0xe000)
+ #define RX_TPA_START_CMPL_METADATA_PRI_SFT 13
+ /* When meta_format=1, this value is the VLAN TPID. */
+ #define RX_TPA_START_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000)
+ #define RX_TPA_START_CMPL_METADATA_TPID_SFT 16
+ uint16_t v2;
+ /* unused4 is 15 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_START_CMPL_V2 UINT32_C(0x1)
+ /* unused4 is 15 b */
+ uint16_t cfa_code;
+ /*
+ * This field identifies the CFA action rule that was used for
+ * this packet.
+ */
+ uint32_t inner_l4_size_inner_l3_offset_inner_l2_offset_outer_l3_offset;
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the outer L3 header. If there is no outer L3 header, then
+ * this value is zero.
+ */
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_MASK UINT32_C(0x1ff)
+ #define RX_TPA_START_CMPL_OUTER_L3_OFFSET_SFT 0
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L2 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_MASK UINT32_C(0x3fe00)
+ #define RX_TPA_START_CMPL_INNER_L2_OFFSET_SFT 9
+ /*
+ * This is the offset from the beginning of the packet in bytes
+ * for the inner most L3 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_MASK UINT32_C(0x7fc0000)
+ #define RX_TPA_START_CMPL_INNER_L3_OFFSET_SFT 18
+ /*
+ * This is the size in bytes of the inner most L4 header. This
+ * can be subtracted from the payload_offset to determine the
+ * start of the inner most L4 header.
+ */
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_MASK UINT32_C(0xf8000000)
+ #define RX_TPA_START_CMPL_INNER_L4_SIZE_SFT 27
+} __attribute__((packed));
+
+/* RX TPA End Completion Record (32 bytes, split into two 16-byte structs) */
+struct rx_tpa_end_cmpl {
+ uint16_t flags_type;
+ /*
+ * This field indicates the exact type of the completion. By
+ * convention, the LSB identifies the length of the record in
+ * 16B units. Even values indicate 16B records. Odd values
+ * indicate 32B records.
+ */
+ #define RX_TPA_END_CMPL_TYPE_MASK UINT32_C(0x3f)
+ #define RX_TPA_END_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 TPA End Completion: Completion at the
+ * end of a TPA operation. Length = 32B
+ */
+ #define RX_TPA_END_CMPL_TYPE_RX_TPA_END UINT32_C(0x15)
+ /*
+ * When this bit is '1', it indicates a packet that has an error
+ * of some type. Type of error is indicated in error_flags.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ERROR UINT32_C(0x40)
+ /* This field indicates how the packet was placed in the buffer. */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_SFT 7
+ /*
+ * Jumbo: TPA Packet was placed using jumbo
+ * algorithm. This means that the first buffer
+ * will be filled with data before moving to
+ * aggregation buffers. Each aggregation buffer
+ * will be filled before moving to the next
+ * aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7)
+ /*
+ * Header/Data Separation: Packet was placed
+ * using Header/Data separation algorithm. The
+ * separation location is indicated by the itype
+ * field.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7)
+ /*
+ * GRO/Jumbo: Packet will be placed using
+ * GRO/Jumbo where the first packet is filled
+ * with data. Subsequent packets will be placed
+ * such that any one packet does not span two
+ * aggregation buffers unless it starts at the
+ * beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_JUMBO (UINT32_C(0x5) << 7)
+ /*
+ * GRO/Header-Data Separation: Packet will be
+ * placed using GRO/HDS where the header is in
+ * the first packet. Payload of each packet will
+ * be placed such that any one packet does not
+ * span two aggregation buffers unless it starts
+ * at the beginning of an aggregation buffer.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS (UINT32_C(0x6) << 7)
+ #define RX_TPA_END_CMPL_FLAGS_PLACEMENT_LAST \
+ RX_TPA_END_CMPL_FLAGS_PLACEMENT_GRO_HDS
+ /* unused is 2 b */
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_MASK UINT32_C(0xc00)
+ #define RX_TPA_END_CMPL_FLAGS_UNUSED_SFT 10
+ /*
+ * This value indicates what the inner packet determined for the
+ * packet was. - 2 TCP Packet Indicates that the packet was IP
+ * and TCP. This indicates that the ip_cs field is valid and
+ * that the tcp_udp_cs field is valid and contains the TCP
+ * checksum. This also indicates that the payload_offset field
+ * is valid.
+ */
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000)
+ #define RX_TPA_END_CMPL_FLAGS_ITYPE_SFT 12
+ #define RX_TPA_END_CMPL_FLAGS_MASK UINT32_C(0xffc0)
+ #define RX_TPA_END_CMPL_FLAGS_SFT 6
+ uint16_t len;
+ /*
+ * This value is zero for TPA End completions. There is no data
+ * in the buffer that corresponds to the opaque value in this
+ * completion.
+ */
+ uint32_t opaque;
+ /*
+ * This is a copy of the opaque field from the RX BD this
+ * completion corresponds to.
+ */
+ uint8_t agg_bufs_v1;
+ /* unused1 is 1 b */
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V1 UINT32_C(0x1)
+ /*
+ * This value is the number of aggregation buffers that follow
+ * this entry in the completion ring that are a part of this
+ * aggregation packet. If the value is zero, then the packet is
+ * completely contained in the buffer space provided in the
+ * aggregation start completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_BUFS_MASK UINT32_C(0x7e)
+ #define RX_TPA_END_CMPL_AGG_BUFS_SFT 1
+ /* unused1 is 1 b */
+ uint8_t tpa_segs;
+ /* This value is the number of segments in the TPA operation. */
+ uint8_t payload_offset;
+ /*
+ * This value indicates the offset in bytes from the beginning
+ * of the packet where the inner payload starts. This value is
+ * valid for TCP, UDP, FCoE, and RoCE packets. A value of zero
+ * indicates an offset of 256 bytes.
+ */
+ uint8_t agg_id;
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ /* unused2 is 1 b */
+ /*
+ * This is the aggregation ID that the completion is associated
+ * with. Use this number to correlate the TPA start completion
+ * with the TPA end completion.
+ */
+ #define RX_TPA_END_CMPL_AGG_ID_MASK UINT32_C(0xfe)
+ #define RX_TPA_END_CMPL_AGG_ID_SFT 1
+ uint32_t tsdelta;
+ /*
+ * For non-GRO packets, this value is the timestamp delta
+ * between earliest and latest timestamp values for TPA packet.
+ * If packets were not time stamped, then delta will be zero.
+ * For GRO packets, this field is zero except for the following
+ * sub-fields. - tsdelta[31] Timestamp present indication. When
+ * '0', no Timestamp option is in the packet. When '1', then a
+ * Timestamp option is present in the packet.
+ */
+} __attribute__((packed));
+
+/* last 16 bytes of RX TPA End Completion Record */
+struct rx_tpa_end_cmpl_hi {
+ uint32_t tpa_dup_acks;
+ /* unused3 is 28 b */
+ /*
+ * This value is the number of duplicate ACKs that have been
+ * received as part of the TPA operation.
+ */
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_MASK UINT32_C(0xf)
+ #define RX_TPA_END_CMPL_TPA_DUP_ACKS_SFT 0
+ /* unused3 is 28 b */
+ uint16_t tpa_seg_len;
+ /*
+ * This value is valid when TPA completion is active. It
+ * indicates the length of the longest segment of the TPA
+ * operation for LRO mode and the length of the first segment in
+ * GRO mode. This value may be used by GRO software to re-
+ * construct the original packet stream from the TPA packet.
+ * This is the length of all but the last segment for GRO. In
+ * LRO mode this value may be used to indicate MSS size to the
+ * stack.
+ */
+ uint16_t unused_3;
+ /* unused4 is 16 b */
+ uint16_t errors_v2;
+ /*
+ * This value is written by the NIC such that it will be
+ * different for each pass through the completion queue. The
+ * even passes will write 1. The odd passes will write 0.
+ */
+ #define RX_TPA_END_CMPL_V2 UINT32_C(0x1)
+ /*
+ * This error indicates that there was some sort of problem with
+ * the BDs for the packet that was found after part of the
+ * packet was already placed. The packet should be treated as
+ * invalid.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_SFT 1
+ /*
+ * This error occurs when there is a fatal HW
+ * problem in the chip only. It indicates that
+ * there were no BDs on chip even though an adequate
+ * reservation was provided by the TPA block.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \
+ (UINT32_C(0x2) << 1)
+ /*
+ * This error occurs when TPA block was not
+ * configured to reserve adequate BDs for TPA
+ * operations on this RX ring. All data for the
+ * TPA operation was not placed. This error can
+ * also be generated when the number of segments
+ * is not programmed correctly in TPA and the limit
+ * of 33 total aggregation buffers allowed for the
+ * TPA operation has been exceeded.
+ */
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR \
+ (UINT32_C(0x4) << 1)
+ #define RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_LAST \
+ RX_TPA_END_CMPL_ERRORS_BUFFER_ERROR_RSV_ERROR
+ #define RX_TPA_END_CMPL_ERRORS_MASK UINT32_C(0xfffe)
+ #define RX_TPA_END_CMPL_ERRORS_SFT 1
+ uint16_t unused_4;
+ /* unused5 is 16 b */
+ uint32_t start_opaque;
+ /*
+ * This is the opaque value that was completed for the TPA start
+ * completion that corresponds to this TPA end completion.
+ */
+} __attribute__((packed));
+
/* HWRM Forwarded Request (16 bytes) */
struct hwrm_fwd_req_cmpl {
uint16_t req_len_type;
@@ -6304,6 +6773,182 @@ struct hwrm_vnic_rss_cos_lb_ctx_free_output {
*/
} __attribute__((packed));
+/* hwrm_vnic_tpa_cfg */
+/* Description: This function is used to enable/configure TPA on the VNIC. */
+/* Input (40 bytes) */
+struct hwrm_vnic_tpa_cfg_input {
+ uint16_t req_type;
+ /*
+ * This value indicates what type of request this is. The format
+ * for the rest of the command is determined by this field.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * This value indicates the what completion ring the request
+ * will be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
+ * valid CR ring_id value for this function.
+ */
+ uint16_t seq_id;
+ /* This value indicates the command sequence number. */
+ uint16_t target_id;
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function
+ * ids 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF
+ * - HWRM
+ */
+ uint64_t resp_addr;
+ /*
+ * This is the host address where the response will be written
+ * when the request is complete. This area must be 16B aligned
+ * and must be cleared to zero before the request is made.
+ */
+ uint32_t flags;
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of non-tunneled TCP
+ * packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA UINT32_C(0x1)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) of tunneled TCP packets.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA UINT32_C(0x2)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Windows
+ * Receive Segment Coalescing (RSC) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE UINT32_C(0x4)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) according to Linux
+ * Generic Receive Offload (GRO) rules.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO UINT32_C(0x8)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for TCP packets with IP
+ * ECN set to non-zero.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN UINT32_C(0x10)
+ /*
+ * When this bit is '1', the VNIC shall be configured to perform
+ * transparent packet aggregation (TPA) for GRE tunneled TCP
+ * packets only if all packets have the same GRE sequence.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ \
+ UINT32_C(0x20)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP/IPv4 packets with consecutively increasing
+ * IPIDs. In other words, the last packet that is being
+ * aggregated to an already existing aggregation context shall
+ * have IPID 1 more than the IPID of the last packet that was
+ * aggregated in that aggregation context.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_IPID_CHECK UINT32_C(0x40)
+ /*
+ * When this bit is '1' and the GRO mode is enabled, the VNIC
+ * shall be configured to perform transparent packet aggregation
+ * (TPA) for TCP packets with the same TTL (IPv4) or Hop limit
+ * (IPv6) value.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO_TTL_CHECK UINT32_C(0x80)
+ uint32_t enables;
+ /* This bit must be '1' for the max_agg_segs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS UINT32_C(0x1)
+ /* This bit must be '1' for the max_aggs field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the max_agg_timer field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
+ /* This bit must be '1' for the min_agg_len field to be configured. */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ uint16_t vnic_id;
+ /* Logical vnic ID */
+ uint16_t max_agg_segs;
+ /*
+ * This is the maximum number of TCP segments that can be
+ * aggregated (unit is Log2). Max value is 31.
+ */
+ /* 1 segment */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_1 UINT32_C(0x0)
+ /* 2 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_2 UINT32_C(0x1)
+ /* 4 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_4 UINT32_C(0x2)
+ /* 8 segments */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_8 UINT32_C(0x3)
+ /* Any segment size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGG_SEGS_MAX UINT32_C(0x1f)
+ uint16_t max_aggs;
+ /*
+ * This is the maximum number of aggregations this VNIC is
+ * allowed (unit is Log2). Max value is 7
+ */
+ /* 1 aggregation */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_1 UINT32_C(0x0)
+ /* 2 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_2 UINT32_C(0x1)
+ /* 4 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_4 UINT32_C(0x2)
+ /* 8 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_8 UINT32_C(0x3)
+ /* 16 aggregations */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_16 UINT32_C(0x4)
+ /* Any aggregation size larger than this is not valid */
+ #define HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX UINT32_C(0x7)
+ uint8_t unused_0;
+ uint8_t unused_1;
+ uint32_t max_agg_timer;
+ /*
+ * This is the maximum amount of time allowed for an aggregation
+ * context to complete after it was initiated.
+ */
+ uint32_t min_agg_len;
+ /*
+ * This is the minimum amount of payload length required to
+ * start an aggregation context.
+ */
+} __attribute__((packed));
+
+/* Output (16 bytes) */
+struct hwrm_vnic_tpa_cfg_output {
+ uint16_t error_code;
+ /*
+ * Pass/Fail or error type Note: receiver to verify the in
+ * parameters, and fail the call with an error when appropriate
+ */
+ uint16_t req_type;
+ /* This field returns the type of original request. */
+ uint16_t seq_id;
+ /* This field provides original sequence number of the command. */
+ uint16_t resp_len;
+ /*
+ * This field is the length of the response in bytes. The last
+ * byte of the response is a valid flag that will read as '1'
+ * when the command has been completely written to memory.
+ */
+ uint32_t unused_0;
+ uint8_t unused_1;
+ uint8_t unused_2;
+ uint8_t unused_3;
+ uint8_t valid;
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to an
+ * internal processor, the order of writes has to be such that
+ * this field is written last.
+ */
+} __attribute__((packed));
+
/* hwrm_ring_alloc */
/*
* Description: This command allocates and does basic preparation for a ring.
--
2.10.1 (Apple Git-78)