@@ -362,14 +362,44 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_rxq_ops def_rxq_ops = {
+static const __rte_unused
+struct iavf_rxq_ops def_rxq_ops = {
.release_mbufs = release_rxq_mbufs,
};
-static const struct iavf_txq_ops def_txq_ops = {
+static const __rte_unused
+struct iavf_txq_ops def_txq_ops = {
.release_mbufs = release_txq_mbufs,
};
+/*
+ * Per-type release_mbufs dispatch tables, indexed by the queue's
+ * rel_mbufs_type (enum iavf_rxtx_rel_mbufs_type).  Queues record an
+ * index rather than a function pointer so that a secondary process
+ * resolves the callback in its own address space.
+ * Entries use designated function-pointer initializers: a function
+ * address is an address constant, whereas copying another struct
+ * object (e.g. "= def_rxq_ops") is not a valid static initializer in C.
+ * Vector callbacks exist only on x86/AVX512 builds, hence the guards.
+ */
+static const
+struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
+#endif
+};
+
+static const
+struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
+	[IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
+#ifdef RTE_ARCH_X86
+	[IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
+#ifdef CC_AVX512_SUPPORT
+	[IAVF_REL_MBUFS_AVX512_VEC].release_mbufs = iavf_tx_queue_release_mbufs_avx512,
+#endif
+#endif
+};
static inline void
iavf_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct iavf_rx_queue *rxq,
struct rte_mbuf *mb,
@@ -674,7 +704,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
rxq->q_set = true;
dev->data->rx_queues[queue_idx] = rxq;
rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
- rxq->ops = &def_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_rx_bulk_allow(rxq) == true) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
@@ -811,7 +841,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
txq->q_set = true;
dev->data->tx_queues[queue_idx] = txq;
txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
- txq->ops = &def_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
if (check_tx_vec_allow(txq) == false) {
struct iavf_adapter *ad =
@@ -943,7 +973,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
}
rxq = dev->data->rx_queues[rx_queue_id];
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -971,7 +1001,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
}
txq = dev->data->tx_queues[tx_queue_id];
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
@@ -986,7 +1016,7 @@ iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1000,7 +1030,7 @@ iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
if (!q)
return;
- q->ops->release_mbufs(q);
+ iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
rte_free(q->sw_ring);
rte_memzone_free(q->mz);
rte_free(q);
@@ -1034,7 +1064,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
txq = dev->data->tx_queues[i];
if (!txq)
continue;
- txq->ops->release_mbufs(txq);
+ iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
reset_tx_queue(txq);
dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -1042,7 +1072,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
rxq = dev->data->rx_queues[i];
if (!rxq)
continue;
- rxq->ops->release_mbufs(rxq);
+ iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
reset_rx_queue(rxq);
dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
}
@@ -187,6 +187,7 @@ struct iavf_rx_queue {
struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
struct rte_mbuf fake_mbuf; /* dummy mbuf */
uint8_t rxdid;
+ uint8_t rel_mbufs_type;
/* used for VPMD */
uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -246,6 +247,7 @@ struct iavf_tx_queue {
uint16_t last_desc_cleaned; /* last desc have been cleaned*/
uint16_t free_thresh;
uint16_t rs_thresh;
+ uint8_t rel_mbufs_type;
uint16_t port_id;
uint16_t queue_id;
@@ -389,6 +391,12 @@ struct iavf_32b_rx_flex_desc_comms_ipsec {
__le32 ipsec_said;
};
+enum iavf_rxtx_rel_mbufs_type {
+ IAVF_REL_MBUFS_DEFAULT = 0,
+ IAVF_REL_MBUFS_SSE_VEC = 1,
+ IAVF_REL_MBUFS_AVX512_VEC = 2,
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -692,6 +700,9 @@ int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
void iavf_set_default_ptype_table(struct rte_eth_dev *dev);
+void iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq);
+void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
+void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
static inline
void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
@@ -1992,7 +1992,7 @@ iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
return iavf_xmit_pkts_vec_avx512_cmn(tx_queue, tx_pkts, nb_pkts, false);
}
-static inline void
+void
iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
{
unsigned int i;
@@ -2012,14 +2012,10 @@ iavf_tx_queue_release_mbufs_avx512(struct iavf_tx_queue *txq)
}
}
-static const struct iavf_txq_ops avx512_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_avx512,
-};
-
int __rte_cold
iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq)
{
- txq->ops = &avx512_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_AVX512_VEC;
return 0;
}
@@ -1198,37 +1198,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
return nb_tx;
}
-static void __rte_cold
+void
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __rte_cold
+void
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
}
-static const struct iavf_rxq_ops sse_vec_rxq_ops = {
- .release_mbufs = iavf_rx_queue_release_mbufs_sse,
-};
-
-static const struct iavf_txq_ops sse_vec_txq_ops = {
- .release_mbufs = iavf_tx_queue_release_mbufs_sse,
-};
-
int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
- txq->ops = &sse_vec_txq_ops;
+ txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return 0;
}
int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
- rxq->ops = &sse_vec_rxq_ops;
+ rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
return iavf_rxq_vec_setup_default(rxq);
}