[v3,8/8] net/hns3: support query Rx descriptor status
Checks
Commit Message
From: Hongbo Zheng <zhenghongbo3@huawei.com>
Add support for querying Rx descriptor status in the hns3 driver. Check
the specified descriptor and provide the status information of the
corresponding descriptor.
Signed-off-by: Hongbo Zheng <zhenghongbo3@huawei.com>
Signed-off-by: Min Hu (Connor) <humin29@huawei.com>
---
doc/guides/nics/features/hns3.ini | 1 +
doc/guides/nics/features/hns3_vf.ini | 1 +
doc/guides/rel_notes/release_21_05.rst | 1 +
drivers/net/hns3/hns3_ethdev.c | 1 +
drivers/net/hns3/hns3_ethdev_vf.c | 1 +
drivers/net/hns3/hns3_rxtx.c | 36 ++++++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 1 +
7 files changed, 42 insertions(+)
@@ -35,6 +35,7 @@ L4 checksum offload = Y
Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
@@ -33,6 +33,7 @@ Inner L3 checksum = Y
Inner L4 checksum = Y
Packet type parsing = Y
+Rx descriptor status = Y
Tx descriptor status = Y
Basic stats = Y
Extended stats = Y
Stats per queue = Y
@@ -63,6 +63,7 @@ New Features
* Added support for runtime config to select IO burst function.
* Added support for outer UDP checksum in Kunpeng930.
* Added support for query Tx descriptor status.
+ * Added support for query Rx descriptor status.
* **Updated NXP DPAA2 driver.**
@@ -6772,6 +6772,7 @@ hns3_dev_init(struct rte_eth_dev *eth_dev)
err_mp_init_secondary:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
+ eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->tx_descriptor_status = NULL;
@@ -2915,6 +2915,7 @@ hns3vf_dev_init(struct rte_eth_dev *eth_dev)
err_mp_init_secondary:
eth_dev->dev_ops = NULL;
eth_dev->rx_pkt_burst = NULL;
+ eth_dev->rx_descriptor_status = NULL;
eth_dev->tx_pkt_burst = NULL;
eth_dev->tx_pkt_prepare = NULL;
eth_dev->tx_descriptor_status = NULL;
@@ -4042,6 +4042,7 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
if (hns->hw.adapter_state == HNS3_NIC_STARTED &&
__atomic_load_n(&hns->hw.reset.resetting, __ATOMIC_RELAXED) == 0) {
eth_dev->rx_pkt_burst = hns3_get_rx_function(eth_dev);
+ eth_dev->rx_descriptor_status = hns3_dev_rx_descriptor_status;
eth_dev->tx_pkt_burst = hns3_get_tx_function(eth_dev, &prep);
eth_dev->tx_pkt_prepare = prep;
eth_dev->tx_descriptor_status = hns3_dev_tx_descriptor_status;
@@ -4258,6 +4259,54 @@ hns3_tx_done_cleanup(void *txq, uint32_t free_cnt)
}
+/*
+ * Report the state of the Rx descriptor that is "offset" BDs after the
+ * one hardware will write back next (next_to_use).
+ *
+ * Returns RTE_ETH_RX_DESC_DONE when hardware has written the BD back,
+ * RTE_ETH_RX_DESC_AVAIL when it is still owned by hardware,
+ * RTE_ETH_RX_DESC_UNAVAIL when it is held by the driver, and
+ * -EINVAL when offset is beyond the ring size.
+ */
+int
+hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
+{
+ volatile struct hns3_desc *rxdp;
+ struct hns3_rx_queue *rxq;
+ struct rte_eth_dev *dev;
+ uint32_t bd_base_info;
+ uint16_t desc_id;
+
+ rxq = (struct hns3_rx_queue *)rx_queue;
+ if (offset >= rxq->nb_rx_desc)
+ return -EINVAL;
+
+ desc_id = (rxq->next_to_use + offset) % rxq->nb_rx_desc;
+ rxdp = &rxq->rx_ring[desc_id];
+ bd_base_info = rte_le_to_cpu_32(rxdp->rx.bd_base_info);
+ dev = &rte_eth_devices[rxq->port_id];
+ /*
+ * BDs held by the driver (not yet returned to hardware) are
+ * unavailable; how many are held depends on the Rx burst function
+ * currently in use.
+ */
+ if (dev->rx_pkt_burst == hns3_recv_pkts ||
+ dev->rx_pkt_burst == hns3_recv_scattered_pkts) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_free_hold)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else if (dev->rx_pkt_burst == hns3_recv_pkts_vec ||
+ dev->rx_pkt_burst == hns3_recv_pkts_vec_sve) {
+ if (offset >= rxq->nb_rx_desc - rxq->rx_rearm_nb)
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ } else {
+ return RTE_ETH_RX_DESC_UNAVAIL;
+ }
+
+ /* VLD bit set means hardware has finished writing back this BD. */
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return RTE_ETH_RX_DESC_AVAIL;
+ else
+ return RTE_ETH_RX_DESC_DONE;
+}
+
int
hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
 volatile struct hns3_desc *txdp;
@@ -720,6 +720,7 @@ void hns3_stop_all_txqs(struct rte_eth_dev *dev);
void hns3_restore_tqp_enable_state(struct hns3_hw *hw);
int hns3_tx_done_cleanup(void *txq, uint32_t free_cnt);
void hns3_enable_rxd_adv_layout(struct hns3_hw *hw);
+int hns3_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
int hns3_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
#endif /* _HNS3_RXTX_H_ */