[V2] net/hns3: support getting Tx and Rx queue information
Checks
Commit Message
From: Huisong Li <lihuisong@huawei.com>
This patch adds support for querying Rx/Tx queue information.
Currently, if there are no available Rx buffer descriptors in the receive
direction on the hns3 network engine, incoming packets will always be
dropped by hardware. This patch reports the '.rx_drop_en' information to
the DPDK framework in the '.dev_infos_get', '.rxq_info_get' and
'.rx_queue_setup' ops implementation functions.
Signed-off-by: Huisong Li <lihuisong@huawei.com>
Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
---
v1 -> v2: fix the typo in the commit log.
---
drivers/net/hns3/hns3_ethdev.c | 11 +++++++
drivers/net/hns3/hns3_ethdev_vf.c | 11 +++++++
drivers/net/hns3/hns3_rxtx.c | 51 +++++++++++++++++++++++++++++++
drivers/net/hns3/hns3_rxtx.h | 5 ++-
4 files changed, 77 insertions(+), 1 deletion(-)
Comments
On 8/18/2020 4:07 AM, Wei Hu (Xavier) wrote:
> From: Huisong Li <lihuisong@huawei.com>
>
> This patch adds support for querying Rx/Tx queue information.
>
> Currently, if there are not available Rx buffer descriptors in receiving
> direction based on hns3 network engine, incoming packets will always be
> dropped by hardware. This patch reports the '.rx_drop_en' information to
> DPDK framework in the '.dev_infos_get', '.rxq_info_get' and
> '.rx_queue_setup' ops implementation function.
>
> Signed-off-by: Huisong Li <lihuisong@huawei.com>
> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
<...>
> @@ -2495,6 +2495,15 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
> .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
> };
>
> + info->default_rxconf = (struct rte_eth_rxconf) {
> + /*
> + * If there are no available Rx buffer descriptors, incoming
> + * packets are always dropped by hardware based on hns3 network
> + * engine.
> + */
> + .rx_drop_en = 1,
> + };
Can you please separate this into two patches, first one for 'rx_drop_en'
related changes to existing code, second one to add queue info get functions?
And you may prefer to request backporting the 'rx_drop_en' patch.
<...>
> +void
> +hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
> + struct rte_eth_rxq_info *qinfo)
> +{
> + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> + struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
> +
> + if (rxq == NULL) {
> + hns3_err(hw, "queue pointer of rx queue_id (%u) is NULL.",
> + queue_id);
> + return;
> + }
'rxq' should not be 'NULL' at this stage, because of the "queue_id >=
dev->data->nb_rx_queues" check in 'rte_eth_rx_queue_info_get()'.
Can you please check if it can be 'NULL' and if it can be, better to check it in
the ethdev API, instead of doing in each PMD, both for 'rxq' & 'txq'.
Hi, Ferruh Yigit
On 2020/8/20 0:19, Ferruh Yigit wrote:
> On 8/18/2020 4:07 AM, Wei Hu (Xavier) wrote:
>> From: Huisong Li <lihuisong@huawei.com>
>>
>> This patch adds support for querying Rx/Tx queue information.
>>
>> Currently, if there are not available Rx buffer descriptors in receiving
>> direction based on hns3 network engine, incoming packets will always be
>> dropped by hardware. This patch reports the '.rx_drop_en' information to
>> DPDK framework in the '.dev_infos_get', '.rxq_info_get' and
>> '.rx_queue_setup' ops implementation function.
>>
>> Signed-off-by: Huisong Li <lihuisong@huawei.com>
>> Signed-off-by: Wei Hu (Xavier) <xavier.huwei@huawei.com>
> <...>
>
>> @@ -2495,6 +2495,15 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
>> .nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
>> };
>>
>> + info->default_rxconf = (struct rte_eth_rxconf) {
>> + /*
>> + * If there are no available Rx buffer descriptors, incoming
>> + * packets are always dropped by hardware based on hns3 network
>> + * engine.
>> + */
>> + .rx_drop_en = 1,
>> + };
> Can you please separate this into two patches, first one for 'rx_drop_en'
> related changes to existing code, second one to add queue info get functions?
>
> And you may prefer to request backporting the 'rx_drop_en' patch.
Thanks for your suggestion.
I will update them in V3.
>
> <...>
>> +void
>> +hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
>> + struct rte_eth_rxq_info *qinfo)
>> +{
>> + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
>> + struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
>> +
>> + if (rxq == NULL) {
>> + hns3_err(hw, "queue pointer of rx queue_id (%u) is NULL.",
>> + queue_id);
>> + return;
>> + }
> 'rxq' should not be 'NULL' at this stage, because of the "queue_id >=
> dev->data->nb_rx_queues" check in 'rte_eth_rx_queue_info_get()'.
> Can you please check if it can be 'NULL' and if it can be, better to check it in
> the ethdev API, instead of doing in each PMD, both for 'rxq' & 'txq'.
OK, I will send an email to add checks for rxq & txq in the
rte_eth_rx_queue_info_get
and rte_eth_tx_queue_info_get API functions.
Thanks, Xavier.
@@ -2495,6 +2495,15 @@ hns3_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
.nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
};
+ info->default_rxconf = (struct rte_eth_rxconf) {
+ /*
+ * If there are no available Rx buffer descriptors, incoming
+ * packets are always dropped by hardware based on hns3 network
+ * engine.
+ */
+ .rx_drop_en = 1,
+ };
+
info->vmdq_queue_num = 0;
info->reta_size = HNS3_RSS_IND_TBL_SIZE;
@@ -5413,6 +5422,8 @@ static const struct eth_dev_ops hns3_eth_dev_ops = {
.tx_queue_release = hns3_dev_tx_queue_release,
.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
+ .rxq_info_get = hns3_rxq_info_get,
+ .txq_info_get = hns3_txq_info_get,
.dev_configure = hns3_dev_configure,
.flow_ctrl_get = hns3_flow_ctrl_get,
.flow_ctrl_set = hns3_flow_ctrl_set,
@@ -947,6 +947,15 @@ hns3vf_dev_infos_get(struct rte_eth_dev *eth_dev, struct rte_eth_dev_info *info)
.nb_mtu_seg_max = HNS3_MAX_NON_TSO_BD_PER_PKT,
};
+ info->default_rxconf = (struct rte_eth_rxconf) {
+ /*
+ * If there are no available Rx buffer descriptors, incoming
+ * packets are always dropped by hardware based on hns3 network
+ * engine.
+ */
+ .rx_drop_en = 1,
+ };
+
info->vmdq_queue_num = 0;
info->reta_size = HNS3_RSS_IND_TBL_SIZE;
@@ -2473,6 +2482,8 @@ static const struct eth_dev_ops hns3vf_eth_dev_ops = {
.tx_queue_release = hns3_dev_tx_queue_release,
.rx_queue_intr_enable = hns3_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = hns3_dev_rx_queue_intr_disable,
+ .rxq_info_get = hns3_rxq_info_get,
+ .txq_info_get = hns3_txq_info_get,
.dev_configure = hns3vf_dev_configure,
.mac_addr_add = hns3vf_add_mac_addr,
.mac_addr_remove = hns3vf_remove_mac_addr,
@@ -1251,6 +1251,12 @@ hns3_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t nb_desc,
return -EINVAL;
}
+ if (conf->rx_drop_en == 0)
+ hns3_warn(hw, "if there are no available Rx descriptors,"
+ "incoming packets are always dropped. input parameter"
+ " conf->rx_drop_en(%u) is uneffective.",
+ conf->rx_drop_en);
+
if (dev->data->rx_queues[idx]) {
hns3_rx_queue_release(dev->data->rx_queues[idx]);
dev->data->rx_queues[idx] = NULL;
@@ -2814,3 +2820,48 @@ void hns3_set_rxtx_function(struct rte_eth_dev *eth_dev)
eth_dev->tx_pkt_prepare = hns3_dummy_rxtx_burst;
}
}
+
+void
+hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_rx_queue *rxq = dev->data->rx_queues[queue_id];
+
+ if (rxq == NULL) {
+ hns3_err(hw, "queue pointer of rx queue_id (%u) is NULL.",
+ queue_id);
+ return;
+ }
+
+ qinfo->mp = rxq->mb_pool;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+
+ /*
+ * If there are no available Rx buffer descriptors, incoming packets
+ * are always dropped by hardware based on hns3 network engine.
+ */
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+}
+
+void
+hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct hns3_tx_queue *txq = dev->data->tx_queues[queue_id];
+
+ if (txq == NULL) {
+ hns3_err(hw, "queue pointer of tx queue_id (%u) is NULL.",
+ queue_id);
+ return;
+ }
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+ qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+}
@@ -402,5 +402,8 @@ int hns3_set_fake_rx_or_tx_queues(struct rte_eth_dev *dev, uint16_t nb_rx_q,
int hns3_config_gro(struct hns3_hw *hw, bool en);
int hns3_restore_gro_conf(struct hns3_hw *hw);
void hns3_update_all_queues_pvid_state(struct hns3_hw *hw);
-
+void hns3_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo);
+void hns3_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo);
#endif /* _HNS3_RXTX_H_ */