[dpdk-stable] patch 'net/hns3: fix delay for waiting to stop Rx/Tx' has been queued to stable release 19.11.10

christian.ehrhardt at canonical.com
Tue Aug 10 17:39:11 CEST 2021


Hi,

FYI, your patch has been queued to stable release 19.11.10

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 08/12/21. So please
shout if anyone has objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if there was any rebasing
needed to apply to the stable branch. If there were code changes for rebasing
(ie: not only metadata diffs), please double check that the rebase was
correctly done.

Queued patches are on a temporary branch at:
https://github.com/cpaelzer/dpdk-stable-queue

This queued commit can be viewed at:
https://github.com/cpaelzer/dpdk-stable-queue/commit/249c35152a9bcd6d4c4b52776602750552dcf294

Thanks.

Christian Ehrhardt <christian.ehrhardt at canonical.com>

---
From 249c35152a9bcd6d4c4b52776602750552dcf294 Mon Sep 17 00:00:00 2001
From: Huisong Li <lihuisong at huawei.com>
Date: Sun, 13 Jun 2021 10:31:52 +0800
Subject: [PATCH] net/hns3: fix delay for waiting to stop Rx/Tx

[ upstream commit 4d8cce267840556cec8483c61f8cfbf25873496d ]

When the primary process executes dev_stop or is being reset, the packet
sending and receiving functions are changed. At this moment, the primary
process requests the secondary processes to change their Rx/Tx functions,
and then delays for a period of time to avoid crashes while queues are
still in use. The delay time should depend on the number of queues
actually used, not on the maximum number of queues supported by the
device.

Fixes: 23d4b61fee5d ("net/hns3: support multiple process")

Signed-off-by: Huisong Li <lihuisong at huawei.com>
Signed-off-by: Min Hu (Connor) <humin29 at huawei.com>
---
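For context, a minimal sketch of the stop path this patch touches
(simplified; the helper name stop_datapath is hypothetical, while the
hns3_*/rte_* calls are the ones visible in the diff below):

    /*
     * Switch the primary process to dummy Rx/Tx burst functions, ask
     * secondary processes to do the same, then wait roughly 1 ms per
     * queue so in-flight bursts can drain before queues are torn down.
     */
    static void
    stop_datapath(struct rte_eth_dev *dev, struct hns3_hw *hw)
    {
        hns3_set_rxtx_function(dev);    /* install dummy burst functions */
        rte_wmb();
        hns3_mp_req_stop_rxtx(dev);     /* notify secondary processes */
        /*
         * Was rte_delay_ms(hw->tqps_num), i.e. the device maximum.
         * Waiting per configured queue (hw->cfg_max_queues) is
         * sufficient and shortens dev_stop when few queues are used.
         */
        rte_delay_ms(hw->cfg_max_queues);
    }
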
 drivers/net/hns3/hns3_ethdev.c    | 4 ++--
 drivers/net/hns3/hns3_ethdev_vf.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index ac82e0b5ef..e1bc55682c 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -4742,7 +4742,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
 	/* Disable datapath on secondary process. */
 	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
-	rte_delay_ms(hw->tqps_num);
+	rte_delay_ms(hw->cfg_max_queues);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -5201,7 +5201,7 @@ hns3_stop_service(struct hns3_adapter *hns)
 	rte_wmb();
 	/* Disable datapath on secondary process. */
 	hns3_mp_req_stop_rxtx(eth_dev);
-	rte_delay_ms(hw->tqps_num);
+	rte_delay_ms(hw->cfg_max_queues);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index a7b6188eea..eb3edf3464 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -1631,7 +1631,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
 	/* Disable datapath on secondary process. */
 	hns3_mp_req_stop_rxtx(dev);
 	/* Prevent crashes when queues are still in use. */
-	rte_delay_ms(hw->tqps_num);
+	rte_delay_ms(hw->cfg_max_queues);
 
 	rte_spinlock_lock(&hw->lock);
 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
@@ -2005,7 +2005,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
 	rte_wmb();
 	/* Disable datapath on secondary process. */
 	hns3_mp_req_stop_rxtx(eth_dev);
-	rte_delay_ms(hw->tqps_num);
+	rte_delay_ms(hw->cfg_max_queues);
 
 	rte_spinlock_lock(&hw->lock);
 	if (hw->adapter_state == HNS3_NIC_STARTED ||
-- 
2.32.0

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2021-08-10 15:11:14.264171339 +0200
+++ 0031-net-hns3-fix-delay-for-waiting-to-stop-Rx-Tx.patch	2021-08-10 15:11:12.962637696 +0200
@@ -1 +1 @@
-From 4d8cce267840556cec8483c61f8cfbf25873496d Mon Sep 17 00:00:00 2001
+From 249c35152a9bcd6d4c4b52776602750552dcf294 Mon Sep 17 00:00:00 2001
@@ -5,0 +6,2 @@
+[ upstream commit 4d8cce267840556cec8483c61f8cfbf25873496d ]
+
@@ -14 +15,0 @@
-Cc: stable at dpdk.org
@@ -24 +25 @@
-index 20491305e7..dff265828e 100644
+index ac82e0b5ef..e1bc55682c 100644
@@ -27 +28 @@
-@@ -5895,7 +5895,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
+@@ -4742,7 +4742,7 @@ hns3_dev_stop(struct rte_eth_dev *dev)
@@ -35,2 +36,1193 @@
- 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
-@@ -6511,7 +6511,7 @@ hns3_stop_service(struct hns3_adapter *hns)
+ 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+@@ -5130,10 +5130,1190 @@ hns3_get_reset_level(struct hns3_adapter *hns, uint64_t *levels)
+ 		reset_level = HNS3_IMP_RESET;
+ 	else if (hns3_atomic_test_bit(HNS3_GLOBAL_RESET, levels))
+ 		reset_level = HNS3_GLOBAL_RESET;
++<<<<<<< HEAD
+ 	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
+ 		reset_level = HNS3_FUNC_RESET;
+ 	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
+ 		reset_level = HNS3_FLR_RESET;
++||||||| constructed merge base
++	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
++		reset_level = HNS3_FUNC_RESET;
++	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
++		reset_level = HNS3_FLR_RESET;
++
++	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
++		return HNS3_NONE_RESET;
++
++	return reset_level;
++}
++
++static void
++hns3_record_imp_error(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	uint32_t reg_val;
++
++	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
++	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
++		hns3_warn(hw, "Detected IMP RD poison!");
++		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
++	}
++
++	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
++		hns3_warn(hw, "Detected IMP CMDQ error!");
++		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
++	}
++}
++
++static int
++hns3_prepare_reset(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	uint32_t reg_val;
++	int ret;
++
++	switch (hw->reset.level) {
++	case HNS3_FUNC_RESET:
++		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
++		if (ret)
++			return ret;
++
++		/*
++		 * After performaning pf reset, it is not necessary to do the
++		 * mailbox handling or send any command to firmware, because
++		 * any mailbox handling or command to firmware is only valid
++		 * after hns3_cmd_init is called.
++		 */
++		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++		hw->reset.stats.request_cnt++;
++		break;
++	case HNS3_IMP_RESET:
++		hns3_record_imp_error(hns);
++		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
++			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static int
++hns3_set_rst_done(struct hns3_hw *hw)
++{
++	struct hns3_pf_rst_done_cmd *req;
++	struct hns3_cmd_desc desc;
++
++	req = (struct hns3_pf_rst_done_cmd *)desc.data;
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
++	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
++	return hns3_cmd_send(hw, &desc, 1);
++}
++
++static int
++hns3_stop_service(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	struct rte_eth_dev *eth_dev;
++
++	eth_dev = &rte_eth_devices[hw->data->port_id];
++	hw->mac.link_status = ETH_LINK_DOWN;
++	if (hw->adapter_state == HNS3_NIC_STARTED) {
++		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
++		hns3_update_linkstatus_and_event(hw, false);
++	}
++
++	hns3_set_rxtx_function(eth_dev);
++	rte_wmb();
++	/* Disable datapath on secondary process. */
++	hns3_mp_req_stop_rxtx(eth_dev);
++	rte_delay_ms(hw->cfg_max_queues);
++
++	rte_spinlock_lock(&hw->lock);
++	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
++	    hw->adapter_state == HNS3_NIC_STOPPING) {
++		hns3_enable_all_queues(hw, false);
++		hns3_do_stop(hns);
++		hw->reset.mbuf_deferred_free = true;
++	} else
++		hw->reset.mbuf_deferred_free = false;
++
++	/*
++	 * It is cumbersome for hardware to pick-and-choose entries for deletion
++	 * from table space. Hence, for function reset software intervention is
++	 * required to delete the entries
++	 */
++	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
++		hns3_configure_all_mc_mac_addr(hns, true);
++	rte_spinlock_unlock(&hw->lock);
++
++	return 0;
++}
++
++static int
++hns3_start_service(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	struct rte_eth_dev *eth_dev;
++
++	if (hw->reset.level == HNS3_IMP_RESET ||
++	    hw->reset.level == HNS3_GLOBAL_RESET)
++		hns3_set_rst_done(hw);
++	eth_dev = &rte_eth_devices[hw->data->port_id];
++	hns3_set_rxtx_function(eth_dev);
++	hns3_mp_req_start_rxtx(eth_dev);
++	if (hw->adapter_state == HNS3_NIC_STARTED) {
++		/*
++		 * This API parent function already hold the hns3_hw.lock, the
++		 * hns3_service_handler may report lse, in bonding application
++		 * it will call driver's ops which may acquire the hns3_hw.lock
++		 * again, thus lead to deadlock.
++		 * We defer calls hns3_service_handler to avoid the deadlock.
++		 */
++		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
++				  hns3_service_handler, eth_dev);
++
++		/* Enable interrupt of all rx queues before enabling queues */
++		hns3_dev_all_rx_queue_intr_enable(hw, true);
++		/*
++		 * Enable state of each rxq and txq will be recovered after
++		 * reset, so we need to restore them before enable all tqps;
++		 */
++		hns3_restore_tqp_enable_state(hw);
++		/*
++		 * When finished the initialization, enable queues to receive
++		 * and transmit packets.
++		 */
++		hns3_enable_all_queues(hw, true);
++	}
++
++	return 0;
++}
++
++static int
++hns3_restore_conf(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	int ret;
++
++	ret = hns3_configure_all_mac_addr(hns, false);
++	if (ret)
++		return ret;
++
++	ret = hns3_configure_all_mc_mac_addr(hns, false);
++	if (ret)
++		goto err_mc_mac;
++
++	ret = hns3_dev_promisc_restore(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_vlan_table(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_vlan_conf(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_all_fdir_filter(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_ptp(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_rx_interrupt(hw);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_gro_conf(hw);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_fec(hw);
++	if (ret)
++		goto err_promisc;
++
++	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
++		ret = hns3_do_start(hns, false);
++		if (ret)
++			goto err_promisc;
++		hns3_info(hw, "hns3 dev restart successful!");
++	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
++		hw->adapter_state = HNS3_NIC_CONFIGURED;
++	return 0;
++
++err_promisc:
++	hns3_configure_all_mc_mac_addr(hns, true);
++err_mc_mac:
++	hns3_configure_all_mac_addr(hns, true);
++	return ret;
++}
++
++static void
++hns3_reset_service(void *param)
++{
++	struct hns3_adapter *hns = (struct hns3_adapter *)param;
++	struct hns3_hw *hw = &hns->hw;
++	enum hns3_reset_level reset_level;
++	struct timeval tv_delta;
++	struct timeval tv_start;
++	struct timeval tv;
++	uint64_t msec;
++	int ret;
++
++	/*
++	 * The interrupt is not triggered within the delay time.
++	 * The interrupt may have been lost. It is necessary to handle
++	 * the interrupt to recover from the error.
++	 */
++	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
++			    SCHEDULE_DEFERRED) {
++		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
++				  __ATOMIC_RELAXED);
++		hns3_err(hw, "Handling interrupts in delayed tasks");
++		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
++		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
++		if (reset_level == HNS3_NONE_RESET) {
++			hns3_err(hw, "No reset level is set, try IMP reset");
++			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
++		}
++	}
++	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
++
++	/*
++	 * Check if there is any ongoing reset in the hardware. This status can
++	 * be checked from reset_pending. If there is then, we need to wait for
++	 * hardware to complete reset.
++	 *    a. If we are able to figure out in reasonable time that hardware
++	 *       has fully resetted then, we can proceed with driver, client
++	 *       reset.
++	 *    b. else, we can come back later to check this status so re-sched
++	 *       now.
++	 */
++	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
++	if (reset_level != HNS3_NONE_RESET) {
++		hns3_clock_gettime(&tv_start);
++		ret = hns3_reset_process(hns, reset_level);
++		hns3_clock_gettime(&tv);
++		timersub(&tv, &tv_start, &tv_delta);
++		msec = hns3_clock_calctime_ms(&tv_delta);
++		if (msec > HNS3_RESET_PROCESS_MS)
++			hns3_err(hw, "%d handle long time delta %" PRIu64
++				     " ms time=%ld.%.6ld",
++				 hw->reset.level, msec,
++				 tv.tv_sec, tv.tv_usec);
++		if (ret == -EAGAIN)
++			return;
++	}
++
++	/* Check if we got any *new* reset requests to be honored */
++	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
++	if (reset_level != HNS3_NONE_RESET)
++		hns3_msix_process(hns, reset_level);
++}
++
++static unsigned int
++hns3_get_speed_capa_num(uint16_t device_id)
++{
++	unsigned int num;
++
++	switch (device_id) {
++	case HNS3_DEV_ID_25GE:
++	case HNS3_DEV_ID_25GE_RDMA:
++		num = 2;
++		break;
++	case HNS3_DEV_ID_100G_RDMA_MACSEC:
++	case HNS3_DEV_ID_200G_RDMA:
++		num = 1;
++		break;
++	default:
++		num = 0;
++		break;
++	}
++
++	return num;
++}
++
++static int
++hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
++			uint16_t device_id)
++{
++	switch (device_id) {
++	case HNS3_DEV_ID_25GE:
++	/* fallthrough */
++	case HNS3_DEV_ID_25GE_RDMA:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
++
++		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
++		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
++		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
++		break;
++	case HNS3_DEV_ID_100G_RDMA_MACSEC:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
++		break;
++	case HNS3_DEV_ID_200G_RDMA:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
++		break;
++	default:
++		return -ENOTSUP;
++	}
++
++	return 0;
++}
++
++static int
++hns3_fec_get_capability(struct rte_eth_dev *dev,
++			struct rte_eth_fec_capa *speed_fec_capa,
++			unsigned int num)
++{
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
++	uint16_t device_id = pci_dev->id.device_id;
++	unsigned int capa_num;
++	int ret;
++
++	capa_num = hns3_get_speed_capa_num(device_id);
++	if (capa_num == 0) {
++		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
++			 device_id);
++		return -ENOTSUP;
++	}
++
++	if (speed_fec_capa == NULL || num < capa_num)
++		return capa_num;
++
++	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
++	if (ret)
++		return -ENOTSUP;
++
++	return capa_num;
++}
++
++static int
++get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
++{
++	struct hns3_config_fec_cmd *req;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	/*
++	 * CMD(HNS3_OPC_CONFIG_FEC_MODE) read is not supported
++	 * in device of link speed
++	 * below 10 Gbps.
++	 */
++	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
++		*state = 0;
++		return 0;
++	}
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
++	req = (struct hns3_config_fec_cmd *)desc.data;
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret) {
++		hns3_err(hw, "get current fec auto state failed, ret = %d",
++			 ret);
++		return ret;
++	}
++
++	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
++	return 0;
++}
++
++static int
++hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
++{
++	struct hns3_sfp_info_cmd *resp;
++	uint32_t tmp_fec_capa;
++	uint8_t auto_state;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	/*
++	 * If link is down and AUTO is enabled, AUTO is returned, otherwise,
++	 * configured FEC mode is returned.
++	 * If link is up, current FEC mode is returned.
++	 */
++	if (hw->mac.link_status == ETH_LINK_DOWN) {
++		ret = get_current_fec_auto_state(hw, &auto_state);
++		if (ret)
++			return ret;
++
++		if (auto_state == 0x1) {
++			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
++			return 0;
++		}
++	}
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
++	resp = (struct hns3_sfp_info_cmd *)desc.data;
++	resp->query_type = HNS3_ACTIVE_QUERY;
++
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret == -EOPNOTSUPP) {
++		hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
++		return ret;
++	} else if (ret) {
++		hns3_err(hw, "get FEC failed, ret = %d", ret);
++		return ret;
++	}
++
++	/*
++	 * FEC mode order defined in hns3 hardware is inconsistend with
++	 * that defined in the ethdev library. So the sequence needs
++	 * to be converted.
++	 */
++	switch (resp->active_fec) {
++	case HNS3_HW_FEC_MODE_NOFEC:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
++		break;
++	case HNS3_HW_FEC_MODE_BASER:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
++		break;
++	case HNS3_HW_FEC_MODE_RS:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
++		break;
++	default:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
++		break;
++	}
++
++	*fec_capa = tmp_fec_capa;
++	return 0;
++}
++
++static int
++hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
++{
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++
++	return hns3_fec_get_internal(hw, fec_capa);
++}
++
++static int
++hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
++{
++	struct hns3_config_fec_cmd *req;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
++
++	req = (struct hns3_config_fec_cmd *)desc.data;
++	switch (mode) {
++	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
++		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
++		break;
++	default:
++		return 0;
++	}
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret)
++		hns3_err(hw, "set fec mode failed, ret = %d", ret);
++
++	return ret;
++}
++
++static uint32_t
++get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
++{
++	struct hns3_mac *mac = &hw->mac;
++	uint32_t cur_capa;
++
++	switch (mac->link_speed) {
++	case ETH_SPEED_NUM_10G:
++		cur_capa = fec_capa[1].capa;
++		break;
++	case ETH_SPEED_NUM_25G:
++	case ETH_SPEED_NUM_100G:
++	case ETH_SPEED_NUM_200G:
++		cur_capa = fec_capa[0].capa;
++		break;
++	default:
++		cur_capa = 0;
++		break;
++	}
++
++	return cur_capa;
++}
++
++static bool
++is_fec_mode_one_bit_set(uint32_t mode)
++{
++	int cnt = 0;
++	uint8_t i;
++
++	for (i = 0; i < sizeof(mode); i++)
++		if (mode >> i & 0x1)
++			cnt++;
++
++	return cnt == 1 ? true : false;
++}
++
++static int
++hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
++{
++#define FEC_CAPA_NUM 2
++	struct hns3_adapter *hns = dev->data->dev_private;
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
++	struct hns3_pf *pf = &hns->pf;
++
++	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
++	uint32_t cur_capa;
++	uint32_t num = FEC_CAPA_NUM;
++	int ret;
++
++	ret = hns3_fec_get_capability(dev, fec_capa, num);
++	if (ret < 0)
++		return ret;
++
++	/* HNS3 PMD driver only support one bit set mode, e.g. 0x1, 0x4 */
++	if (!is_fec_mode_one_bit_set(mode)) {
++		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
++			     "FEC mode should be only one bit set", mode);
++		return -EINVAL;
++	}
++
++	/*
++	 * Check whether the configured mode is within the FEC capability.
++	 * If not, the configured mode will not be supported.
++	 */
++	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
++	if (!(cur_capa & mode)) {
++		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
++		return -EINVAL;
++	}
++
++	rte_spinlock_lock(&hw->lock);
++	ret = hns3_set_fec_hw(hw, mode);
++	if (ret) {
++		rte_spinlock_unlock(&hw->lock);
++		return ret;
++	}
++
++	pf->fec_mode = mode;
++	rte_spinlock_unlock(&hw->lock);
++
++	return 0;
++}
++
++static int
++hns3_restore_fec(struct hns3_hw *hw)
++{
++	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
++	struct hns3_pf *pf = &hns->pf;
++	uint32_t mode = pf->fec_mode;
++	int ret;
++=======
++	else if (hns3_atomic_test_bit(HNS3_FUNC_RESET, levels))
++		reset_level = HNS3_FUNC_RESET;
++	else if (hns3_atomic_test_bit(HNS3_FLR_RESET, levels))
++		reset_level = HNS3_FLR_RESET;
++
++	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
++		return HNS3_NONE_RESET;
++
++	return reset_level;
++}
++
++static void
++hns3_record_imp_error(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	uint32_t reg_val;
++
++	reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
++	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B)) {
++		hns3_warn(hw, "Detected IMP RD poison!");
++		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_RD_POISON_B, 0);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
++	}
++
++	if (hns3_get_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B)) {
++		hns3_warn(hw, "Detected IMP CMDQ error!");
++		hns3_set_bit(reg_val, HNS3_VECTOR0_IMP_CMDQ_ERR_B, 0);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val);
++	}
++}
++
++static int
++hns3_prepare_reset(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	uint32_t reg_val;
++	int ret;
++
++	switch (hw->reset.level) {
++	case HNS3_FUNC_RESET:
++		ret = hns3_func_reset_cmd(hw, HNS3_PF_FUNC_ID);
++		if (ret)
++			return ret;
++
++		/*
++		 * After performaning pf reset, it is not necessary to do the
++		 * mailbox handling or send any command to firmware, because
++		 * any mailbox handling or command to firmware is only valid
++		 * after hns3_cmd_init is called.
++		 */
++		__atomic_store_n(&hw->reset.disable_cmd, 1, __ATOMIC_RELAXED);
++		hw->reset.stats.request_cnt++;
++		break;
++	case HNS3_IMP_RESET:
++		hns3_record_imp_error(hns);
++		reg_val = hns3_read_dev(hw, HNS3_VECTOR0_OTER_EN_REG);
++		hns3_write_dev(hw, HNS3_VECTOR0_OTER_EN_REG, reg_val |
++			       BIT(HNS3_VECTOR0_IMP_RESET_INT_B));
++		break;
++	default:
++		break;
++	}
++	return 0;
++}
++
++static int
++hns3_set_rst_done(struct hns3_hw *hw)
++{
++	struct hns3_pf_rst_done_cmd *req;
++	struct hns3_cmd_desc desc;
++
++	req = (struct hns3_pf_rst_done_cmd *)desc.data;
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_PF_RST_DONE, false);
++	req->pf_rst_done |= HNS3_PF_RESET_DONE_BIT;
++	return hns3_cmd_send(hw, &desc, 1);
++}
++
++static int
++hns3_stop_service(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	struct rte_eth_dev *eth_dev;
++
++	eth_dev = &rte_eth_devices[hw->data->port_id];
++	hw->mac.link_status = ETH_LINK_DOWN;
++	if (hw->adapter_state == HNS3_NIC_STARTED) {
++		rte_eal_alarm_cancel(hns3_service_handler, eth_dev);
++		hns3_update_linkstatus_and_event(hw, false);
++	}
++
++	hns3_set_rxtx_function(eth_dev);
++	rte_wmb();
++	/* Disable datapath on secondary process. */
++	hns3_mp_req_stop_rxtx(eth_dev);
++	rte_delay_ms(hw->cfg_max_queues);
++
++	rte_spinlock_lock(&hw->lock);
++	if (hns->hw.adapter_state == HNS3_NIC_STARTED ||
++	    hw->adapter_state == HNS3_NIC_STOPPING) {
++		hns3_enable_all_queues(hw, false);
++		hns3_do_stop(hns);
++		hw->reset.mbuf_deferred_free = true;
++	} else
++		hw->reset.mbuf_deferred_free = false;
++
++	/*
++	 * It is cumbersome for hardware to pick-and-choose entries for deletion
++	 * from table space. Hence, for function reset software intervention is
++	 * required to delete the entries
++	 */
++	if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED) == 0)
++		hns3_configure_all_mc_mac_addr(hns, true);
++	rte_spinlock_unlock(&hw->lock);
++
++	return 0;
++}
++
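++/*
++ * Restart the datapath after a reset: for IMP and global resets, first tell
++ * firmware the reset is done, then restore the Rx/Tx burst functions in the
++ * primary and secondary processes. If the port was started, the service
++ * alarm is re-armed (deferred, see the comment below) and the per-queue
++ * interrupt and enable states are restored before all queues are enabled.
++ */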
++static int
++hns3_start_service(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	struct rte_eth_dev *eth_dev;
++
++	if (hw->reset.level == HNS3_IMP_RESET ||
++	    hw->reset.level == HNS3_GLOBAL_RESET)
++		hns3_set_rst_done(hw);
++	eth_dev = &rte_eth_devices[hw->data->port_id];
++	hns3_set_rxtx_function(eth_dev);
++	hns3_mp_req_start_rxtx(eth_dev);
++	if (hw->adapter_state == HNS3_NIC_STARTED) {
++		/*
++		 * The caller of this API already holds hns3_hw.lock.
++		 * hns3_service_handler may report a link status event (lse);
++		 * in a bonding application this calls the driver's ops, which
++		 * may acquire hns3_hw.lock again and thus lead to a deadlock.
++		 * Defer the call to hns3_service_handler to avoid the
++		 * deadlock.
++		 */
++		rte_eal_alarm_set(HNS3_SERVICE_QUICK_INTERVAL,
++				  hns3_service_handler, eth_dev);
++
++		/* Enable interrupt of all rx queues before enabling queues */
++		hns3_dev_all_rx_queue_intr_enable(hw, true);
++		/*
++		 * The enable state of each rxq and txq will be recovered
++		 * after reset, so restore it before enabling all tqps.
++		 */
++		hns3_restore_tqp_enable_state(hw);
++		/*
++		 * When finished the initialization, enable queues to receive
++		 * and transmit packets.
++		 */
++		hns3_enable_all_queues(hw, true);
++	}
++
++	return 0;
++}
++
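++/*
++ * Re-apply the user configuration after a reset, in dependency order:
++ * unicast and multicast MAC addresses, promiscuous mode, VLAN table and
++ * configuration, flow director filters, PTP, Rx interrupts, GRO and FEC.
++ * On failure, the already restored MAC entries are removed again.
++ */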
++static int
++hns3_restore_conf(struct hns3_adapter *hns)
++{
++	struct hns3_hw *hw = &hns->hw;
++	int ret;
++
++	ret = hns3_configure_all_mac_addr(hns, false);
++	if (ret)
++		return ret;
++
++	ret = hns3_configure_all_mc_mac_addr(hns, false);
++	if (ret)
++		goto err_mc_mac;
++
++	ret = hns3_dev_promisc_restore(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_vlan_table(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_vlan_conf(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_all_fdir_filter(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_ptp(hns);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_rx_interrupt(hw);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_gro_conf(hw);
++	if (ret)
++		goto err_promisc;
++
++	ret = hns3_restore_fec(hw);
++	if (ret)
++		goto err_promisc;
++
++	if (hns->hw.adapter_state == HNS3_NIC_STARTED) {
++		ret = hns3_do_start(hns, false);
++		if (ret)
++			goto err_promisc;
++		hns3_info(hw, "hns3 dev restart successful!");
++	} else if (hw->adapter_state == HNS3_NIC_STOPPING)
++		hw->adapter_state = HNS3_NIC_CONFIGURED;
++	return 0;
++
++err_promisc:
++	hns3_configure_all_mc_mac_addr(hns, true);
++err_mc_mac:
++	hns3_configure_all_mac_addr(hns, true);
++	return ret;
++}
++
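++/*
++ * Delayed task that drives the reset state machine: it first recovers a
++ * possibly lost interrupt, then processes any pending hardware reset,
++ * returning early on -EAGAIN so the task can run again later, and finally
++ * kicks off any newly requested reset via hns3_msix_process().
++ */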
++static void
++hns3_reset_service(void *param)
++{
++	struct hns3_adapter *hns = (struct hns3_adapter *)param;
++	struct hns3_hw *hw = &hns->hw;
++	enum hns3_reset_level reset_level;
++	struct timeval tv_delta;
++	struct timeval tv_start;
++	struct timeval tv;
++	uint64_t msec;
++	int ret;
++
++	/*
++	 * If the interrupt was not triggered within the delay time, it may
++	 * have been lost. Handle the interrupt here to recover from the
++	 * error.
++	 */
++	if (__atomic_load_n(&hw->reset.schedule, __ATOMIC_RELAXED) ==
++			    SCHEDULE_DEFERRED) {
++		__atomic_store_n(&hw->reset.schedule, SCHEDULE_REQUESTED,
++				  __ATOMIC_RELAXED);
++		hns3_err(hw, "Handling interrupts in delayed tasks");
++		hns3_interrupt_handler(&rte_eth_devices[hw->data->port_id]);
++		reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
++		if (reset_level == HNS3_NONE_RESET) {
++			hns3_err(hw, "No reset level is set, try IMP reset");
++			hns3_atomic_set_bit(HNS3_IMP_RESET, &hw->reset.pending);
++		}
++	}
++	__atomic_store_n(&hw->reset.schedule, SCHEDULE_NONE, __ATOMIC_RELAXED);
++
++	/*
++	 * Check whether there is any ongoing reset in the hardware; this
++	 * status can be read from reset_pending. If there is, wait for the
++	 * hardware to complete the reset.
++	 *    a. If we can determine within a reasonable time that the
++	 *       hardware has fully reset, proceed with the driver and
++	 *       client reset.
++	 *    b. Otherwise, come back later to check this status, so
++	 *       reschedule now.
++	 */
++	reset_level = hns3_get_reset_level(hns, &hw->reset.pending);
++	if (reset_level != HNS3_NONE_RESET) {
++		hns3_clock_gettime(&tv_start);
++		ret = hns3_reset_process(hns, reset_level);
++		hns3_clock_gettime(&tv);
++		timersub(&tv, &tv_start, &tv_delta);
++		msec = hns3_clock_calctime_ms(&tv_delta);
++		if (msec > HNS3_RESET_PROCESS_MS)
++			hns3_err(hw, "%d handle long time delta %" PRIu64
++				     " ms time=%ld.%.6ld",
++				 hw->reset.level, msec,
++				 tv.tv_sec, tv.tv_usec);
++		if (ret == -EAGAIN)
++			return;
++	}
++
++	/* Check if we got any *new* reset requests to be honored */
++	reset_level = hns3_get_reset_level(hns, &hw->reset.request);
++	if (reset_level != HNS3_NONE_RESET)
++		hns3_msix_process(hns, reset_level);
++}
++
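++/*
++ * Number of rte_eth_fec_capa entries a device needs: 25G devices report
++ * two entries (25G plus the compatible 10G rate), 100G and 200G devices
++ * report one, and unknown device IDs report zero.
++ */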
++static unsigned int
++hns3_get_speed_capa_num(uint16_t device_id)
++{
++	unsigned int num;
++
++	switch (device_id) {
++	case HNS3_DEV_ID_25GE:
++	case HNS3_DEV_ID_25GE_RDMA:
++		num = 2;
++		break;
++	case HNS3_DEV_ID_100G_RDMA_MACSEC:
++	case HNS3_DEV_ID_200G_RDMA:
++		num = 1;
++		break;
++	default:
++		num = 0;
++		break;
++	}
++
++	return num;
++}
++
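++/*
++ * Fill the caller's array from speed_fec_capa_tbl: entry 1 of the table is
++ * used for 25G, entry 0 for the compatible 10G rate, and entries 4 and 5
++ * for 100G and 200G respectively.
++ */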
++static int
++hns3_get_speed_fec_capa(struct rte_eth_fec_capa *speed_fec_capa,
++			uint16_t device_id)
++{
++	switch (device_id) {
++	case HNS3_DEV_ID_25GE:
++	/* fallthrough */
++	case HNS3_DEV_ID_25GE_RDMA:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[1].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[1].capa;
++
++		/* In HNS3 device, the 25G NIC is compatible with 10G rate */
++		speed_fec_capa[1].speed = speed_fec_capa_tbl[0].speed;
++		speed_fec_capa[1].capa = speed_fec_capa_tbl[0].capa;
++		break;
++	case HNS3_DEV_ID_100G_RDMA_MACSEC:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[4].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[4].capa;
++		break;
++	case HNS3_DEV_ID_200G_RDMA:
++		speed_fec_capa[0].speed = speed_fec_capa_tbl[5].speed;
++		speed_fec_capa[0].capa = speed_fec_capa_tbl[5].capa;
++		break;
++	default:
++		return -ENOTSUP;
++	}
++
++	return 0;
++}
++
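++/*
++ * Implements the usual ethdev FEC capability contract: when speed_fec_capa
++ * is NULL or num is too small, only the required number of entries is
++ * returned, so callers of rte_eth_fec_get_capability() can use the common
++ * two-call pattern, e.g.:
++ *
++ *     num = rte_eth_fec_get_capability(port_id, NULL, 0);
++ *     ...allocate num entries...
++ *     num = rte_eth_fec_get_capability(port_id, speed_fec_capa, num);
++ */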
++static int
++hns3_fec_get_capability(struct rte_eth_dev *dev,
++			struct rte_eth_fec_capa *speed_fec_capa,
++			unsigned int num)
++{
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
++	uint16_t device_id = pci_dev->id.device_id;
++	unsigned int capa_num;
++	int ret;
++
++	capa_num = hns3_get_speed_capa_num(device_id);
++	if (capa_num == 0) {
++		hns3_err(hw, "device(0x%x) is not supported by hns3 PMD",
++			 device_id);
++		return -ENOTSUP;
++	}
++
++	if (speed_fec_capa == NULL || num < capa_num)
++		return capa_num;
++
++	ret = hns3_get_speed_fec_capa(speed_fec_capa, device_id);
++	if (ret)
++		return -ENOTSUP;
++
++	return capa_num;
++}
++
++static int
++get_current_fec_auto_state(struct hns3_hw *hw, uint8_t *state)
++{
++	struct hns3_config_fec_cmd *req;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	/*
++	 * Reading CMD(HNS3_OPC_CONFIG_FEC_MODE) is not supported on devices
++	 * with a link speed below 10 Gbps.
++	 */
++	if (hw->mac.link_speed < ETH_SPEED_NUM_10G) {
++		*state = 0;
++		return 0;
++	}
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, true);
++	req = (struct hns3_config_fec_cmd *)desc.data;
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret) {
++		hns3_err(hw, "get current fec auto state failed, ret = %d",
++			 ret);
++		return ret;
++	}
++
++	*state = req->fec_mode & (1U << HNS3_MAC_CFG_FEC_AUTO_EN_B);
++	return 0;
++}
++
++static int
++hns3_fec_get_internal(struct hns3_hw *hw, uint32_t *fec_capa)
++{
++	struct hns3_sfp_info_cmd *resp;
++	uint32_t tmp_fec_capa;
++	uint8_t auto_state;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	/*
++	 * If the link is down and AUTO is enabled, AUTO is returned;
++	 * otherwise, the configured FEC mode is returned.
++	 * If the link is up, the current FEC mode is returned.
++	 */
++	if (hw->mac.link_status == ETH_LINK_DOWN) {
++		ret = get_current_fec_auto_state(hw, &auto_state);
++		if (ret)
++			return ret;
++
++		if (auto_state == 0x1) {
++			*fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(AUTO);
++			return 0;
++		}
++	}
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_GET_SFP_INFO, true);
++	resp = (struct hns3_sfp_info_cmd *)desc.data;
++	resp->query_type = HNS3_ACTIVE_QUERY;
++
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret == -EOPNOTSUPP) {
++		hns3_err(hw, "IMP do not support get FEC, ret = %d", ret);
++		return ret;
++	} else if (ret) {
++		hns3_err(hw, "get FEC failed, ret = %d", ret);
++		return ret;
++	}
++
++	/*
++	 * The FEC mode order defined in hns3 hardware is inconsistent with
++	 * that defined in the ethdev library, so the value needs to be
++	 * converted.
++	 */
++	switch (resp->active_fec) {
++	case HNS3_HW_FEC_MODE_NOFEC:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
++		break;
++	case HNS3_HW_FEC_MODE_BASER:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
++		break;
++	case HNS3_HW_FEC_MODE_RS:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(RS);
++		break;
++	default:
++		tmp_fec_capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC);
++		break;
++	}
++
++	*fec_capa = tmp_fec_capa;
++	return 0;
++}
++
++static int
++hns3_fec_get(struct rte_eth_dev *dev, uint32_t *fec_capa)
++{
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
++
++	return hns3_fec_get_internal(hw, fec_capa);
++}
++
++static int
++hns3_set_fec_hw(struct hns3_hw *hw, uint32_t mode)
++{
++	struct hns3_config_fec_cmd *req;
++	struct hns3_cmd_desc desc;
++	int ret;
++
++	hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_CONFIG_FEC_MODE, false);
++
++	req = (struct hns3_config_fec_cmd *)desc.data;
++	switch (mode) {
++	case RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_OFF);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(BASER):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_BASER);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(RS):
++		hns3_set_field(req->fec_mode, HNS3_MAC_CFG_FEC_MODE_M,
++				HNS3_MAC_CFG_FEC_MODE_S, HNS3_MAC_FEC_RS);
++		break;
++	case RTE_ETH_FEC_MODE_CAPA_MASK(AUTO):
++		hns3_set_bit(req->fec_mode, HNS3_MAC_CFG_FEC_AUTO_EN_B, 1);
++		break;
++	default:
++		return 0;
++	}
++	ret = hns3_cmd_send(hw, &desc, 1);
++	if (ret)
++		hns3_err(hw, "set fec mode failed, ret = %d", ret);
++
++	return ret;
++}
++
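++/*
++ * Pick the FEC capability entry that matches the current link speed from
++ * the array filled by hns3_fec_get_capability(): index 1 holds the 10G
++ * entry on 25G devices, index 0 holds the device's native speed.
++ */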
++static uint32_t
++get_current_speed_fec_cap(struct hns3_hw *hw, struct rte_eth_fec_capa *fec_capa)
++{
++	struct hns3_mac *mac = &hw->mac;
++	uint32_t cur_capa;
++
++	switch (mac->link_speed) {
++	case ETH_SPEED_NUM_10G:
++		cur_capa = fec_capa[1].capa;
++		break;
++	case ETH_SPEED_NUM_25G:
++	case ETH_SPEED_NUM_100G:
++	case ETH_SPEED_NUM_200G:
++		cur_capa = fec_capa[0].capa;
++		break;
++	default:
++		cur_capa = 0;
++		break;
++	}
++
++	return cur_capa;
++}
++
++static bool
++is_fec_mode_one_bit_set(uint32_t mode)
++{
++	int cnt = 0;
++	uint8_t i;
++
++	/* Count the set bits over all 32 bits of the mode. */
++	for (i = 0; i < sizeof(mode) * 8; i++)
++		if (mode >> i & 0x1)
++			cnt++;
++
++	return cnt == 1;
++}
++
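++/*
++ * Apply a new FEC mode: query the device's capabilities, reject requests
++ * with more than one mode bit set or a mode the current link speed cannot
++ * support, then program the hardware under hw->lock and cache the mode in
++ * pf->fec_mode so it can be restored after a reset.
++ */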
++static int
++hns3_fec_set(struct rte_eth_dev *dev, uint32_t mode)
++{
++#define FEC_CAPA_NUM 2
++	struct hns3_adapter *hns = dev->data->dev_private;
++	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(hns);
++	struct hns3_pf *pf = &hns->pf;
++
++	struct rte_eth_fec_capa fec_capa[FEC_CAPA_NUM];
++	uint32_t cur_capa;
++	uint32_t num = FEC_CAPA_NUM;
++	int ret;
++
++	ret = hns3_fec_get_capability(dev, fec_capa, num);
++	if (ret < 0)
++		return ret;
++
++	/* The HNS3 PMD supports only modes with a single bit set, e.g. 0x1, 0x4 */
++	if (!is_fec_mode_one_bit_set(mode)) {
++		hns3_err(hw, "FEC mode(0x%x) not supported in HNS3 PMD, "
++			     "exactly one FEC mode bit must be set", mode);
++		return -EINVAL;
++	}
++
++	/*
++	 * Check whether the configured mode is within the FEC capability of
++	 * the current link speed. If not, the configured mode is not
++	 * supported.
++	 */
++	cur_capa = get_current_speed_fec_cap(hw, fec_capa);
++	if (!(cur_capa & mode)) {
++		hns3_err(hw, "unsupported FEC mode = 0x%x", mode);
++		return -EINVAL;
++	}
++
++	rte_spinlock_lock(&hw->lock);
++	ret = hns3_set_fec_hw(hw, mode);
++	if (ret) {
++		rte_spinlock_unlock(&hw->lock);
++		return ret;
++	}
++
++	pf->fec_mode = mode;
++	rte_spinlock_unlock(&hw->lock);
++
++	return 0;
++}
++
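++/*
++ * Called from hns3_restore_conf(): re-apply the FEC mode cached in
++ * pf->fec_mode after a reset.
++ */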
++static int
++hns3_restore_fec(struct hns3_hw *hw)
++{
++	struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
++	struct hns3_pf *pf = &hns->pf;
++	uint32_t mode = pf->fec_mode;
++	int ret;
++>>>>>>> net/hns3: fix delay for waiting to stop Rx/Tx
+ 
+ 	if (hw->reset.level != HNS3_NONE_RESET && reset_level < hw->reset.level)
+ 		return HNS3_NONE_RESET;
+@@ -5201,7 +6381,7 @@ hns3_stop_service(struct hns3_adapter *hns)
@@ -46 +1238 @@
-index 41dd8ee129..7a5c162964 100644
+index a7b6188eea..eb3edf3464 100644
@@ -49 +1241 @@
-@@ -2107,7 +2107,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
+@@ -1631,7 +1631,7 @@ hns3vf_dev_stop(struct rte_eth_dev *dev)
@@ -57,2 +1249,2 @@
- 	if (__atomic_load_n(&hw->reset.resetting, __ATOMIC_RELAXED) == 0) {
-@@ -2558,7 +2558,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)
+ 	if (rte_atomic16_read(&hw->reset.resetting) == 0) {
+@@ -2005,7 +2005,7 @@ hns3vf_stop_service(struct hns3_adapter *hns)

