[PATCH v4] net/ice: fix ice dcf control thread crash

Zhang, Qi Z qi.z.zhang at intel.com
Fri Mar 17 11:15:06 CET 2023



> -----Original Message-----
> From: Ye, MingjinX <mingjinx.ye at intel.com>
> Sent: Friday, March 17, 2023 1:10 PM
> To: dev at dpdk.org
> Cc: Yang, Qiming <qiming.yang at intel.com>; stable at dpdk.org; Zhou, YidingX
> <yidingx.zhou at intel.com>; Ye, MingjinX <mingjinx.ye at intel.com>; Zhang,
> Ke1X <ke1x.zhang at intel.com>; Zhang, Qi Z <qi.z.zhang at intel.com>
> Subject: [PATCH v4] net/ice: fix ice dcf control thread crash
> 
> The control thread accesses the hardware resources after the resources were
> released, which results in a segmentation fault.
> 
> The 'ice-reset' threads are detached, so thread resources cannot be
> reclaimed by `pthread_join` calls.
> 
> This commit tracks the number of running 'ice-reset' threads by adding two
> variables (the 'vsi_update_thread_num' static global and the
> 'vsi_thread_lock' static global spinlock). When releasing HW resources, we
> clear the event callback function, which makes these threads exit quickly.
> After the number of 'ice-reset' threads has decreased to 0, we release the
> resources.
> 
> Fixes: 3b3757bda3c3 ("net/ice: get VF hardware index in DCF")
> Fixes: 931ee54072b1 ("net/ice: support QoS bandwidth config after VF reset
> in DCF")
> Fixes: c7e1a1a3bfeb ("net/ice: refactor DCF VLAN handling")
> Fixes: 0b02c9519432 ("net/ice: handle PF initialization by DCF")
> Fixes: b71573ec2fc2 ("net/ice: retry getting VF VSI map after failure")
> Fixes: 7564d5509611 ("net/ice: add DCF hardware initialization")
> Cc: stable at dpdk.org
> 
> Signed-off-by: Ke Zhang <ke1x.zhang at intel.com>
> Signed-off-by: Mingjin Ye <mingjinx.ye at intel.com>
> ---
> v2: add pthread_exit() for windows
> ---
> v3: Optimization: it is unsafe for a thread to exit forcibly, since that may
> leave the spinlock unreleased.
> ---
> v4: Safely wait for all event threads to end
> ---
>  drivers/net/ice/ice_dcf.c        | 18 ++++++++++++++--
>  drivers/net/ice/ice_dcf.h        |  1 +
>  drivers/net/ice/ice_dcf_parent.c | 37 ++++++++++++++++++++++++++++++++
>  3 files changed, 54 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/net/ice/ice_dcf.c b/drivers/net/ice/ice_dcf.c index
> 1c3d22ae0f..169520f5bb 100644
> --- a/drivers/net/ice/ice_dcf.c
> +++ b/drivers/net/ice/ice_dcf.c
> @@ -543,6 +543,8 @@ ice_dcf_handle_vsi_update_event(struct ice_dcf_hw
> *hw)
>  	ice_dcf_disable_irq0(hw);
> 
>  	for (;;) {
> +		if (hw->vc_event_msg_cb == NULL)
> +			break;
>  		if (ice_dcf_get_vf_resource(hw) == 0 &&
>  		    ice_dcf_get_vf_vsi_map(hw) >= 0) {
>  			err = 0;
> @@ -555,8 +557,10 @@ ice_dcf_handle_vsi_update_event(struct ice_dcf_hw
> *hw)
>  		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
>  	}
> 
> -	rte_intr_enable(pci_dev->intr_handle);
> -	ice_dcf_enable_irq0(hw);
> +	if (hw->vc_event_msg_cb != NULL) {
> +		rte_intr_enable(pci_dev->intr_handle);
> +		ice_dcf_enable_irq0(hw);
> +	}
> 
>  	rte_spinlock_unlock(&hw->vc_cmd_send_lock);
> 
> @@ -749,6 +753,12 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev,
> struct ice_dcf_hw *hw)
>  	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
>  	struct rte_intr_handle *intr_handle = pci_dev->intr_handle;
> 
> +	/* Clear the event callback; the `VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE`
> +	 * event will be ignored and all running `ice-reset` threads
> +	 * will exit quickly.
> +	 */
> +	hw->vc_event_msg_cb = NULL;
> +
>  	if (hw->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_QOS)
>  		if (hw->tm_conf.committed) {
>  			ice_dcf_clear_bw(hw);
> @@ -760,6 +770,10 @@ ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev,
> struct ice_dcf_hw *hw)
>  	rte_intr_callback_unregister(intr_handle,
>  				     ice_dcf_dev_interrupt_handler, hw);
> 
> +	/* Wait for all `ice-reset` threads to exit. */
> +	while (ice_dcf_event_handle_num() > 0)
> +		rte_delay_ms(ICE_DCF_ARQ_CHECK_TIME);
> +
>  	ice_dcf_mode_disable(hw);
>  	iavf_shutdown_adminq(&hw->avf);
> 
> diff --git a/drivers/net/ice/ice_dcf.h b/drivers/net/ice/ice_dcf.h index
> 7f42ebabe9..6c636a7497 100644
> --- a/drivers/net/ice/ice_dcf.h
> +++ b/drivers/net/ice/ice_dcf.h
> @@ -143,6 +143,7 @@ int ice_dcf_execute_virtchnl_cmd(struct ice_dcf_hw
> *hw,  int ice_dcf_send_aq_cmd(void *dcf_hw, struct ice_aq_desc *desc,
>  			void *buf, uint16_t buf_size);
>  int ice_dcf_handle_vsi_update_event(struct ice_dcf_hw *hw);
> +int ice_dcf_event_handle_num(void);
>  int ice_dcf_init_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> void ice_dcf_uninit_hw(struct rte_eth_dev *eth_dev, struct ice_dcf_hw *hw);
> int ice_dcf_configure_rss_key(struct ice_dcf_hw *hw); diff --git
> a/drivers/net/ice/ice_dcf_parent.c b/drivers/net/ice/ice_dcf_parent.c
> index 01e390ddda..0ff08e179e 100644
> --- a/drivers/net/ice/ice_dcf_parent.c
> +++ b/drivers/net/ice/ice_dcf_parent.c
> @@ -14,6 +14,9 @@
> 
>  #define ICE_DCF_VSI_UPDATE_SERVICE_INTERVAL	100000 /* us */
>  static rte_spinlock_t vsi_update_lock = RTE_SPINLOCK_INITIALIZER;
> +static rte_spinlock_t vsi_thread_lock = RTE_SPINLOCK_INITIALIZER;
> +static int vsi_update_thread_num;

Is this correct? Consider a scenario where we have two NICs and two DCFs running in the same DPDK process. Should the parameters be specific to each DCF?

> +
> 
>  struct ice_dcf_reset_event_param {
>  	struct ice_dcf_hw *dcf_hw;
> @@ -130,6 +133,9 @@ ice_dcf_vsi_update_service_handler(void *param)
> 
>  	rte_spinlock_lock(&vsi_update_lock);
> 
> +	if (hw->vc_event_msg_cb == NULL)
> +		goto update_end;
> +
>  	if (!ice_dcf_handle_vsi_update_event(hw)) {
>  		__atomic_store_n(&parent_adapter->dcf_state_on, true,
>  				 __ATOMIC_RELAXED);
> @@ -150,10 +156,14 @@ ice_dcf_vsi_update_service_handler(void *param)
>  	if (hw->tm_conf.committed)
>  		ice_dcf_replay_vf_bw(hw, reset_param->vf_id);
> 
> +update_end:
>  	rte_spinlock_unlock(&vsi_update_lock);
> 
>  	free(param);
> 
> +	rte_spinlock_lock(&vsi_thread_lock);
> +	vsi_update_thread_num--;
> +	rte_spinlock_unlock(&vsi_thread_lock);
>  	return NULL;
>  }
> 
> @@ -183,6 +193,10 @@ start_vsi_reset_thread(struct ice_dcf_hw *dcf_hw,
> bool vfr, uint16_t vf_id)
>  		PMD_DRV_LOG(ERR, "Failed to start the thread for reset
> handling");
>  		free(param);
>  	}
> +
> +	rte_spinlock_lock(&vsi_thread_lock);
> +	vsi_update_thread_num++;
> +	rte_spinlock_unlock(&vsi_thread_lock);
>  }
> 
>  static uint32_t
> @@ -262,6 +276,18 @@ ice_dcf_handle_pf_event_msg(struct ice_dcf_hw
> *dcf_hw,
>  		PMD_DRV_LOG(DEBUG,
> "VIRTCHNL_EVENT_PF_DRIVER_CLOSE event");
>  		break;
>  	case VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE:
> +		/* If the event handling callback is empty, the event cannot
> +		 * be handled. Therefore we ignore this event.
> +		 */
> +		if (dcf_hw->vc_event_msg_cb == NULL) {
> +			PMD_DRV_LOG(DEBUG,
> +				"VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE
> event "
> +				"received: VF%u with VSI num %u, ignore
> processing",
> +			    pf_msg->event_data.vf_vsi_map.vf_id,
> +			    pf_msg->event_data.vf_vsi_map.vsi_id);
> +			break;
> +		}
> +
>  		PMD_DRV_LOG(DEBUG,
> "VIRTCHNL_EVENT_DCF_VSI_MAP_UPDATE event : VF%u with VSI num %u",
>  			    pf_msg->event_data.vf_vsi_map.vf_id,
>  			    pf_msg->event_data.vf_vsi_map.vsi_id);
> @@ -505,3 +531,14 @@ ice_dcf_uninit_parent_adapter(struct rte_eth_dev
> *eth_dev)
>  	ice_flow_uninit(parent_adapter);
>  	ice_dcf_uninit_parent_hw(parent_hw);
>  }
> +
> +int ice_dcf_event_handle_num(void)
> +{
> +	int ret;
> +
> +	rte_spinlock_lock(&vsi_thread_lock);
> +	ret = vsi_update_thread_num;
> +	rte_spinlock_unlock(&vsi_thread_lock);
> +
> +	return ret;
> +}
> --
> 2.25.1



More information about the stable mailing list