[PATCH 19.11] net/iavf: fix mbuf release in multi-process
Christian Ehrhardt
christian.ehrhardt at canonical.com
Wed Aug 3 11:58:42 CEST 2022
On Wed, Jul 20, 2022 at 7:31 AM Ke Zhang <ke1x.zhang at intel.com> wrote:
>
> [ upstream commit fced83c1229e0ad89f26d07fa7bd46b8767d9f5c ]
Thank you,
your patch arrived too late with regard to my original deadline, and I
was then unavailable for a while.
In the meantime more patches came in and I do not want to waste any of
them just because they were late.
Your patch is applied to the WIP branch now, but currently testing of
-rc1 is going on which I do not want to disrupt.
If we need an -rc2 anyway, or generally have the time to do an -rc2
without too much disruption, it will be in 19.11.13; otherwise it is
already queued for 19.11.14.
> In a multi-process environment, the secondary process operates on the
> shared memory and changes the function pointer of the primary process,
> so the function's address cannot be resolved when the primary process
> releases the queues, resulting in a crash.
>
> Fixes: 319c421f3890 ("net/avf: enable SSE Rx Tx")
> Cc: stable at dpdk.org
>
> Signed-off-by: Ke Zhang <ke1x.zhang at intel.com>
> ---
> drivers/net/iavf/iavf_rxtx.c | 32 +++++++++++++++++-----------
> drivers/net/iavf/iavf_rxtx.h | 10 +++++++++
> drivers/net/iavf/iavf_rxtx_vec_sse.c | 16 ++++----------
> 3 files changed, 34 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
> index d406f0d99..eda102a5a 100644
> --- a/drivers/net/iavf/iavf_rxtx.c
> +++ b/drivers/net/iavf/iavf_rxtx.c
> @@ -295,12 +295,20 @@ release_txq_mbufs(struct iavf_tx_queue *txq)
> }
> }
>
> -static const struct iavf_rxq_ops def_rxq_ops = {
> - .release_mbufs = release_rxq_mbufs,
> +static const
> +struct iavf_rxq_ops iavf_rxq_release_mbufs_ops[] = {
> + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_rxq_mbufs,
> +#ifdef RTE_ARCH_X86
> + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_rx_queue_release_mbufs_sse,
> +#endif
> };
>
> -static const struct iavf_txq_ops def_txq_ops = {
> - .release_mbufs = release_txq_mbufs,
> +static const
> +struct iavf_txq_ops iavf_txq_release_mbufs_ops[] = {
> + [IAVF_REL_MBUFS_DEFAULT].release_mbufs = release_txq_mbufs,
> +#ifdef RTE_ARCH_X86
> + [IAVF_REL_MBUFS_SSE_VEC].release_mbufs = iavf_tx_queue_release_mbufs_sse,
> +#endif
> };
>
> int
> @@ -402,7 +410,7 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> rxq->q_set = TRUE;
> dev->data->rx_queues[queue_idx] = rxq;
> rxq->qrx_tail = hw->hw_addr + IAVF_QRX_TAIL1(rxq->queue_id);
> - rxq->ops = &def_rxq_ops;
> + rxq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
>
> if (check_rx_bulk_allow(rxq) == TRUE) {
> PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
> @@ -513,7 +521,7 @@ iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
> txq->q_set = TRUE;
> dev->data->tx_queues[queue_idx] = txq;
> txq->qtx_tail = hw->hw_addr + IAVF_QTX_TAIL1(queue_idx);
> - txq->ops = &def_txq_ops;
> + txq->rel_mbufs_type = IAVF_REL_MBUFS_DEFAULT;
>
> if (check_tx_vec_allow(txq) == FALSE) {
> struct iavf_adapter *ad =
> @@ -618,7 +626,7 @@ iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
> }
>
> rxq = dev->data->rx_queues[rx_queue_id];
> - rxq->ops->release_mbufs(rxq);
> + iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
> reset_rx_queue(rxq);
> dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
>
> @@ -646,7 +654,7 @@ iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
> }
>
> txq = dev->data->tx_queues[tx_queue_id];
> - txq->ops->release_mbufs(txq);
> + iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
> reset_tx_queue(txq);
> dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
>
> @@ -661,7 +669,7 @@ iavf_dev_rx_queue_release(void *rxq)
> if (!q)
> return;
>
> - q->ops->release_mbufs(q);
> + iavf_rxq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
> rte_free(q->sw_ring);
> rte_memzone_free(q->mz);
> rte_free(q);
> @@ -675,7 +683,7 @@ iavf_dev_tx_queue_release(void *txq)
> if (!q)
> return;
>
> - q->ops->release_mbufs(q);
> + iavf_txq_release_mbufs_ops[q->rel_mbufs_type].release_mbufs(q);
> rte_free(q->sw_ring);
> rte_memzone_free(q->mz);
> rte_free(q);
> @@ -699,7 +707,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
> txq = dev->data->tx_queues[i];
> if (!txq)
> continue;
> - txq->ops->release_mbufs(txq);
> + iavf_txq_release_mbufs_ops[txq->rel_mbufs_type].release_mbufs(txq);
> reset_tx_queue(txq);
> dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
> }
> @@ -707,7 +715,7 @@ iavf_stop_queues(struct rte_eth_dev *dev)
> rxq = dev->data->rx_queues[i];
> if (!rxq)
> continue;
> - rxq->ops->release_mbufs(rxq);
> + iavf_rxq_release_mbufs_ops[rxq->rel_mbufs_type].release_mbufs(rxq);
> reset_rx_queue(rxq);
> dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
> }
> diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
> index c3baf9386..6be05deb3 100644
> --- a/drivers/net/iavf/iavf_rxtx.h
> +++ b/drivers/net/iavf/iavf_rxtx.h
> @@ -87,6 +87,7 @@ struct iavf_rx_queue {
> struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
> struct rte_mbuf *pkt_last_seg; /* last segment of current packet */
> struct rte_mbuf fake_mbuf; /* dummy mbuf */
> + uint8_t rel_mbufs_type;
>
> /* used for VPMD */
> uint16_t rxrearm_nb; /* number of remaining to be re-armed */
> @@ -132,6 +133,7 @@ struct iavf_tx_queue {
> uint16_t last_desc_cleaned; /* last desc have been cleaned*/
> uint16_t free_thresh;
> uint16_t rs_thresh;
> + uint8_t rel_mbufs_type;
>
> uint16_t port_id;
> uint16_t queue_id;
> @@ -156,6 +158,12 @@ union iavf_tx_offload {
> };
> };
>
> +enum iavf_rxtx_rel_mbufs_type {
> + IAVF_REL_MBUFS_DEFAULT = 0,
> + IAVF_REL_MBUFS_SSE_VEC = 1,
> + IAVF_REL_MBUFS_AVX512_VEC = 2,
> +};
> +
> int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
> uint16_t queue_idx,
> uint16_t nb_desc,
> @@ -215,6 +223,8 @@ int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
> int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
> int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
> int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
> +void iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq);
> +void iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq);
>
> static inline
> void iavf_dump_rx_descriptor(struct iavf_rx_queue *rxq,
> diff --git a/drivers/net/iavf/iavf_rxtx_vec_sse.c b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> index aefa81ecd..3c2eed0cd 100644
> --- a/drivers/net/iavf/iavf_rxtx_vec_sse.c
> +++ b/drivers/net/iavf/iavf_rxtx_vec_sse.c
> @@ -668,37 +668,29 @@ iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
> return nb_tx;
> }
>
> -static void __attribute__((cold))
> +void
> iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
> {
> _iavf_rx_queue_release_mbufs_vec(rxq);
> }
>
> -static void __attribute__((cold))
> +void
> iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
> {
> _iavf_tx_queue_release_mbufs_vec(txq);
> }
>
> -static const struct iavf_rxq_ops sse_vec_rxq_ops = {
> - .release_mbufs = iavf_rx_queue_release_mbufs_sse,
> -};
> -
> -static const struct iavf_txq_ops sse_vec_txq_ops = {
> - .release_mbufs = iavf_tx_queue_release_mbufs_sse,
> -};
> -
> int __attribute__((cold))
> iavf_txq_vec_setup(struct iavf_tx_queue *txq)
> {
> - txq->ops = &sse_vec_txq_ops;
> + txq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
> return 0;
> }
>
> int __attribute__((cold))
> iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
> {
> - rxq->ops = &sse_vec_rxq_ops;
> + rxq->rel_mbufs_type = IAVF_REL_MBUFS_SSE_VEC;
> return iavf_rxq_vec_setup_default(rxq);
> }
>
> --
> 2.25.1
>
--
Christian Ehrhardt
Senior Staff Engineer, Ubuntu Server
Canonical Ltd
More information about the stable
mailing list