[dpdk-stable] [PATCH 18.11] net/virtio: cleanup on demand when in-order Tx
Kevin Traynor
ktraynor at redhat.com
Thu Mar 19 00:01:46 CET 2020
On 16/03/2020 12:35, Marvin Liu wrote:
> [ upstream commit e76097f8489a0c4c8044f3382ad81452c4771624 ]
>
> Check whether enough free descriptors are available before the burst
> enqueue operation. If more space is needed, try to clean up used
> descriptors on demand. This gives more chances to free used
> descriptors and thus helps RFC2544 performance. Also deduct packets
> that failed to transmit from the total transmit count.
>
> Fixes: e5f456a98d3c ("net/virtio: support in-order Rx and Tx")
>
> Signed-off-by: Marvin Liu <yong.liu at intel.com>
> Reviewed-by: Maxime Coquelin <maxime.coquelin at redhat.com>
>
> diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
> index 306009d96..2bbda8525 100644
> --- a/drivers/net/virtio/virtio_rxtx.c
> +++ b/drivers/net/virtio/virtio_rxtx.c
> @@ -1471,6 +1471,21 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  	return nb_tx;
>  }
> 
> +static __rte_always_inline int
> +virtio_xmit_try_cleanup_inorder(struct virtqueue *vq, uint16_t need)
> +{
> +	uint16_t nb_used, nb_clean, nb_descs;
> +
> +	nb_descs = vq->vq_free_cnt + need;
> +	nb_used = VIRTQUEUE_NUSED(vq);
> +	virtio_rmb();
> +	nb_clean = RTE_MIN(need, (int)nb_used);
> +
> +	virtio_xmit_cleanup_inorder(vq, nb_clean);
> +
> +	return nb_descs - vq->vq_free_cnt;
> +}
> +
>  uint16_t
>  virtio_xmit_pkts_inorder(void *tx_queue,
>  			struct rte_mbuf **tx_pkts,
> @@ -1480,8 +1495,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	struct virtqueue *vq = txvq->vq;
>  	struct virtio_hw *hw = vq->hw;
>  	uint16_t hdr_size = hw->vtnet_hdr_size;
> -	uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
> +	uint16_t nb_used, nb_tx = 0, nb_inorder_pkts = 0;
>  	struct rte_mbuf *inorder_pkts[nb_pkts];
> +	int need;
> 
>  	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
>  		return nb_tx;
> @@ -1497,14 +1513,9 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
>  		virtio_xmit_cleanup_inorder(vq, nb_used);
> 
> -	if (unlikely(!vq->vq_free_cnt))
> -		virtio_xmit_cleanup_inorder(vq, nb_used);
> -
> -	nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
> -
> -	for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
> +	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
>  		struct rte_mbuf *txm = tx_pkts[nb_tx];
> -		int slots, need;
> +		int slots;
> 
>  		/* optimize ring usage */
>  		if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
> @@ -1524,6 +1535,17 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		}
> 
>  		if (nb_inorder_pkts) {
> +			need = nb_inorder_pkts - vq->vq_free_cnt;
> +			if (unlikely(need > 0)) {
> +				need = virtio_xmit_try_cleanup_inorder(vq,
> +								need);
> +				if (unlikely(need > 0)) {
> +					PMD_TX_LOG(ERR,
> +						"No free tx descriptors to "
> +						"transmit");
> +					break;
> +				}
> +			}
>  			virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
>  					nb_inorder_pkts);
>  			nb_inorder_pkts = 0;
> @@ -1532,13 +1554,7 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  		slots = txm->nb_segs + 1;
>  		need = slots - vq->vq_free_cnt;
>  		if (unlikely(need > 0)) {
> -			nb_used = VIRTQUEUE_NUSED(vq);
> -			virtio_rmb();
> -			need = RTE_MIN(need, (int)nb_used);
> -
> -			virtio_xmit_cleanup_inorder(vq, need);
> -
> -			need = slots - vq->vq_free_cnt;
> +			need = virtio_xmit_try_cleanup_inorder(vq, slots);
> 
>  			if (unlikely(need > 0)) {
>  				PMD_TX_LOG(ERR,
> @@ -1554,9 +1570,21 @@ virtio_xmit_pkts_inorder(void *tx_queue,
>  	}
> 
>  	/* Transmit all inorder packets */
> -	if (nb_inorder_pkts)
> +	if (nb_inorder_pkts) {
> +		need = nb_inorder_pkts - vq->vq_free_cnt;
> +		if (unlikely(need > 0)) {
> +			need = virtio_xmit_try_cleanup_inorder(vq, need);
> +			if (unlikely(need > 0)) {
> +				PMD_TX_LOG(ERR,
> +					"No free tx descriptors to transmit");
> +				nb_inorder_pkts = vq->vq_free_cnt;
> +				nb_tx -= need;
> +			}
> +		}
> +
>  		virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
>  				nb_inorder_pkts);
> +	}
> 
>  	txvq->stats.packets += nb_tx;
> 
>
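To summarize the mechanism for the archive: the new helper frees at most
'need' used descriptors and returns the remaining shortfall, so each
enqueue path can bail out cleanly when the ring really is full. Below is
a minimal standalone sketch of that bookkeeping; struct model_vq and the
model_* names are hypothetical stand-ins, not the driver code, and only
the arithmetic mirrors virtio_xmit_try_cleanup_inorder().

#include <stdint.h>
#include <stdio.h>

struct model_vq {
	uint16_t vq_free_cnt;	/* descriptors currently free */
	uint16_t used_cnt;	/* descriptors the device has returned */
};

/* Stand-in for virtio_xmit_cleanup_inorder(): recycle 'num' used slots. */
static void model_cleanup_inorder(struct model_vq *vq, uint16_t num)
{
	vq->used_cnt -= num;
	vq->vq_free_cnt += num;
}

/*
 * Mirror of the new helper: free at most 'need' used descriptors and
 * return how many slots are still missing (<= 0 means enough room).
 */
static int model_try_cleanup_inorder(struct model_vq *vq, uint16_t need)
{
	uint16_t nb_descs = vq->vq_free_cnt + need;	/* target free count */
	uint16_t nb_clean = need < vq->used_cnt ? need : vq->used_cnt;

	model_cleanup_inorder(vq, nb_clean);

	return nb_descs - vq->vq_free_cnt;	/* remaining shortfall */
}

int main(void)
{
	/* 1 slot free, 8 used: a 5-slot burst is short by 4. */
	struct model_vq vq = { .vq_free_cnt = 1, .used_cnt = 8 };
	int need = 5 - vq.vq_free_cnt;

	if (need > 0)
		need = model_try_cleanup_inorder(&vq, need);

	printf("free=%u still_missing=%d\n", (unsigned)vq.vq_free_cnt, need);
	return 0;
}

With those numbers the cleanup drains four used slots and reports no
remaining shortfall, which is exactly the condition the patch checks
before logging an error and breaking out of the burst.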
Applied, thanks.