[dpdk-stable] patch 'net/mlx5: fix index handling for Tx ring' has been queued to stable release 17.02.1
Yuanhan Liu
yuanhan.liu at linux.intel.com
Thu May 25 11:50:08 CEST 2017
Hi,
FYI, your patch has been queued to stable release 17.02.1
Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 05/28/17. So please
shout if anyone has objections.
Thanks.
--yliu
---
From 7280802b7e280666d538735cf2d97a33450f0ba3 Mon Sep 17 00:00:00 2001
From: Yongseok Koh <yskoh at mellanox.com>
Date: Tue, 9 May 2017 13:49:30 -0700
Subject: [PATCH] net/mlx5: fix index handling for Tx ring
[ upstream commit ac180a21f621fd2f8c8af97d6fa1e36f8bcebc1f ]
In case of resource deficiency on Tx, mlx5_tx_burst() breaks the loop
without rolling back consumed resources (txq->wqes[] and txq->elts[]). This
can make application crash because unposted mbufs can be freed while
processing completions. Other Tx functions don't have this issue.
Fixes: 3f13f8c23a7c ("net/mlx5: support hardware TSO")
Fixes: f04f1d51564b ("net/mlx5: fix Tx WQE corruption caused by starvation")
Reported-by: Hanoch Haim <hhaim at cisco.com>
Signed-off-by: Yongseok Koh <yskoh at mellanox.com>
Acked-by: Adrien Mazarguil <adrien.mazarguil at 6wind.com>
---
drivers/net/mlx5/mlx5_rxtx.c | 25 ++++++++++++++-----------
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 797b66e..17ba44c 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -369,6 +369,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
uint16_t max_wqe;
unsigned int comp;
volatile struct mlx5_wqe_v *wqe = NULL;
+ volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
unsigned int segs_n = 0;
struct rte_mbuf *buf = NULL;
uint8_t *raw;
@@ -389,6 +390,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
volatile rte_v128u32_t *dseg = NULL;
uint32_t length;
unsigned int ds = 0;
+ unsigned int sg = 0; /* counter of additional segs attached. */
uintptr_t addr;
uint64_t naddr;
uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
@@ -583,12 +585,14 @@ next_seg:
};
(*txq->elts)[elts_head] = buf;
elts_head = (elts_head + 1) & (elts_n - 1);
- ++j;
- --segs_n;
- if (segs_n)
+ ++sg;
+ /* Advance counter only if all segs are successfully posted. */
+ if (sg < segs_n) {
goto next_seg;
- else
+ } else {
--pkts_n;
+ j += sg;
+ }
next_pkt:
++i;
/* Initialize known and common part of the WQE structure. */
@@ -605,6 +609,8 @@ next_pkt:
(ehdr << 16) | htons(pkt_inline_sz),
};
txq->wqe_ci += (ds + 3) / 4;
+ /* Save the last successful WQE for completion request */
+ last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Increment sent bytes counter. */
txq->stats.obytes += total_length;
@@ -613,16 +619,14 @@ next_pkt:
/* Take a shortcut if nothing must be sent. */
if (unlikely(i == 0))
return 0;
+ txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
/* Check whether completion threshold has been reached. */
comp = txq->elts_comp + i + j;
if (comp >= MLX5_TX_COMP_THRESH) {
- volatile struct mlx5_wqe_ctrl *w =
- (volatile struct mlx5_wqe_ctrl *)wqe;
-
/* Request completion on last WQE. */
- w->ctrl2 = htonl(8);
+ last_wqe->ctrl2 = htonl(8);
/* Save elts_head in unused "immediate" field of WQE. */
- w->ctrl3 = elts_head;
+ last_wqe->ctrl3 = txq->elts_head;
txq->elts_comp = 0;
} else {
txq->elts_comp = comp;
@@ -632,8 +636,7 @@ next_pkt:
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
- txq->elts_head = elts_head;
+ mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
return i;
}
--
1.9.0
More information about the stable
mailing list