[PATCH] net/ice: fix race condition for multi-cores
Simei Su
simei.su at intel.com
Wed Jun 8 04:46:01 CEST 2022
In multi-core cases for Rx timestamp offload, to avoid the PHC time being
frequently overwritten, move the related variables from ice_adapter to
the ice_rx_queue structure, so that each queue handles its own timestamp
calculation.
Fixes: 953e74e6b73a ("net/ice: enable Rx timestamp on flex descriptor")
Fixes: 5543827fc6df ("net/ice: improve performance of Rx timestamp offload")
Cc: stable at dpdk.org
Signed-off-by: Simei Su <simei.su at intel.com>
---
drivers/net/ice/ice_ethdev.h | 3 ---
drivers/net/ice/ice_rxtx.c | 48 ++++++++++++++++++++++----------------------
drivers/net/ice/ice_rxtx.h | 3 +++
3 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/net/ice/ice_ethdev.h b/drivers/net/ice/ice_ethdev.h
index f9f4a1c..c257bb2 100644
--- a/drivers/net/ice/ice_ethdev.h
+++ b/drivers/net/ice/ice_ethdev.h
@@ -606,9 +606,6 @@ struct ice_adapter {
struct rte_timecounter tx_tstamp_tc;
bool ptp_ena;
uint64_t time_hw;
- uint32_t hw_time_high; /* high 32 bits of timestamp */
- uint32_t hw_time_low; /* low 32 bits of timestamp */
- uint64_t hw_time_update; /* SW time of HW record updating */
struct ice_fdir_prof_info fdir_prof_info[ICE_MAX_PTGS];
struct ice_rss_prof_info rss_prof_info[ICE_MAX_PTGS];
/* True if DCF state of the associated PF is on */
diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index 91cdc56..71e5c6f 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -1593,7 +1593,7 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (unlikely(sw_cur_time - ad->hw_time_update > 4))
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
is_tsinit = 1;
}
#endif
@@ -1637,16 +1637,16 @@ ice_rx_scan_hw_ring(struct ice_rx_queue *rxq)
if (unlikely(is_tsinit)) {
ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1,
rxq->time_high);
- ad->hw_time_low = (uint32_t)ts_ns;
- ad->hw_time_high = (uint32_t)(ts_ns >> 32);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
is_tsinit = false;
} else {
- if (rxq->time_high < ad->hw_time_low)
- ad->hw_time_high += 1;
- ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
- ad->hw_time_low = rxq->time_high;
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
- ad->hw_time_update = rte_get_timer_cycles() /
+ rxq->hw_time_update = rte_get_timer_cycles() /
(rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(mb,
ice_timestamp_dynfield_offset,
@@ -1859,7 +1859,7 @@ ice_recv_scattered_pkts(void *rx_queue,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (unlikely(sw_cur_time - ad->hw_time_update > 4))
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
is_tsinit = true;
}
#endif
@@ -1979,16 +1979,16 @@ ice_recv_scattered_pkts(void *rx_queue,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
if (unlikely(is_tsinit)) {
ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
- ad->hw_time_low = (uint32_t)ts_ns;
- ad->hw_time_high = (uint32_t)(ts_ns >> 32);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
is_tsinit = false;
} else {
- if (rxq->time_high < ad->hw_time_low)
- ad->hw_time_high += 1;
- ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
- ad->hw_time_low = rxq->time_high;
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
- ad->hw_time_update = rte_get_timer_cycles() /
+ rxq->hw_time_update = rte_get_timer_cycles() /
(rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
(ice_timestamp_dynfield_offset),
@@ -2369,7 +2369,7 @@ ice_recv_pkts(void *rx_queue,
if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) {
uint64_t sw_cur_time = rte_get_timer_cycles() / (rte_get_timer_hz() / 1000);
- if (unlikely(sw_cur_time - ad->hw_time_update > 4))
+ if (unlikely(sw_cur_time - rxq->hw_time_update > 4))
is_tsinit = 1;
}
#endif
@@ -2430,16 +2430,16 @@ ice_recv_pkts(void *rx_queue,
rte_le_to_cpu_32(rxd.wb.flex_ts.ts_high);
if (unlikely(is_tsinit)) {
ts_ns = ice_tstamp_convert_32b_64b(hw, ad, 1, rxq->time_high);
- ad->hw_time_low = (uint32_t)ts_ns;
- ad->hw_time_high = (uint32_t)(ts_ns >> 32);
+ rxq->hw_time_low = (uint32_t)ts_ns;
+ rxq->hw_time_high = (uint32_t)(ts_ns >> 32);
is_tsinit = false;
} else {
- if (rxq->time_high < ad->hw_time_low)
- ad->hw_time_high += 1;
- ts_ns = (uint64_t)ad->hw_time_high << 32 | rxq->time_high;
- ad->hw_time_low = rxq->time_high;
+ if (rxq->time_high < rxq->hw_time_low)
+ rxq->hw_time_high += 1;
+ ts_ns = (uint64_t)rxq->hw_time_high << 32 | rxq->time_high;
+ rxq->hw_time_low = rxq->time_high;
}
- ad->hw_time_update = rte_get_timer_cycles() /
+ rxq->hw_time_update = rte_get_timer_cycles() /
(rte_get_timer_hz() / 1000);
*RTE_MBUF_DYNFIELD(rxm,
(ice_timestamp_dynfield_offset),
diff --git a/drivers/net/ice/ice_rxtx.h b/drivers/net/ice/ice_rxtx.h
index bb18a01..f5337d5 100644
--- a/drivers/net/ice/ice_rxtx.h
+++ b/drivers/net/ice/ice_rxtx.h
@@ -95,6 +95,9 @@ struct ice_rx_queue {
uint32_t time_high;
uint32_t hw_register_set;
const struct rte_memzone *mz;
+ uint32_t hw_time_high; /* high 32 bits of timestamp */
+ uint32_t hw_time_low; /* low 32 bits of timestamp */
+ uint64_t hw_time_update; /* SW time of HW record updating */
};
struct ice_tx_entry {
--
2.9.5
More information about the stable
mailing list