[dpdk-stable] patch 'eventdev: use C11 atomics for lcore timer armed flag' has been queued to stable release 19.11.4

luca.boccassi at gmail.com
Fri Jul 24 13:58:59 CEST 2020


Hi,

FYI, your patch has been queued to stable release 19.11.4

Note it hasn't been pushed to http://dpdk.org/browse/dpdk-stable yet.
It will be pushed if I get no objections before 07/26/20, so please
shout if you have any objections.

Also note that after the patch there's a diff of the upstream commit vs the
patch applied to the branch. This will indicate if any rebasing was needed
to apply it to the stable branch. If there were code changes for rebasing
(i.e. not only metadata diffs), please double-check that the rebase was
done correctly.

Thanks.

Luca Boccassi

---
From 3939291be17174407903296e4f47986515e3e9fc Mon Sep 17 00:00:00 2001
From: Phil Yang <phil.yang at arm.com>
Date: Tue, 7 Jul 2020 23:54:51 +0800
Subject: [PATCH] eventdev: use C11 atomics for lcore timer armed flag

[ upstream commit 1028d63eb214c5961b2df9d4b5662dab082b839e ]

The in_use flag is a per-core variable which is not shared between
lcores in the normal case, so accesses to it only need to be ordered
within the same core. However, if a non-EAL thread picks the highest
lcore to insert timers into, there is a possibility of conflicts
on this flag between threads, and an atomic compare-and-swap
operation is needed.

Use the C11 atomics instead of the generic rte_atomic operations to
avoid the unnecessary barrier on aarch64; a standalone sketch of this
claim pattern follows the patch below.

Signed-off-by: Phil Yang <phil.yang at arm.com>
Reviewed-by: Dharmik Thakkar <dharmik.thakkar at arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
Acked-by: Erik Gabriel Carrillo <erik.g.carrillo at intel.com>
---
 lib/librte_eventdev/rte_event_timer_adapter.c | 15 +++++++++++----
 1 file changed, 11 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eventdev/rte_event_timer_adapter.c b/lib/librte_eventdev/rte_event_timer_adapter.c
index 76885972e..678be9f0a 100644
--- a/lib/librte_eventdev/rte_event_timer_adapter.c
+++ b/lib/librte_eventdev/rte_event_timer_adapter.c
@@ -550,7 +550,7 @@ struct swtim {
 	uint32_t timer_data_id;
 	/* Track which cores have actually armed a timer */
 	struct {
-		rte_atomic16_t v;
+		uint16_t v;
 	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
 	/* Track which cores' timer lists should be polled */
 	unsigned int poll_lcores[RTE_MAX_LCORE];
@@ -602,7 +602,8 @@ swtim_callback(struct rte_timer *tim)
 				      "with immediate expiry value");
 		}
 
-		if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore].v))) {
+		if (unlikely(sw->in_use[lcore].v == 0)) {
+			sw->in_use[lcore].v = 1;
 			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
 						     __ATOMIC_RELAXED);
 			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
@@ -830,7 +831,7 @@ swtim_init(struct rte_event_timer_adapter *adapter)
 
 	/* Initialize the variables that track in-use timer lists */
 	for (i = 0; i < RTE_MAX_LCORE; i++)
-		rte_atomic16_init(&sw->in_use[i].v);
+		sw->in_use[i].v = 0;
 
 	/* Initialize the timer subsystem and allocate timer data instance */
 	ret = rte_timer_subsystem_init();
@@ -1013,6 +1014,8 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 	struct rte_timer *tim, *tims[nb_evtims];
 	uint64_t cycles;
 	int n_lcores;
+	/* Timer list for this lcore is not in use. */
+	uint16_t exp_state = 0;
 
 #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
 	/* Check that the service is running. */
@@ -1031,8 +1034,12 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
 	/* If this is the first time we're arming an event timer on this lcore,
 	 * mark this lcore as "in use"; this will cause the service
 	 * function to process the timer list that corresponds to this lcore.
+	 * The atomic compare-and-swap operation can prevent the race condition
+	 * on in_use flag between multiple non-EAL threads.
 	 */
-	if (unlikely(rte_atomic16_test_and_set(&sw->in_use[lcore_id].v))) {
+	if (unlikely(__atomic_compare_exchange_n(&sw->in_use[lcore_id].v,
+			&exp_state, 1, 0,
+			__ATOMIC_RELAXED, __ATOMIC_RELAXED))) {
 		EVTIM_LOG_DBG("Adding lcore id = %u to list of lcores to poll",
 			      lcore_id);
 		n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
-- 
2.20.1
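
As a quick illustration of the pattern the patch switches to, here is a
minimal standalone C sketch of claiming a per-lcore "in use" flag with a
relaxed C11 compare-and-swap, next to the legacy __sync-based test-and-set
it replaces. The names (claim_lcore_relaxed, claim_lcore_sync) and the demo
main() are hypothetical and only for illustration; the actual change lives
in __swtim_arm_burst() and swtim_callback() in the patch above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* New pattern (what the patch adopts): a relaxed C11 compare-and-swap.
 * Returns true if this call claimed the flag, i.e. it was 0 ("timer
 * list not in use") and is now 1. The flag itself does not order any
 * other data, so relaxed ordering is enough and no extra memory
 * barrier is needed on aarch64.
 */
static bool
claim_lcore_relaxed(uint16_t *flag)
{
	uint16_t expected = 0;

	return __atomic_compare_exchange_n(flag, &expected, 1,
			0 /* strong CAS */,
			__ATOMIC_RELAXED, __ATOMIC_RELAXED);
}

/* Old pattern: rte_atomic16_test_and_set() goes through the generic
 * rte_atomic16_cmpset(), which in the generic header boils down to a
 * legacy __sync compare-and-swap and therefore implies a full barrier
 * the flag does not need.
 */
static bool
claim_lcore_sync(uint16_t *flag)
{
	return __sync_bool_compare_and_swap(flag, 0, 1);
}

int
main(void)
{
	uint16_t relaxed_flag = 0, sync_flag = 0;
	bool first, second;

	/* The first claim on each flag succeeds; the second fails
	 * because the lcore's timer list is already marked in use.
	 */
	first = claim_lcore_relaxed(&relaxed_flag);
	second = claim_lcore_relaxed(&relaxed_flag);
	printf("relaxed CAS: first=%d second=%d\n", first, second);

	first = claim_lcore_sync(&sync_flag);
	second = claim_lcore_sync(&sync_flag);
	printf("__sync CAS:  first=%d second=%d\n", first, second);

	return 0;
}

The adapter code only appends the lcore to poll_lcores[] when the claim
succeeds, which is why the CAS result feeds the unlikely() branch in the
hunks above.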

---
  Diff of the applied patch vs upstream commit (please double-check if non-empty):
---
--- -	2020-07-24 12:53:52.442169935 +0100
+++ 0101-eventdev-use-C11-atomics-for-lcore-timer-armed-flag.patch	2020-07-24 12:53:48.343007503 +0100
@@ -1,8 +1,10 @@
-From 1028d63eb214c5961b2df9d4b5662dab082b839e Mon Sep 17 00:00:00 2001
+From 3939291be17174407903296e4f47986515e3e9fc Mon Sep 17 00:00:00 2001
 From: Phil Yang <phil.yang at arm.com>
 Date: Tue, 7 Jul 2020 23:54:51 +0800
 Subject: [PATCH] eventdev: use C11 atomics for lcore timer armed flag
 
+[ upstream commit 1028d63eb214c5961b2df9d4b5662dab082b839e ]
+
 The in_use flag is a per-core variable which is not shared between
 lcores in the normal case, so accesses to it only need to be ordered
 within the same core. However, if a non-EAL thread picks the highest
@@ -13,8 +15,6 @@
 Use the C11 atomics instead of the generic rte_atomic operations to
 avoid the unnecessary barrier on aarch64.
 
-Cc: stable at dpdk.org
-
 Signed-off-by: Phil Yang <phil.yang at arm.com>
 Reviewed-by: Dharmik Thakkar <dharmik.thakkar at arm.com>
 Reviewed-by: Ruifeng Wang <ruifeng.wang at arm.com>
@@ -24,10 +24,10 @@
  1 file changed, 11 insertions(+), 4 deletions(-)
 
 diff --git a/lib/librte_eventdev/rte_event_timer_adapter.c b/lib/librte_eventdev/rte_event_timer_adapter.c
-index 370ea40a7..4ed301382 100644
+index 76885972e..678be9f0a 100644
 --- a/lib/librte_eventdev/rte_event_timer_adapter.c
 +++ b/lib/librte_eventdev/rte_event_timer_adapter.c
-@@ -554,7 +554,7 @@ struct swtim {
+@@ -550,7 +550,7 @@ struct swtim {
  	uint32_t timer_data_id;
  	/* Track which cores have actually armed a timer */
  	struct {
@@ -36,7 +36,7 @@
  	} __rte_cache_aligned in_use[RTE_MAX_LCORE];
  	/* Track which cores' timer lists should be polled */
  	unsigned int poll_lcores[RTE_MAX_LCORE];
-@@ -606,7 +606,8 @@ swtim_callback(struct rte_timer *tim)
+@@ -602,7 +602,8 @@ swtim_callback(struct rte_timer *tim)
  				      "with immediate expiry value");
  		}
  
@@ -46,7 +46,7 @@
  			n_lcores = __atomic_fetch_add(&sw->n_poll_lcores, 1,
  						     __ATOMIC_RELAXED);
  			__atomic_store_n(&sw->poll_lcores[n_lcores], lcore,
-@@ -834,7 +835,7 @@ swtim_init(struct rte_event_timer_adapter *adapter)
+@@ -830,7 +831,7 @@ swtim_init(struct rte_event_timer_adapter *adapter)
  
  	/* Initialize the variables that track in-use timer lists */
  	for (i = 0; i < RTE_MAX_LCORE; i++)
@@ -55,7 +55,7 @@
  
  	/* Initialize the timer subsystem and allocate timer data instance */
  	ret = rte_timer_subsystem_init();
-@@ -1017,6 +1018,8 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
+@@ -1013,6 +1014,8 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
  	struct rte_timer *tim, *tims[nb_evtims];
  	uint64_t cycles;
  	int n_lcores;
@@ -64,7 +64,7 @@
  
  #ifdef RTE_LIBRTE_EVENTDEV_DEBUG
  	/* Check that the service is running. */
-@@ -1035,8 +1038,12 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
+@@ -1031,8 +1034,12 @@ __swtim_arm_burst(const struct rte_event_timer_adapter *adapter,
  	/* If this is the first time we're arming an event timer on this lcore,
  	 * mark this lcore as "in use"; this will cause the service
  	 * function to process the timer list that corresponds to this lcore.

