[PATCH v2 04/17] eal: use previous value atomic fetch operations

Tyler Retzlaff roretzla at linux.microsoft.com
Thu Mar 2 17:18:09 CET 2023


Use __atomic_fetch_{add,and,or,sub,xor} instead of
__atomic_{add,and,or,sub,xor}_fetch when we have no interest in the
result of the operation.

This reduces unnecessary codegen: the *_fetch forms compute the result of
the atomic operation even when that result is never used.

The change brings closer alignment with the atomics available in the C11
standard and will reduce review effort when they are integrated.
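
For illustration only (not part of this patch), a minimal sketch of the
substitution, using a hypothetical counter whose updated value is never
read:

	static int cnt;	/* hypothetical counter, illustration only */

	/* Before: __atomic_add_fetch() also computes and returns the
	 * incremented value, which is simply discarded here.
	 */
	__atomic_add_fetch(&cnt, 1, __ATOMIC_RELAXED);

	/* After: __atomic_fetch_add() returns the previous value, so the
	 * compiler need not materialize the new value for an unused result.
	 */
	__atomic_fetch_add(&cnt, 1, __ATOMIC_RELAXED);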

Signed-off-by: Tyler Retzlaff <roretzla at linux.microsoft.com>
Acked-by: Morten Brørup <mb at smartsharesystems.com>
---
 lib/eal/common/eal_common_trace.c |  8 ++++----
 lib/eal/common/rte_service.c      |  8 ++++----
 lib/eal/ppc/include/rte_atomic.h  | 16 ++++++++--------
 3 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/lib/eal/common/eal_common_trace.c b/lib/eal/common/eal_common_trace.c
index 75162b7..cb980af 100644
--- a/lib/eal/common/eal_common_trace.c
+++ b/lib/eal/common/eal_common_trace.c
@@ -103,10 +103,10 @@ struct trace_point_head *
 trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)
 {
 	if (mode == RTE_TRACE_MODE_OVERWRITE)
-		__atomic_and_fetch(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 	else
-		__atomic_or_fetch(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
+		__atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
 			__ATOMIC_RELEASE);
 }
 
@@ -155,7 +155,7 @@ rte_trace_mode rte_trace_mode_get(void)
 
 	prev = __atomic_fetch_or(t, __RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);
 	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)
-		__atomic_add_fetch(&trace.status, 1, __ATOMIC_RELEASE);
+		__atomic_fetch_add(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
 
@@ -169,7 +169,7 @@ rte_trace_mode rte_trace_mode_get(void)
 
 	prev = __atomic_fetch_and(t, ~__RTE_TRACE_FIELD_ENABLE_MASK, __ATOMIC_RELEASE);
 	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)
-		__atomic_sub_fetch(&trace.status, 1, __ATOMIC_RELEASE);
+		__atomic_fetch_sub(&trace.status, 1, __ATOMIC_RELEASE);
 	return 0;
 }
 
diff --git a/lib/eal/common/rte_service.c b/lib/eal/common/rte_service.c
index 42ca1d0..7ab48f2 100644
--- a/lib/eal/common/rte_service.c
+++ b/lib/eal/common/rte_service.c
@@ -464,11 +464,11 @@ struct core_state {
 	/* Increment num_mapped_cores to reflect that this core is
 	 * now mapped capable of running the service.
 	 */
-	__atomic_add_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_add(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
 
 	int ret = service_run(id, cs, UINT64_MAX, s, serialize_mt_unsafe);
 
-	__atomic_sub_fetch(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
+	__atomic_fetch_sub(&s->num_mapped_cores, 1, __ATOMIC_RELAXED);
 
 	return ret;
 }
@@ -638,12 +638,12 @@ struct core_state {
 
 		if (*set && !lcore_mapped) {
 			lcore_states[lcore].service_mask |= sid_mask;
-			__atomic_add_fetch(&rte_services[sid].num_mapped_cores,
+			__atomic_fetch_add(&rte_services[sid].num_mapped_cores,
 				1, __ATOMIC_RELAXED);
 		}
 		if (!*set && lcore_mapped) {
 			lcore_states[lcore].service_mask &= ~(sid_mask);
-			__atomic_sub_fetch(&rte_services[sid].num_mapped_cores,
+			__atomic_fetch_sub(&rte_services[sid].num_mapped_cores,
 				1, __ATOMIC_RELAXED);
 		}
 	}
diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h
index 663b4d3..2ab735b 100644
--- a/lib/eal/ppc/include/rte_atomic.h
+++ b/lib/eal/ppc/include/rte_atomic.h
@@ -60,13 +60,13 @@ static inline int rte_atomic16_test_and_set(rte_atomic16_t *v)
 static inline void
 rte_atomic16_inc(rte_atomic16_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic16_dec(rte_atomic16_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
@@ -102,13 +102,13 @@ static inline int rte_atomic32_test_and_set(rte_atomic32_t *v)
 static inline void
 rte_atomic32_inc(rte_atomic32_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic32_dec(rte_atomic32_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
@@ -157,25 +157,25 @@ static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 static inline void
 rte_atomic64_add(rte_atomic64_t *v, int64_t inc)
 {
-	__atomic_add_fetch(&v->cnt, inc, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, inc, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
 {
-	__atomic_sub_fetch(&v->cnt, dec, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, dec, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_inc(rte_atomic64_t *v)
 {
-	__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_add(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline void
 rte_atomic64_dec(rte_atomic64_t *v)
 {
-	__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE);
+	__atomic_fetch_sub(&v->cnt, 1, __ATOMIC_ACQUIRE);
 }
 
 static inline int64_t
-- 
1.8.3.1


