[PATCH] eal: introduce atomics abstraction

Tyler Retzlaff roretzla at linux.microsoft.com
Thu Jan 12 22:26:25 CET 2023


Introduce an atomics abstraction that permits optional use of standard C11
atomics when the new enable_stdatomics=true meson option is provided.
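
For illustration, a minimal usage sketch of the API surface as seen by
callers (the variable and function names below are hypothetical and not
part of this patch):

    #include <stdint.h>
    #include <rte_atomic.h>

    /* hypothetical counter shared between lcores */
    static uint32_t __rte_atomic counter;

    static void
    bump_and_read(void)
    {
        /* expands to atomic_fetch_add_explicit() or __atomic_fetch_add() */
        rte_atomic_fetch_add_explicit(&counter, 1, rte_memory_order_relaxed);

        /* expands to atomic_load_explicit() or __atomic_load_n() */
        uint32_t v = rte_atomic_load_explicit(&counter,
                rte_memory_order_acquire);
        (void)v;
    }

The macros are intended to expand to equivalent operations whether or not
standard atomics are enabled; the option can be selected at configure time
with, for example:

    meson setup build -Denable_stdatomics=true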

Signed-off-by: Tyler Retzlaff <roretzla at linux.microsoft.com>
---
 config/meson.build                     |  11 +++
 lib/eal/arm/include/rte_atomic_32.h    |   6 +-
 lib/eal/arm/include/rte_atomic_64.h    |   6 +-
 lib/eal/include/generic/rte_atomic.h   | 156 ++++++++++++++++++++++++++++++++-
 lib/eal/loongarch/include/rte_atomic.h |   6 +-
 lib/eal/ppc/include/rte_atomic.h       |   6 +-
 lib/eal/riscv/include/rte_atomic.h     |   6 +-
 lib/eal/x86/include/rte_atomic.h       |   8 +-
 meson_options.txt                      |   2 +
 9 files changed, 199 insertions(+), 8 deletions(-)

diff --git a/config/meson.build b/config/meson.build
index 6d9ffd4..9515ce9 100644
--- a/config/meson.build
+++ b/config/meson.build
@@ -254,6 +254,17 @@ endif
 # add -include rte_config to cflags
 add_project_arguments('-include', 'rte_config.h', language: 'c')
 
+stdc_atomics_enabled = get_option('enable_stdatomics')
+dpdk_conf.set('RTE_STDC_ATOMICS', stdc_atomics_enabled)
+
+if stdc_atomics_enabled
+    if cc.get_id() == 'gcc' or cc.get_id() == 'clang'
+        add_project_arguments('-std=gnu11', language: 'c')
+    else
+        add_project_arguments('-std=c11', language: 'c')
+    endif
+endif
+
 # enable extra warnings and disable any unwanted warnings
 # -Wall is added by default at warning level 1, and -Wextra
 # at warning level 2 (DPDK default)
diff --git a/lib/eal/arm/include/rte_atomic_32.h b/lib/eal/arm/include/rte_atomic_32.h
index c00ab78..7088a12 100644
--- a/lib/eal/arm/include/rte_atomic_32.h
+++ b/lib/eal/arm/include/rte_atomic_32.h
@@ -34,9 +34,13 @@
 #define rte_io_rmb() rte_rmb()
 
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
+#ifdef RTE_STDC_ATOMICS
+	atomic_thread_fence(memorder);
+#else
 	__atomic_thread_fence(memorder);
+#endif
 }
 
 #ifdef __cplusplus
diff --git a/lib/eal/arm/include/rte_atomic_64.h b/lib/eal/arm/include/rte_atomic_64.h
index 6047911..7f02c57 100644
--- a/lib/eal/arm/include/rte_atomic_64.h
+++ b/lib/eal/arm/include/rte_atomic_64.h
@@ -38,9 +38,13 @@
 #define rte_io_rmb() rte_rmb()
 
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
+#ifdef RTE_STDC_ATOMICS
+	atomic_thread_fence(memorder);
+#else
 	__atomic_thread_fence(memorder);
+#endif
 }
 
 /*------------------------ 128 bit atomic operations -------------------------*/
diff --git a/lib/eal/include/generic/rte_atomic.h b/lib/eal/include/generic/rte_atomic.h
index f5c49a9..cb71c0d 100644
--- a/lib/eal/include/generic/rte_atomic.h
+++ b/lib/eal/include/generic/rte_atomic.h
@@ -110,6 +110,160 @@
 
 #endif /* __DOXYGEN__ */
 
+#ifdef RTE_STDC_ATOMICS
+
+#if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L || defined(__STDC_NO_ATOMICS__)
+#error compiler does not support C11 standard atomics
+#else
+#include <stdatomic.h>
+#endif
+
+#define __rte_atomic _Atomic
+
+typedef int rte_memory_order;
+
+#define rte_memory_order_relaxed memory_order_relaxed
+#define rte_memory_order_consume memory_order_consume
+#define rte_memory_order_acquire memory_order_acquire
+#define rte_memory_order_release memory_order_release
+#define rte_memory_order_acq_rel memory_order_acq_rel
+#define rte_memory_order_seq_cst memory_order_seq_cst
+
+#define rte_atomic_store(obj, desired) \
+	atomic_store(obj, desired)
+
+#define rte_atomic_store_explicit(obj, desired, order) \
+	atomic_store_explicit(obj, desired, order)
+
+#define rte_atomic_load(obj) \
+	atomic_load(obj)
+
+#define rte_atomic_load_explicit(obj, order) \
+	atomic_load_explicit(obj, order)
+
+#define rte_atomic_exchange(obj, desired) \
+	atomic_exchange(obj, desired)
+
+#define rte_atomic_exchange_explicit(obj, desired, order) \
+	atomic_exchange_explicit(obj, desired, order)
+
+#define rte_atomic_compare_exchange_strong(obj, expected, desired) \
+	atomic_compare_exchange_strong(obj, expected, desired)
+
+#define rte_atomic_compare_exchange_strong_explicit(obj, expected, desired, success, fail) \
+	atomic_compare_exchange_strong_explicit(obj, expected, desired, success, fail)
+
+#define rte_atomic_compare_exchange_weak(obj, expected, desired) \
+	atomic_compare_exchange_weak(obj, expected, desired)
+
+#define rte_atomic_compare_exchange_weak_explicit(obj, expected, desired, success, fail) \
+	atomic_compare_exchange_weak_explicit(obj, expected, desired, success, fail)
+
+#define rte_atomic_fetch_add(obj, arg) \
+	atomic_fetch_add(obj, arg)
+
+#define rte_atomic_fetch_add_explicit(obj, arg, order) \
+	atomic_fetch_add_explicit(obj, arg, order)
+
+#define rte_atomic_fetch_sub(obj, arg) \
+	atomic_fetch_sub(obj, arg)
+
+#define rte_atomic_fetch_sub_explicit(obj, arg, order) \
+	atomic_fetch_sub_explicit(obj, arg, order)
+
+#define rte_atomic_fetch_or(obj, arg) \
+	atomic_fetch_or(obj, arg)
+
+#define rte_atomic_fetch_or_explicit(obj, arg, order) \
+	atomic_fetch_or_explicit(obj, arg, order)
+
+#define rte_atomic_fetch_xor(obj, arg) \
+	atomic_fetch_xor(obj, arg)
+
+#define rte_atomic_fetch_xor_explicit(obj, arg, order) \
+	atomic_fetch_xor_explicit(obj, arg, order)
+
+#define rte_atomic_fetch_and(obj, arg) \
+	atomic_fetch_and(obj, arg)
+
+#define rte_atomic_fetch_and_explicit(obj, arg, order) \
+	atomic_fetch_and_explicit(obj, arg, order)
+
+#else
+
+#define __rte_atomic
+
+typedef int rte_memory_order;
+
+#define rte_memory_order_relaxed __ATOMIC_RELAXED
+#define rte_memory_order_consume __ATOMIC_CONSUME
+#define rte_memory_order_acquire __ATOMIC_ACQUIRE
+#define rte_memory_order_release __ATOMIC_RELEASE
+#define rte_memory_order_acq_rel __ATOMIC_ACQ_REL
+#define rte_memory_order_seq_cst __ATOMIC_SEQ_CST
+
+#define rte_atomic_store(obj, desired) \
+	__atomic_store_n(obj, desired, rte_memory_order_seq_cst)
+
+#define rte_atomic_store_explicit(obj, desired, order) \
+	__atomic_store_n(obj, desired, order)
+
+#define rte_atomic_load(obj) \
+	__atomic_load_n(obj, rte_memory_order_seq_cst)
+
+#define rte_atomic_load_explicit(obj, order) \
+	__atomic_load_n(obj, order)
+
+#define rte_atomic_exchange(obj, desired) \
+	__atomic_exchange_n(obj, desired, rte_memory_order_seq_cst)
+
+#define rte_atomic_exchange_explicit(obj, desired, order) \
+	__atomic_exchange_n(obj, desired, order)
+
+#define rte_atomic_compare_exchange_strong(obj, expected, desired) \
+	__atomic_compare_exchange_n(obj, expected, desired, 0, rte_memory_order_seq_cst, rte_memory_order_seq_cst)
+
+#define rte_atomic_compare_exchange_strong_explicit(obj, expected, desired, success, fail) \
+	__atomic_compare_exchange_n(obj, expected, desired, 0, success, fail)
+
+#define rte_atomic_compare_exchange_weak(obj, expected, desired) \
+	__atomic_compare_exchange_n(obj, expected, desired, 1, rte_memory_order_seq_cst, rte_memory_order_seq_cst)
+
+#define rte_atomic_compare_exchange_weak_explicit(obj, expected, desired, success, fail) \
+	__atomic_compare_exchange_n(obj, expected, desired, 1, success, fail)
+
+#define rte_atomic_fetch_add(obj, arg) \
+	__atomic_fetch_add(obj, arg, rte_memory_order_seq_cst)
+
+#define rte_atomic_fetch_add_explicit(obj, arg, order) \
+	__atomic_fetch_add(obj, arg, order)
+
+#define rte_atomic_fetch_sub(obj, arg) \
+	__atomic_fetch_sub(obj, arg, rte_memory_order_seq_cst)
+
+#define rte_atomic_fetch_sub_explicit(obj, arg, order) \
+	__atomic_fetch_sub(obj, arg, order)
+
+#define rte_atomic_fetch_or(obj, arg) \
+	__atomic_fetch_or(obj, arg, rte_memory_order_seq_cst)
+
+#define rte_atomic_fetch_or_explicit(obj, arg, order) \
+	__atomic_fetch_or(obj, arg, order)
+
+#define rte_atomic_fetch_xor(obj, arg) \
+	__atomic_fetch_xor(obj, arg, rte_memory_order_seq_cst)
+
+#define rte_atomic_fetch_xor_explicit(obj, arg, order) \
+	__atomic_fetch_xor(obj, arg, order)
+
+#define rte_atomic_fetch_and(obj, arg) \
+	__atomic_fetch_and(obj, arg, rte_memory_order_seq_cst)
+
+#define rte_atomic_fetch_and_explicit(obj, arg, order) \
+	__atomic_fetch_and(obj, arg, order)
+
+#endif
+
 /**
  * Compiler barrier.
  *
@@ -123,7 +277,7 @@
 /**
  * Synchronization fence between threads based on the specified memory order.
  */
-static inline void rte_atomic_thread_fence(int memorder);
+static inline void rte_atomic_thread_fence(rte_memory_order memorder);
 
 /*------------------------- 16 bit atomic operations -------------------------*/
 
diff --git a/lib/eal/loongarch/include/rte_atomic.h b/lib/eal/loongarch/include/rte_atomic.h
index 3c82845..66aa0c8 100644
--- a/lib/eal/loongarch/include/rte_atomic.h
+++ b/lib/eal/loongarch/include/rte_atomic.h
@@ -35,9 +35,13 @@
 #define rte_io_rmb()	rte_mb()
 
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
+#ifdef RTE_STDC_ATOMICS
+	atomic_thread_fence(memorder);
+#else
 	__atomic_thread_fence(memorder);
+#endif
 }
 
 #ifdef __cplusplus
diff --git a/lib/eal/ppc/include/rte_atomic.h b/lib/eal/ppc/include/rte_atomic.h
index 663b4d3..a428a83 100644
--- a/lib/eal/ppc/include/rte_atomic.h
+++ b/lib/eal/ppc/include/rte_atomic.h
@@ -38,9 +38,13 @@
 #define rte_io_rmb() rte_rmb()
 
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
+#ifdef RTE_STDC_ATOMICS
+	atomic_thread_fence(memorder);
+#else
 	__atomic_thread_fence(memorder);
+#endif
 }
 
 /*------------------------- 16 bit atomic operations -------------------------*/
diff --git a/lib/eal/riscv/include/rte_atomic.h b/lib/eal/riscv/include/rte_atomic.h
index 4b4633c..3c203a9 100644
--- a/lib/eal/riscv/include/rte_atomic.h
+++ b/lib/eal/riscv/include/rte_atomic.h
@@ -40,9 +40,13 @@
 #define rte_io_rmb()	asm volatile("fence ir, ir" : : : "memory")
 
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
+#ifdef RTE_STDC_ATOMICS
+	atomic_thread_fence(memorder);
+#else
 	__atomic_thread_fence(memorder);
+#endif
 }
 
 #ifdef __cplusplus
diff --git a/lib/eal/x86/include/rte_atomic.h b/lib/eal/x86/include/rte_atomic.h
index f2ee1a9..02d8b12 100644
--- a/lib/eal/x86/include/rte_atomic.h
+++ b/lib/eal/x86/include/rte_atomic.h
@@ -87,12 +87,16 @@
  * used instead.
  */
 static __rte_always_inline void
-rte_atomic_thread_fence(int memorder)
+rte_atomic_thread_fence(rte_memory_order memorder)
 {
-	if (memorder == __ATOMIC_SEQ_CST)
+	if (memorder == rte_memory_order_seq_cst)
 		rte_smp_mb();
 	else
+#ifdef RTE_STDC_ATOMICS
+		atomic_thread_fence(memorder);
+#else
 		__atomic_thread_fence(memorder);
+#endif
 }
 
 /*------------------------- 16 bit atomic operations -------------------------*/
diff --git a/meson_options.txt b/meson_options.txt
index 0852849..acbcbb8 100644
--- a/meson_options.txt
+++ b/meson_options.txt
@@ -46,6 +46,8 @@ option('mbuf_refcnt_atomic', type: 'boolean', value: true, description:
        'Atomically access the mbuf refcnt.')
 option('platform', type: 'string', value: 'native', description:
        'Platform to build, either "native", "generic" or a SoC. Please refer to the Linux build guide for more information.')
+option('enable_stdatomics', type: 'boolean', value: false, description:
+       'enable use of standard C11 atomics.')
 option('enable_trace_fp', type: 'boolean', value: false, description:
        'enable fast path trace points.')
 option('tests', type: 'boolean', value: true, description:
-- 
1.8.3.1


