[dpdk-stable] [PATCH v2 3/4] eal/x86: reduce contention when retrying TSX

Bruce Richardson bruce.richardson at intel.com
Mon Nov 12 11:47:18 CET 2018


When TSX transactions abort, it is generally worth retrying a number of
times before falling back to the traditional locking path, as the
parallelism benefits from TSX can be worth it when a transaction does
succeed. For cases with multiple threads and high contention rates, it
can be useful to have increasing delays between retry attempts, so as to
avoid having the same threads repeatedly colliding.

Signed-off-by: Bruce Richardson <bruce.richardson at intel.com>

---
V2: Have retry with backoff for all cases of memory conflicts, not
just those where we explicitly abort due to the lock being held.
---
 .../common/include/arch/x86/rte_spinlock.h          | 21 +++++++++++++++++----
 lib/librte_eal/linuxapp/eal/eal_alarm.c             |  2 ++
 2 files changed, 19 insertions(+), 4 deletions(-)

diff --git a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
index 60321da..e2e2b26 100644
--- a/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
+++ b/lib/librte_eal/common/include/arch/x86/rte_spinlock.h
@@ -15,8 +15,9 @@
 #include "rte_branch_prediction.h"
 #include "rte_common.h"
 #include "rte_pause.h"
+#include "rte_cycles.h"
 
-#define RTE_RTM_MAX_RETRIES (10)
+#define RTE_RTM_MAX_RETRIES (20)
 #define RTE_XABORT_LOCK_BUSY (0xff)
 
 #ifndef RTE_FORCE_INTRINSICS
@@ -76,7 +77,7 @@ static inline int rte_tm_supported(void)
 static inline int
 rte_try_tm(volatile int *lock)
 {
-	int retries;
+	int i, retries;
 
 	if (!rte_rtm_supported)
 		return 0;
@@ -96,9 +97,21 @@ static inline int rte_tm_supported(void)
 		while (*lock)
 			rte_pause();
 
-		if ((status & RTE_XABORT_EXPLICIT) &&
-			(RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))
+		if ((status & RTE_XABORT_CONFLICT) ||
+		   ((status & RTE_XABORT_EXPLICIT) &&
+		    (RTE_XABORT_CODE(status) == RTE_XABORT_LOCK_BUSY))) {
+			/* add a small delay before retrying, basing the
+			 * delay on the number of times we've already tried,
+			 * to give a back-off type of behaviour. We
+			 * randomize trycount by taking bits from the tsc count
+			 */
+			int try_count = RTE_RTM_MAX_RETRIES - retries;
+			int pause_count = (rte_rdtsc() & 0x7) | 1;
+			pause_count <<= try_count;
+			for (i = 0; i < pause_count; i++)
+				rte_pause();
 			continue;
+		}
 
 		if ((status & RTE_XABORT_RETRY) == 0) /* do not retry */
 			break;
diff --git a/lib/librte_eal/linuxapp/eal/eal_alarm.c b/lib/librte_eal/linuxapp/eal/eal_alarm.c
index 391d2a6..840ede7 100644
--- a/lib/librte_eal/linuxapp/eal/eal_alarm.c
+++ b/lib/librte_eal/linuxapp/eal/eal_alarm.c
@@ -30,7 +30,9 @@
 #define NS_PER_US 1000
 #define US_PER_MS 1000
 #define MS_PER_S 1000
+#ifndef US_PER_S
 #define US_PER_S (US_PER_MS * MS_PER_S)
+#endif
 
 #ifdef CLOCK_MONOTONIC_RAW /* Defined in glibc bits/time.h */
 #define CLOCK_TYPE_ID CLOCK_MONOTONIC_RAW
-- 
1.8.5.6



More information about the stable mailing list