author    Vineet Gupta <vgupta@synopsys.com>    2014-12-11 16:05:16 +0530
committer Vineet Gupta <vgupta@synopsys.com>    2015-06-25 06:00:18 +0530
commit    a5c8b52abe677977883655166796f167ef1e0084 (patch)
tree      d8434f779724e4d700d1dda0f781264f24a9ba50
parent    04e2eee4b02edcafce96c9c37b31b1a3318291a4 (diff)
ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock
A quad core SMP build could get into hardware livelock with concurrent LLOCK/SCOND. Work around that by adding a PREFETCHW, which is serialized by the SCU (System Coherency Unit). It brings the cache line into Exclusive state and makes the other cores invalidate their copies. This gives the winner enough time to complete the LLOCK/SCOND before the others can get the line back.

The prefetchw in the ll/sc loop is not nice, but this is the only software workaround for the current version of the RTL.

Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
 arch/arc/include/asm/atomic.h | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
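For readers skimming the patch, here is a minimal sketch of roughly what ATOMIC_OP(add, +=, add) expands to on an ARCv2 SMP build with this change applied. It is illustrative only, not literal preprocessor output; the operand constraints mirror the existing macro in atomic.h.

/* Hedged sketch: approximate expansion of ATOMIC_OP(add, +=, add) on ARCv2 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:				\n"
	"	prefetchw [%1]		\n"	/* SCU-serialized: pull the line in Exclusive state */
	"	llock   %0, [%1]	\n"	/* load-locked the counter */
	"	add     %0, %0, %2	\n"	/* the asm_op for this instantiation */
	"	scond   %0, [%1]	\n"	/* store-conditional; fails if ownership was lost */
	"	bnz     1b		\n"	/* retry until the scond succeeds */
	: "=&r"(temp)			/* early clobber, to prevent reg reuse */
	: "r"(&v->counter), "ir"(i)
	: "cc");
}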
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 20b7dc17979e..03484cb4d16d 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,13 +23,21 @@
#define atomic_set(v, i) (((v)->counter) = (i))
+#ifdef CONFIG_ISA_ARCV2
+#define PREFETCHW " prefetchw [%1] \n"
+#else
+#define PREFETCHW
+#endif
+
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
unsigned int temp; \
\
__asm__ __volatile__( \
- "1: llock %0, [%1] \n" \
+ "1: \n" \
+ PREFETCHW \
+ " llock %0, [%1] \n" \
" " #asm_op " %0, %0, %2 \n" \
" scond %0, [%1] \n" \
" bnz 1b \n" \
@@ -50,7 +58,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
smp_mb(); \
\
__asm__ __volatile__( \
- "1: llock %0, [%1] \n" \
+ "1: \n" \
+ PREFETCHW \
+ " llock %0, [%1] \n" \
" " #asm_op " %0, %0, %2 \n" \
" scond %0, [%1] \n" \
" bnz 1b \n" \