author		Vineet Gupta <vgupta@synopsys.com>	2014-11-13 15:54:01 +0530
committer	Vineet Gupta <vgupta@synopsys.com>	2015-06-25 05:59:23 +0530
commit		d57f727264f1425a94689bafc7e99e502cb135b5 (patch)
tree		c1a73c49583de1880c14be29b09a27344f695a45
parent		eaf0ecc33f82b9c46528d1646575dd8caf586a3d (diff)
ARC: add compiler barrier to LLSC based cmpxchg
When auditing cmpxchg call sites, Chuck noted that gcc was optimizing
away some of the desired LDs.

|	do {
|		new = old = *ipi_data_ptr;
|		new |= 1U << msg;
|	} while (cmpxchg(ipi_data_ptr, old, new) != old);

was generating the code below

| 8015cef8:	ld	r2,[r4,0]	<-- First LD
| 8015cefc:	bset	r1,r2,r1
|
| 8015cf00:	llock	r3,[r4]		<-- atomic op
| 8015cf04:	brne	r3,r2,8015cf10
| 8015cf08:	scond	r1,[r4]
| 8015cf0c:	bnz	8015cf00
|
| 8015cf10:	brne	r3,r2,8015cf00	<-- Branch doesn't go to orig LD

Although this was fixed by adding an ACCESS_ONCE at that call site, it
seems safer (for now at least) to add a compiler barrier to the LLSC
based cmpxchg.

Reported-by: Chuck Jordan <cjordan@synopsys.com>
Cc: <stable@vger.kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
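As context for the "fixed by adding an ACCESS_ONCE" remark, the repaired
call site would look roughly like the sketch below. It is not the exact
kernel hunk: the wrapper name ipi_set_msg_bit is illustrative only, the
kernel's cmpxchg() is assumed to be in scope, and the ACCESS_ONCE macro
(spelled READ_ONCE in later kernels) is reproduced so the sketch is
self-contained. The volatile cast forces gcc to re-issue the LD of
*ipi_data_ptr on every retry, rather than reusing the value loaded
before the loop.

	/* Sketch of the call-site fix mentioned above, not the exact
	 * kernel hunk. The volatile cast defeats the load-caching
	 * optimization shown in the disassembly.
	 */
	#define ACCESS_ONCE(x)	(*(volatile typeof(x) *)&(x))

	static void ipi_set_msg_bit(unsigned long *ipi_data_ptr, int msg)
	{
		unsigned long old, new;

		do {
			new = old = ACCESS_ONCE(*ipi_data_ptr);
			new |= 1UL << msg;
		} while (cmpxchg(ipi_data_ptr, old, new) != old);
	}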
-rw-r--r--	arch/arc/include/asm/cmpxchg.h	9
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index 03cd6894855d..90de5c528da2 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 	"	scond   %3, [%1]	\n"
 	"	bnz     1b		\n"
 	"2:				\n"
-	: "=&r"(prev)
-	: "r"(ptr), "ir"(expected),
-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
-	: "cc");
+	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
+	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
+	  "ir"(expected),
+	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
+	: "cc", "memory"); /* so that gcc knows memory is being written here */
 
 	return prev;
 }
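For readers auditing similar asm, the effect of the new "memory" clobber
can be seen in a standalone sketch. This is not part of the patch;
compiler_barrier() here mirrors the kernel's barrier() macro, which is
defined the same way.

	/* An asm with a "memory" clobber is a compiler barrier: gcc must
	 * assume memory was written, so values of *p cached in registers
	 * before the asm cannot be reused after it.
	 */
	static inline void compiler_barrier(void)
	{
		asm volatile("" : : : "memory");
	}

	int two_loads(int *p)
	{
		int a = *p;		/* first LD */
		compiler_barrier();	/* gcc discards its cached copy of *p */
		int b = *p;		/* a second LD is actually emitted;
					 * without the clobber gcc would
					 * simply reuse 'a' here */
		return a + b;
	}

Without the clobber, gcc is free to treat the asm as touching only its
listed operands, which is exactly how the first LD in the disassembly
above escaped the retry loop.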