author     Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 15:48:00 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-10-13 15:48:00 +0200
commit     dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree       9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/sparc
parent     d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent     2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
"This is a series kept separate from the main locking tree, which
cleans up and improves various details in the atomics type handling:
- Remove the unused atomic_or_long() method
- Consolidate and compress atomic ops implementations between
architectures, to reduce linecount and to make it easier to add new
ops.
- Rewrite generic atomic support to only require cmpxchg() from an
architecture - generate all other methods from that"
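
[Editor's note: the cmpxchg()-only rewrite in the last bullet is easiest to see as code. Below is a minimal sketch of the idea in kernel context, assuming only atomic_t and a working cmpxchg(); the name sketch_atomic_add_return is hypothetical, and this illustrates the technique rather than reproducing the actual include/asm-generic/atomic.h implementation.]

/* Sketch: derive an add-and-return atomic from cmpxchg() alone.
 * Hypothetical name; illustrative, not the real generic code. */
static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
	int c, old;

	c = atomic_read(v);			/* speculative snapshot */
	while ((old = cmpxchg(&v->counter, c, c + i)) != c)
		c = old;			/* raced with another CPU; retry */

	return c + i;				/* value after our addition */
}

Swapping the update expression (c - i, c | i, and so on) yields every other method from the same retry loop, which is why an architecture now only has to supply cmpxchg().
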
* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
locking, mips: Fix atomics
locking, sparc64: Fix atomics
locking,arch: Rewrite generic atomic support
locking,arch,xtensa: Fold atomic_ops
locking,arch,sparc: Fold atomic_ops
locking,arch,sh: Fold atomic_ops
locking,arch,powerpc: Fold atomic_ops
locking,arch,parisc: Fold atomic_ops
locking,arch,mn10300: Fold atomic_ops
locking,arch,mips: Fold atomic_ops
locking,arch,metag: Fold atomic_ops
locking,arch,m68k: Fold atomic_ops
locking,arch,m32r: Fold atomic_ops
locking,arch,ia64: Fold atomic_ops
locking,arch,hexagon: Fold atomic_ops
locking,arch,cris: Fold atomic_ops
locking,arch,avr32: Fold atomic_ops
locking,arch,arm64: Fold atomic_ops
locking,arch,arm: Fold atomic_ops
...
Diffstat (limited to 'arch/sparc')
-rw-r--r--   arch/sparc/include/asm/atomic_32.h |  19
-rw-r--r--   arch/sparc/include/asm/atomic_64.h |  49
-rw-r--r--   arch/sparc/kernel/smp_64.c         |   2
-rw-r--r--   arch/sparc/lib/atomic32.c          |  29
-rw-r--r--   arch/sparc/lib/atomic_64.S         | 163
-rw-r--r--   arch/sparc/lib/ksyms.c             |  25
6 files changed, 136 insertions, 151 deletions
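
[Editor's note: every arch/sparc hunk below applies the same "fold": near-identical hand-written functions are replaced by one macro body instantiated once per operation via token pasting. A small user-space analogue of the pattern, with hypothetical demo_* names, compiles and runs as follows.]

#include <stdio.h>

/* One body, expanded per op via token pasting -- the same trick the
 * ATOMIC_OP/ATOMIC_OP_RETURN macros in the hunks below rely on. */
#define DEMO_OP_RETURN(op, c_op)				\
static int demo_##op##_return(int i, int *v)			\
{								\
	return (*v c_op i);	/* apply op, return new value */\
}

DEMO_OP_RETURN(add, +=)		/* generates demo_add_return() */
DEMO_OP_RETURN(sub, -=)		/* generates demo_sub_return() */

#undef DEMO_OP_RETURN

int main(void)
{
	int v = 40;
	printf("%d\n", demo_add_return(2, &v));	/* prints 42 */
	printf("%d\n", demo_sub_return(2, &v));	/* prints 40 */
	return 0;
}

The kernel versions additionally wrap the shared body in a hashed spinlock (atomic32.c) or a cas retry loop (atomic_64.S), but the generation mechanism is identical.
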
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 7aed2be45b44..765c1776ec9f 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -20,23 +20,22 @@
 #define ATOMIC_INIT(i)  { (i) }
 
-int __atomic_add_return(int, atomic_t *);
+int atomic_add_return(int, atomic_t *);
 int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 int __atomic_add_unless(atomic_t *, int, int);
 void atomic_set(atomic_t *, int);
 
-#define atomic_read(v)          (*(volatile int *)&(v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
 
-#define atomic_add(i, v)        ((void)__atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v)        ((void)__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc(v)           ((void)__atomic_add_return(        1, (v)))
-#define atomic_dec(v)           ((void)__atomic_add_return(       -1, (v)))
+#define atomic_add(i, v)        ((void)atomic_add_return( (int)(i), (v)))
+#define atomic_sub(i, v)        ((void)atomic_add_return(-(int)(i), (v)))
+#define atomic_inc(v)           ((void)atomic_add_return(        1, (v)))
+#define atomic_dec(v)           ((void)atomic_add_return(       -1, (v)))
 
-#define atomic_add_return(i, v) (__atomic_add_return( (int)(i), (v)))
-#define atomic_sub_return(i, v) (__atomic_add_return(-(int)(i), (v)))
-#define atomic_inc_return(v)    (__atomic_add_return(        1, (v)))
-#define atomic_dec_return(v)    (__atomic_add_return(       -1, (v)))
+#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
+#define atomic_inc_return(v)    (atomic_add_return(        1, (v)))
+#define atomic_dec_return(v)    (atomic_add_return(       -1, (v)))
 
 #define atomic_add_negative(a, v)       (atomic_add_return((a), (v)) < 0)
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index bb894c8bec56..4082749913ce 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,33 +14,34 @@
 #define ATOMIC_INIT(i)          { (i) }
 #define ATOMIC64_INIT(i)        { (i) }
 
-#define atomic_read(v)          (*(volatile int *)&(v)->counter)
-#define atomic64_read(v)        (*(volatile long *)&(v)->counter)
+#define atomic_read(v)          ACCESS_ONCE((v)->counter)
+#define atomic64_read(v)        ACCESS_ONCE((v)->counter)
 
 #define atomic_set(v, i)        (((v)->counter) = i)
 #define atomic64_set(v, i)      (((v)->counter) = i)
 
-void atomic_add(int, atomic_t *);
-void atomic64_add(long, atomic64_t *);
-void atomic_sub(int, atomic_t *);
-void atomic64_sub(long, atomic64_t *);
+#define ATOMIC_OP(op)							\
+void atomic_##op(int, atomic_t *);					\
+void atomic64_##op(long, atomic64_t *);
 
-int atomic_add_ret(int, atomic_t *);
-long atomic64_add_ret(long, atomic64_t *);
-int atomic_sub_ret(int, atomic_t *);
-long atomic64_sub_ret(long, atomic64_t *);
+#define ATOMIC_OP_RETURN(op)						\
+int atomic_##op##_return(int, atomic_t *);				\
+long atomic64_##op##_return(long, atomic64_t *);
 
-#define atomic_dec_return(v) atomic_sub_ret(1, v)
-#define atomic64_dec_return(v) atomic64_sub_ret(1, v)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-#define atomic_inc_return(v) atomic_add_ret(1, v)
-#define atomic64_inc_return(v) atomic64_add_ret(1, v)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-#define atomic_sub_return(i, v) atomic_sub_ret(i, v)
-#define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-#define atomic_add_return(i, v) atomic_add_ret(i, v)
-#define atomic64_add_return(i, v) atomic64_add_ret(i, v)
+#define atomic_dec_return(v)   atomic_sub_return(1, v)
+#define atomic64_dec_return(v) atomic64_sub_return(1, v)
+
+#define atomic_inc_return(v)   atomic_add_return(1, v)
+#define atomic64_inc_return(v) atomic64_add_return(1, v)
 
 /*
  * atomic_inc_and_test - increment and test
@@ -53,11 +54,11 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 
-#define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
-#define atomic64_sub_and_test(i, v) (atomic64_sub_ret(i, v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return(i, v) == 0)
 
-#define atomic_dec_and_test(v) (atomic_sub_ret(1, v) == 0)
-#define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic64_dec_and_test(v) (atomic64_sub_return(1, v) == 0)
 
 #define atomic_inc(v) atomic_add(1, v)
 #define atomic64_inc(v) atomic64_add(1, v)
@@ -65,8 +66,8 @@ long atomic64_sub_ret(long, atomic64_t *);
 #define atomic_dec(v) atomic_sub(1, v)
 #define atomic64_dec(v) atomic64_sub(1, v)
 
-#define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
-#define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, v) < 0)
 
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index c9300bfaee5a..302c476413d5 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1138,7 +1138,7 @@ static unsigned long penguins_are_doing_time;
 
 void smp_capture(void)
 {
-	int result = atomic_add_ret(1, &smp_capture_depth);
+	int result = atomic_add_return(1, &smp_capture_depth);
 
 	if (result == 1) {
 		int ncpus = num_online_cpus();
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 1d32b54089aa..a7c418ac26af 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -27,18 +27,23 @@ static DEFINE_SPINLOCK(dummy);
 
 #endif /* SMP */
 
-int __atomic_add_return(int i, atomic_t *v)
-{
-	int ret;
-	unsigned long flags;
-	spin_lock_irqsave(ATOMIC_HASH(v), flags);
-
-	ret = (v->counter += i);
-
-	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
-	return ret;
-}
-EXPORT_SYMBOL(__atomic_add_return);
+#define ATOMIC_OP(op, cop)						\
+int atomic_##op##_return(int i, atomic_t *v)				\
+{									\
+	int ret;							\
+	unsigned long flags;						\
+	spin_lock_irqsave(ATOMIC_HASH(v), flags);			\
+									\
+	ret = (v->counter cop i);					\
+									\
+	spin_unlock_irqrestore(ATOMIC_HASH(v), flags);			\
+	return ret;							\
+}									\
+EXPORT_SYMBOL(atomic_##op##_return);
+
+ATOMIC_OP(add, +=)
+
+#undef ATOMIC_OP
 
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
index 85c233d0a340..05dac43907d1 100644
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -14,109 +14,80 @@
  * memory barriers, and a second which returns
  * a value and does the barriers.
  */
-ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add)
-ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub)
+#define ATOMIC_OP(op)							\
+ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */		\
+	BACKOFF_SETUP(%o2);						\
+1:	lduw	[%o1], %g1;						\
+	op	%g1, %o0, %g7;						\
+	cas	[%o1], %g1, %g7;					\
+	cmp	%g1, %g7;						\
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);				\
+	 nop;								\
+	retl;								\
+	 nop;								\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
+ENDPROC(atomic_##op);							\
 
-ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	add	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 add	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_add_ret)
+#define ATOMIC_OP_RETURN(op)						\
+ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
+	BACKOFF_SETUP(%o2);						\
+1:	lduw	[%o1], %g1;						\
+	op	%g1, %o0, %g7;						\
+	cas	[%o1], %g1, %g7;					\
+	cmp	%g1, %g7;						\
+	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);				\
+	 op	%g1, %o0, %g1;						\
+	retl;								\
+	 sra	%g1, 0, %o0;						\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
+ENDPROC(atomic_##op##_return);
 
-ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	lduw	[%o1], %g1
-	sub	%g1, %o0, %g7
-	cas	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%icc, BACKOFF_LABEL(2f, 1b)
-	 sub	%g1, %o0, %g1
-	retl
-	 sra	%g1, 0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic_sub_ret)
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
 
-ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add)
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
 
-ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 nop
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub)
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
 
-ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	add	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 add	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_add_ret)
+#define ATOMIC64_OP(op)							\
+ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */		\
+	BACKOFF_SETUP(%o2);						\
+1:	ldx	[%o1], %g1;						\
+	op	%g1, %o0, %g7;						\
+	casx	[%o1], %g1, %g7;					\
+	cmp	%g1, %g7;						\
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);				\
+	 nop;								\
+	retl;								\
+	 nop;								\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
+ENDPROC(atomic64_##op);							\
 
-ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
-	BACKOFF_SETUP(%o2)
-1:	ldx	[%o1], %g1
-	sub	%g1, %o0, %g7
-	casx	[%o1], %g1, %g7
-	cmp	%g1, %g7
-	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
-	 nop
-	retl
-	 sub	%g1, %o0, %o0
-2:	BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_sub_ret)
+#define ATOMIC64_OP_RETURN(op)						\
+ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
+	BACKOFF_SETUP(%o2);						\
+1:	ldx	[%o1], %g1;						\
+	op	%g1, %o0, %g7;						\
+	casx	[%o1], %g1, %g7;					\
+	cmp	%g1, %g7;						\
+	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);				\
+	 nop;								\
+	retl;								\
+	 op	%g1, %o0, %o0;						\
+2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
+ENDPROC(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)
+
+ATOMIC64_OPS(add)
+ATOMIC64_OPS(sub)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
 
 ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
 	BACKOFF_SETUP(%o2)
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 323335b9cd2b..1d649a95660c 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -99,14 +99,23 @@ EXPORT_SYMBOL(___copy_in_user);
 EXPORT_SYMBOL(__clear_user);
 
 /* Atomic counter implementation. */
-EXPORT_SYMBOL(atomic_add);
-EXPORT_SYMBOL(atomic_add_ret);
-EXPORT_SYMBOL(atomic_sub);
-EXPORT_SYMBOL(atomic_sub_ret);
-EXPORT_SYMBOL(atomic64_add);
-EXPORT_SYMBOL(atomic64_add_ret);
-EXPORT_SYMBOL(atomic64_sub);
-EXPORT_SYMBOL(atomic64_sub_ret);
+#define ATOMIC_OP(op)							\
+EXPORT_SYMBOL(atomic_##op);						\
+EXPORT_SYMBOL(atomic64_##op);
+
+#define ATOMIC_OP_RETURN(op)						\
+EXPORT_SYMBOL(atomic_##op##_return);					\
+EXPORT_SYMBOL(atomic64_##op##_return);
+
+#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)
+
+ATOMIC_OPS(add)
+ATOMIC_OPS(sub)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
 EXPORT_SYMBOL(atomic64_dec_if_positive);
 
 /* Atomic bit operations. */
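
[Editor's note: one detail worth calling out from the header hunks above is the atomic_read() change from an open-coded volatile cast to ACCESS_ONCE(). As I recall its definition in include/linux/compiler.h around this release, ACCESS_ONCE() expands to the same idiom, so the change reads as a readability cleanup rather than a behavioural one.]

/* Definition from include/linux/compiler.h (circa this merge): */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* So the old and new forms of atomic_read() are equivalent: */
/*   old: (*(volatile int *)&(v)->counter)                   */
/*   new: ACCESS_ONCE((v)->counter)                          */

Using typeof also means the macro picks up the correct width automatically, which is why the same ACCESS_ONCE() form serves both atomic_read() and atomic64_read() above.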