author    | Mark Rutland <mark.rutland@arm.com>      | 2021-05-25 15:02:18 +0100
committer | Peter Zijlstra <peterz@infradead.org>    | 2021-05-26 13:20:51 +0200
commit    | f84f1b9c47a55eb8db4ba5270a504f78c316ce1d (patch)
tree      | ff3fbe3010971e3f5a9c4c31365c24eb79d7a5cc /arch/ia64/include/uapi
parent    | 94b63eb6e131a7fe94f1c1eb8e10162931506176 (diff)
locking/atomic: ia64: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC: once they have
all converted, it will be possible to make significant cleanups to the
atomics headers, and it will become much easier to generically enable
atomic functionality (e.g. debug logic in the instrumented wrappers).
As a step towards that, this patch migrates ia64 to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-20-mark.rutland@arm.com
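For context, the wrapping the message describes looks roughly as follows. Under ARCH_ATOMIC, the architecture supplies arch_atomic_add() and friends, and generated common code (include/asm-generic/atomic-instrumented.h, produced by scripts/atomic/gen-atomic-instrumented.sh) layers an instrumentation hook on top to form the regular atomic_add(). A simplified sketch of the shape of one such wrapper, not the verbatim generated code:

```c
/*
 * Simplified sketch of one generated wrapper from
 * include/asm-generic/atomic-instrumented.h; the real file generates
 * wrappers like this for the entire atomic API.
 */
#include <linux/instrumented.h>

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	/* Debug hook: lets KASAN/KCSAN check the access before it happens. */
	instrument_atomic_read_write(v, sizeof(*v));
	/* The architecture-provided implementation. */
	arch_atomic_add(i, v);
}
```

Because the instrumentation lives in one generic layer rather than in every architecture, enabling new debug logic becomes a single-site change once all architectures provide the arch_*() names.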
Diffstat (limited to 'arch/ia64/include/uapi')
-rw-r--r-- | arch/ia64/include/uapi/asm/cmpxchg.h | 10
1 file changed, 7 insertions(+), 3 deletions(-)
```diff
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 5d90307fd6e0..926c6cb1e029 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-#ifndef _ASM_IA64_CMPXCHG_H
-#define _ASM_IA64_CMPXCHG_H
+#ifndef _UAPI_ASM_IA64_CMPXCHG_H
+#define _UAPI_ASM_IA64_CMPXCHG_H
 
 /*
  * Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 	__xchg_result;						\
 })
 
+#ifndef __KERNEL__
 #define xchg(ptr, x)						\
 ({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+#endif
 
 /*
  * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
  * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
  */
 
+#ifndef __KERNEL__
 /* for compatibility with other platforms: */
 #define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 #define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
 
 #define cmpxchg_local		cmpxchg
 #define cmpxchg64_local		cmpxchg64
+#endif
 
 #ifdef CONFIG_IA64_DEBUG_CMPXCHG
 # define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do {							\
 
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_IA64_CMPXCHG_H */
+#endif /* _UAPI_ASM_IA64_CMPXCHG_H */
```
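Note that this view is limited to arch/ia64/include/uapi, so it shows only the userspace-visible half of the change: the legacy xchg()/cmpxchg() macro names are hidden from kernel builds behind #ifndef __KERNEL__, because kernel code now gets those names from the generic instrumented wrappers. The kernel-side counterpart (a new arch/ia64/include/asm/cmpxchg.h, outside this diffstat) includes the UAPI header and provides the arch_*() names instead. A sketch reconstructed from the commit description, not quoted verbatim from the tree:

```c
/*
 * Sketch of the kernel-internal header added alongside this change
 * (arch/ia64/include/asm/cmpxchg.h), reconstructed from the commit
 * message rather than copied from the tree.
 */
#ifndef _ASM_IA64_CMPXCHG_H
#define _ASM_IA64_CMPXCHG_H

#include <uapi/asm/cmpxchg.h>

/* The arch_*() names the generic wrappers expect, built on the
 * primitives the UAPI header still defines for everyone. */
#define arch_xchg(ptr, x)	\
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})

#define arch_cmpxchg(ptr, o, n)		cmpxchg_acq((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))

#define arch_cmpxchg_local		arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg64

#endif /* _ASM_IA64_CMPXCHG_H */
```

Userspace that includes the exported header is unaffected: outside __KERNEL__, the legacy macro names remain defined exactly as before.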