Diffstat (limited to 'arch/arm64/include')
49 files changed, 1534 insertions, 678 deletions
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h index 406485ed110a..208cec08a74f 100644 --- a/arch/arm64/include/asm/acpi.h +++ b/arch/arm64/include/asm/acpi.h @@ -12,11 +12,11 @@ #ifndef _ASM_ACPI_H #define _ASM_ACPI_H -#include <linux/mm.h> #include <linux/irqchip/arm-gic-acpi.h> +#include <linux/mm.h> +#include <linux/psci.h> #include <asm/cputype.h> -#include <asm/psci.h> #include <asm/smp_plat.h> /* Macros for consistency checks of the GICC subtable of MADT */ diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h index c385a0c4057f..d56ec0715157 100644 --- a/arch/arm64/include/asm/alternative.h +++ b/arch/arm64/include/asm/alternative.h @@ -3,6 +3,8 @@ #ifndef __ASSEMBLY__ +#include <linux/init.h> +#include <linux/kconfig.h> #include <linux/types.h> #include <linux/stddef.h> #include <linux/stringify.h> @@ -15,7 +17,7 @@ struct alt_instr { u8 alt_len; /* size of new instruction(s), <= orig_len */ }; -void apply_alternatives_all(void); +void __init apply_alternatives_all(void); void apply_alternatives(void *start, size_t length); void free_alternatives_memory(void); @@ -40,7 +42,8 @@ void free_alternatives_memory(void); * be fixed in a binutils release posterior to 2.25.51.0.2 (anything * containing commit 4e4d08cf7399b606 or c1baaddf8861). */ -#define ALTERNATIVE(oldinstr, newinstr, feature) \ +#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \ + ".if "__stringify(cfg_enabled)" == 1\n" \ "661:\n\t" \ oldinstr "\n" \ "662:\n" \ @@ -53,7 +56,11 @@ void free_alternatives_memory(void); "664:\n\t" \ ".popsection\n\t" \ ".org . - (664b-663b) + (662b-661b)\n\t" \ - ".org . - (662b-661b) + (664b-663b)\n" + ".org . - (662b-661b) + (664b-663b)\n" \ + ".endif\n" + +#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \ + __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg)) #else @@ -65,7 +72,8 @@ void free_alternatives_memory(void); .byte \alt_len .endm -.macro alternative_insn insn1 insn2 cap +.macro alternative_insn insn1, insn2, cap, enable = 1 + .if \enable 661: \insn1 662: .pushsection .altinstructions, "a" altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f @@ -75,8 +83,70 @@ void free_alternatives_memory(void); 664: .popsection .org . - (664b-663b) + (662b-661b) .org . - (662b-661b) + (664b-663b) + .endif +.endm + +/* + * Begin an alternative code sequence. + * + * The code that follows this macro will be assembled and linked as + * normal. There are no restrictions on this code. + */ +.macro alternative_if_not cap, enable = 1 + .if \enable + .pushsection .altinstructions, "a" + altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f + .popsection +661: + .endif +.endm + +/* + * Provide the alternative code sequence. + * + * The code that follows this macro is assembled into a special + * section to be used for dynamic patching. Code that follows this + * macro must: + * + * 1. Be exactly the same length (in bytes) as the default code + * sequence. + * + * 2. Not contain a branch target that is used outside of the + * alternative sequence it is defined in (branches into an + * alternative sequence are not fixed up). + */ +.macro alternative_else, enable = 1 + .if \enable +662: .pushsection .altinstr_replacement, "ax" +663: + .endif .endm +/* + * Complete an alternative code sequence. + */ +.macro alternative_endif, enable = 1 + .if \enable +664: .popsection + .org . - (664b-663b) + (662b-661b) + .org . 
- (662b-661b) + (664b-663b) + .endif +.endm + +#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \ + alternative_insn insn1, insn2, cap, IS_ENABLED(cfg) + + #endif /* __ASSEMBLY__ */ +/* + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature)); + * + * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO)); + * N.B. If CONFIG_FOO is specified, but not selected, the whole block + * will be omitted, including oldinstr. + */ +#define ALTERNATIVE(oldinstr, newinstr, ...) \ + _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1) + #endif /* __ASM_ALTERNATIVE_H */ diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 144b64ad96c3..b51f2cc22ca9 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -50,18 +50,6 @@ .endm /* - * Save/disable and restore interrupts. - */ - .macro save_and_disable_irqs, olddaif - mrs \olddaif, daif - disable_irq - .endm - - .macro restore_irqs, olddaif - msr daif, \olddaif - .endm - -/* * Enable and disable debug exceptions. */ .macro disable_dbg @@ -103,9 +91,7 @@ * SMP data memory barrier */ .macro smp_dmb, opt -#ifdef CONFIG_SMP dmb \opt -#endif .endm #define USER(l, x...) \ diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 7047051ded40..35a67783cfa0 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -24,233 +24,72 @@ #include <linux/types.h> #include <asm/barrier.h> -#include <asm/cmpxchg.h> - -#define ATOMIC_INIT(i) { (i) } +#include <asm/lse.h> #ifdef __KERNEL__ -/* - * On ARM, ordinary assignment (str instruction) doesn't clear the local - * strex/ldrex monitor on some implementations. The reason we can use it for - * atomic_set() is the clrex or dummy strex done on every exception return. - */ -#define atomic_read(v) ACCESS_ONCE((v)->counter) -#define atomic_set(v,i) (((v)->counter) = (i)) - -/* - * AArch64 UP and SMP safe atomic ops. We use load exclusive and - * store exclusive to ensure that these are atomic. We may loop - * to ensure that the update happens. 
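The config-gated ALTERNATIVE() form introduced in the alternative.h hunk above is easiest to see from a caller. A minimal sketch, mirroring the pattern the futex.h hunk later in this diff adopts (SET_PSTATE_PAN and ARM64_HAS_PAN are taken from that hunk; this snippet is illustration, not part of the patch):

```c
/*
 * Sketch only. If CONFIG_ARM64_PAN is not selected, the whole block --
 * including the "nop" -- is omitted at build time; otherwise the nop is
 * runtime-patched to SET_PSTATE_PAN(1) on CPUs that have ARM64_HAS_PAN.
 */
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN));
```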
- */ - -#define ATOMIC_OP(op, asm_op) \ -static inline void atomic_##op(int i, atomic_t *v) \ -{ \ - unsigned long tmp; \ - int result; \ - \ - asm volatile("// atomic_" #op "\n" \ -"1: ldxr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ -} \ +#define __ARM64_IN_ATOMIC_IMPL -#define ATOMIC_OP_RETURN(op, asm_op) \ -static inline int atomic_##op##_return(int i, atomic_t *v) \ -{ \ - unsigned long tmp; \ - int result; \ - \ - asm volatile("// atomic_" #op "_return\n" \ -"1: ldxr %w0, %2\n" \ -" " #asm_op " %w0, %w0, %w3\n" \ -" stlxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ - : "memory"); \ - \ - smp_mb(); \ - return result; \ -} - -#define ATOMIC_OPS(op, asm_op) \ - ATOMIC_OP(op, asm_op) \ - ATOMIC_OP_RETURN(op, asm_op) - -ATOMIC_OPS(add, add) -ATOMIC_OPS(sub, sub) - -#undef ATOMIC_OPS -#undef ATOMIC_OP_RETURN -#undef ATOMIC_OP - -static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) -{ - unsigned long tmp; - int oldval; - - smp_mb(); - - asm volatile("// atomic_cmpxchg\n" -"1: ldxr %w1, %2\n" -" cmp %w1, %w3\n" -" b.ne 2f\n" -" stxr %w0, %w4, %2\n" -" cbnz %w0, 1b\n" -"2:" - : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter) - : "Ir" (old), "r" (new) - : "cc"); - - smp_mb(); - return oldval; -} - -#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) +#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) +#include <asm/atomic_lse.h> +#else +#include <asm/atomic_ll_sc.h> +#endif -static inline int __atomic_add_unless(atomic_t *v, int a, int u) -{ - int c, old; +#undef __ARM64_IN_ATOMIC_IMPL - c = atomic_read(v); - while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) - c = old; - return c; -} +#include <asm/cmpxchg.h> -#define atomic_inc(v) atomic_add(1, v) -#define atomic_dec(v) atomic_sub(1, v) +#define ___atomic_add_unless(v, a, u, sfx) \ +({ \ + typeof((v)->counter) c, old; \ + \ + c = atomic##sfx##_read(v); \ + while (c != (u) && \ + (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \ + c = old; \ + c; \ + }) -#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0) -#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0) -#define atomic_inc_return(v) (atomic_add_return(1, v)) -#define atomic_dec_return(v) (atomic_sub_return(1, v)) -#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0) +#define ATOMIC_INIT(i) { (i) } -#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0) +#define atomic_read(v) READ_ONCE((v)->counter) +#define atomic_set(v, i) (((v)->counter) = (i)) +#define atomic_xchg(v, new) xchg(&((v)->counter), (new)) +#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new)) + +#define atomic_inc(v) atomic_add(1, (v)) +#define atomic_dec(v) atomic_sub(1, (v)) +#define atomic_inc_return(v) atomic_add_return(1, (v)) +#define atomic_dec_return(v) atomic_sub_return(1, (v)) +#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0) +#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0) +#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0) +#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0) +#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,) +#define atomic_andnot atomic_andnot /* * 64-bit atomic operations. 
*/ -#define ATOMIC64_INIT(i) { (i) } - -#define atomic64_read(v) ACCESS_ONCE((v)->counter) -#define atomic64_set(v,i) (((v)->counter) = (i)) - -#define ATOMIC64_OP(op, asm_op) \ -static inline void atomic64_##op(long i, atomic64_t *v) \ -{ \ - long result; \ - unsigned long tmp; \ - \ - asm volatile("// atomic64_" #op "\n" \ -"1: ldxr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ -} \ - -#define ATOMIC64_OP_RETURN(op, asm_op) \ -static inline long atomic64_##op##_return(long i, atomic64_t *v) \ -{ \ - long result; \ - unsigned long tmp; \ - \ - asm volatile("// atomic64_" #op "_return\n" \ -"1: ldxr %0, %2\n" \ -" " #asm_op " %0, %0, %3\n" \ -" stlxr %w1, %0, %2\n" \ -" cbnz %w1, 1b" \ - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ - : "memory"); \ - \ - smp_mb(); \ - return result; \ -} - -#define ATOMIC64_OPS(op, asm_op) \ - ATOMIC64_OP(op, asm_op) \ - ATOMIC64_OP_RETURN(op, asm_op) - -ATOMIC64_OPS(add, add) -ATOMIC64_OPS(sub, sub) - -#undef ATOMIC64_OPS -#undef ATOMIC64_OP_RETURN -#undef ATOMIC64_OP - -static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new) -{ - long oldval; - unsigned long res; - - smp_mb(); - - asm volatile("// atomic64_cmpxchg\n" -"1: ldxr %1, %2\n" -" cmp %1, %3\n" -" b.ne 2f\n" -" stxr %w0, %4, %2\n" -" cbnz %w0, 1b\n" -"2:" - : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter) - : "Ir" (old), "r" (new) - : "cc"); - - smp_mb(); - return oldval; -} - -#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) - -static inline long atomic64_dec_if_positive(atomic64_t *v) -{ - long result; - unsigned long tmp; - - asm volatile("// atomic64_dec_if_positive\n" -"1: ldxr %0, %2\n" -" subs %0, %0, #1\n" -" b.mi 2f\n" -" stlxr %w1, %0, %2\n" -" cbnz %w1, 1b\n" -" dmb ish\n" -"2:" - : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) - : - : "cc", "memory"); - - return result; -} - -static inline int atomic64_add_unless(atomic64_t *v, long a, long u) -{ - long c, old; - - c = atomic64_read(v); - while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c) - c = old; - - return c != u; -} - -#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0) -#define atomic64_inc(v) atomic64_add(1LL, (v)) -#define atomic64_inc_return(v) atomic64_add_return(1LL, (v)) +#define ATOMIC64_INIT ATOMIC_INIT +#define atomic64_read atomic_read +#define atomic64_set atomic_set +#define atomic64_xchg atomic_xchg +#define atomic64_cmpxchg atomic_cmpxchg + +#define atomic64_inc(v) atomic64_add(1, (v)) +#define atomic64_dec(v) atomic64_sub(1, (v)) +#define atomic64_inc_return(v) atomic64_add_return(1, (v)) +#define atomic64_dec_return(v) atomic64_sub_return(1, (v)) #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0) -#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0) -#define atomic64_dec(v) atomic64_sub(1LL, (v)) -#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v)) -#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0) -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL) +#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0) +#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0) +#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0) +#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u) +#define atomic64_andnot atomic64_andnot + +#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) #endif 
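To see what the new ___atomic_add_unless() helper in the atomic.h hunk above buys, here is its hand expansion for the 32-bit case (empty sfx), which reproduces the loop of the removed out-of-line __atomic_add_unless() — illustration only:

```c
/* Hand expansion of __atomic_add_unless(v, a, u); not part of the patch. */
static inline int example_atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;	/* value seen before any add; callers compare against u */
}
```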
#endif diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h new file mode 100644 index 000000000000..b3b5c4ae3800 --- /dev/null +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -0,0 +1,247 @@ +/* + * Based on arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_ATOMIC_LL_SC_H +#define __ASM_ATOMIC_LL_SC_H + +#ifndef __ARM64_IN_ATOMIC_IMPL +#error "please don't include this file directly" +#endif + +/* + * AArch64 UP and SMP safe atomic ops. We use load exclusive and + * store exclusive to ensure that these are atomic. We may loop + * to ensure that the update happens. + * + * NOTE: these functions do *not* follow the PCS and must explicitly + * save any clobbered registers other than x0 (regardless of return + * value). This is achieved through -fcall-saved-* compiler flags for + * this file, which unfortunately don't work on a per-function basis + * (the optimize attribute silently ignores these options). + */ + +#define ATOMIC_OP(op, asm_op) \ +__LL_SC_INLINE void \ +__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \ +{ \ + unsigned long tmp; \ + int result; \ + \ + asm volatile("// atomic_" #op "\n" \ +" prfm pstl1strm, %2\n" \ +"1: ldxr %w0, %2\n" \ +" " #asm_op " %w0, %w0, %w3\n" \ +" stxr %w1, %w0, %2\n" \ +" cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ + : "Ir" (i)); \ +} \ +__LL_SC_EXPORT(atomic_##op); + +#define ATOMIC_OP_RETURN(op, asm_op) \ +__LL_SC_INLINE int \ +__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \ +{ \ + unsigned long tmp; \ + int result; \ + \ + asm volatile("// atomic_" #op "_return\n" \ +" prfm pstl1strm, %2\n" \ +"1: ldxr %w0, %2\n" \ +" " #asm_op " %w0, %w0, %w3\n" \ +" stlxr %w1, %w0, %2\n" \ +" cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ + : "Ir" (i) \ + : "memory"); \ + \ + smp_mb(); \ + return result; \ +} \ +__LL_SC_EXPORT(atomic_##op##_return); + +#define ATOMIC_OPS(op, asm_op) \ + ATOMIC_OP(op, asm_op) \ + ATOMIC_OP_RETURN(op, asm_op) + +ATOMIC_OPS(add, add) +ATOMIC_OPS(sub, sub) + +ATOMIC_OP(and, and) +ATOMIC_OP(andnot, bic) +ATOMIC_OP(or, orr) +ATOMIC_OP(xor, eor) + +#undef ATOMIC_OPS +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define ATOMIC64_OP(op, asm_op) \ +__LL_SC_INLINE void \ +__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \ +{ \ + long result; \ + unsigned long tmp; \ + \ + asm volatile("// atomic64_" #op "\n" \ +" prfm pstl1strm, %2\n" \ +"1: ldxr %0, %2\n" \ +" " #asm_op " %0, %0, %3\n" \ +" stxr %w1, %0, %2\n" \ +" cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ + : "Ir" (i)); \ +} \ +__LL_SC_EXPORT(atomic64_##op); + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +__LL_SC_INLINE long \ +__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \ +{ \ + long result; \ + unsigned long tmp; \ + \ + asm volatile("// 
atomic64_" #op "_return\n" \ +" prfm pstl1strm, %2\n" \ +"1: ldxr %0, %2\n" \ +" " #asm_op " %0, %0, %3\n" \ +" stlxr %w1, %0, %2\n" \ +" cbnz %w1, 1b" \ + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ + : "Ir" (i) \ + : "memory"); \ + \ + smp_mb(); \ + return result; \ +} \ +__LL_SC_EXPORT(atomic64_##op##_return); + +#define ATOMIC64_OPS(op, asm_op) \ + ATOMIC64_OP(op, asm_op) \ + ATOMIC64_OP_RETURN(op, asm_op) + +ATOMIC64_OPS(add, add) +ATOMIC64_OPS(sub, sub) + +ATOMIC64_OP(and, and) +ATOMIC64_OP(andnot, bic) +ATOMIC64_OP(or, orr) +ATOMIC64_OP(xor, eor) + +#undef ATOMIC64_OPS +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP + +__LL_SC_INLINE long +__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v)) +{ + long result; + unsigned long tmp; + + asm volatile("// atomic64_dec_if_positive\n" +" prfm pstl1strm, %2\n" +"1: ldxr %0, %2\n" +" subs %0, %0, #1\n" +" b.lt 2f\n" +" stlxr %w1, %0, %2\n" +" cbnz %w1, 1b\n" +" dmb ish\n" +"2:" + : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) + : + : "cc", "memory"); + + return result; +} +__LL_SC_EXPORT(atomic64_dec_if_positive); + +#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \ +__LL_SC_INLINE unsigned long \ +__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \ + unsigned long old, \ + unsigned long new)) \ +{ \ + unsigned long tmp, oldval; \ + \ + asm volatile( \ + " prfm pstl1strm, %[v]\n" \ + "1: ldxr" #sz "\t%" #w "[oldval], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ + " cbnz %" #w "[tmp], 2f\n" \ + " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \ + " cbnz %w[tmp], 1b\n" \ + " " #mb "\n" \ + " mov %" #w "[oldval], %" #w "[old]\n" \ + "2:" \ + : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ + [v] "+Q" (*(unsigned long *)ptr) \ + : [old] "Lr" (old), [new] "r" (new) \ + : cl); \ + \ + return oldval; \ +} \ +__LL_SC_EXPORT(__cmpxchg_case_##name); + +__CMPXCHG_CASE(w, b, 1, , , ) +__CMPXCHG_CASE(w, h, 2, , , ) +__CMPXCHG_CASE(w, , 4, , , ) +__CMPXCHG_CASE( , , 8, , , ) +__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory") +__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory") +__CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory") +__CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory") + +#undef __CMPXCHG_CASE + +#define __CMPXCHG_DBL(name, mb, rel, cl) \ +__LL_SC_INLINE int \ +__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ + unsigned long old2, \ + unsigned long new1, \ + unsigned long new2, \ + volatile void *ptr)) \ +{ \ + unsigned long tmp, ret; \ + \ + asm volatile("// __cmpxchg_double" #name "\n" \ + " prfm pstl1strm, %2\n" \ + "1: ldxp %0, %1, %2\n" \ + " eor %0, %0, %3\n" \ + " eor %1, %1, %4\n" \ + " orr %1, %0, %1\n" \ + " cbnz %1, 2f\n" \ + " st" #rel "xp %w0, %5, %6, %2\n" \ + " cbnz %w0, 1b\n" \ + " " #mb "\n" \ + "2:" \ + : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ + : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ + : cl); \ + \ + return ret; \ +} \ +__LL_SC_EXPORT(__cmpxchg_double##name); + +__CMPXCHG_DBL( , , , ) +__CMPXCHG_DBL(_mb, dmb ish, l, "memory") + +#undef __CMPXCHG_DBL + +#endif /* __ASM_ATOMIC_LL_SC_H */ diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h new file mode 100644 index 000000000000..55d740e63459 --- /dev/null +++ b/arch/arm64/include/asm/atomic_lse.h @@ -0,0 +1,391 @@ +/* + * Based on arch/arm/include/asm/atomic.h + * + * Copyright (C) 1996 Russell King. + * Copyright (C) 2002 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ASM_ATOMIC_LSE_H +#define __ASM_ATOMIC_LSE_H + +#ifndef __ARM64_IN_ATOMIC_IMPL +#error "please don't include this file directly" +#endif + +#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op) + +static inline void atomic_andnot(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot), + " stclr %w[i], %[v]\n") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic_or(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or), + " stset %w[i], %[v]\n") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic_xor(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor), + " steor %w[i], %[v]\n") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic_add(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add), + " stadd %w[i], %[v]\n") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline int atomic_add_return(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC(add_return), + /* LSE atomics */ + " ldaddal %w[i], w30, %[v]\n" + " add %w[i], %w[i], w30") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30", "memory"); + + return w0; +} + +static inline void atomic_and(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC(and), + /* LSE atomics */ + " mvn %w[i], %w[i]\n" + " stclr %w[i], %[v]") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic_sub(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC(sub), + /* LSE atomics */ + " neg %w[i], %w[i]\n" + " stadd %w[i], %[v]") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline int atomic_sub_return(int i, atomic_t *v) +{ + register int w0 asm ("w0") = i; + register atomic_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC(sub_return) + " nop", + /* LSE atomics */ + " neg %w[i], %w[i]\n" + " ldaddal %w[i], w30, %[v]\n" + " add %w[i], %w[i], w30") + : [i] "+r" (w0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30", "memory"); + + return w0; +} + +#undef __LL_SC_ATOMIC + +#define 
__LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op) + +static inline void atomic64_andnot(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot), + " stclr %[i], %[v]\n") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic64_or(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or), + " stset %[i], %[v]\n") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic64_xor(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor), + " steor %[i], %[v]\n") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic64_add(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add), + " stadd %[i], %[v]\n") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline long atomic64_add_return(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC64(add_return), + /* LSE atomics */ + " ldaddal %[i], x30, %[v]\n" + " add %[i], %[i], x30") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30", "memory"); + + return x0; +} + +static inline void atomic64_and(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC64(and), + /* LSE atomics */ + " mvn %[i], %[i]\n" + " stclr %[i], %[v]") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline void atomic64_sub(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC64(sub), + /* LSE atomics */ + " neg %[i], %[i]\n" + " stadd %[i], %[v]") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30"); +} + +static inline long atomic64_sub_return(long i, atomic64_t *v) +{ + register long x0 asm ("x0") = i; + register atomic64_t *x1 asm ("x1") = v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC64(sub_return) + " nop", + /* LSE atomics */ + " neg %[i], %[i]\n" + " ldaddal %[i], x30, %[v]\n" + " add %[i], %[i], x30") + : [i] "+r" (x0), [v] "+Q" (v->counter) + : "r" (x1) + : "x30", "memory"); + + return x0; +} + +static inline long atomic64_dec_if_positive(atomic64_t *v) +{ + register long x0 asm ("x0") = (long)v; + + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " nop\n" + __LL_SC_ATOMIC64(dec_if_positive) + " nop\n" + " nop\n" + " nop\n" + " nop\n" + " nop", + /* LSE atomics */ + "1: ldr x30, %[v]\n" + " subs %[ret], x30, #1\n" + " b.lt 2f\n" + " casal x30, %[ret], %[v]\n" + " sub x30, x30, #1\n" + " sub x30, x30, %[ret]\n" + " cbnz x30, 1b\n" + "2:") + : [ret] "+&r" (x0), [v] "+Q" (v->counter) + : + : "x30", "cc", "memory"); + + return x0; +} + +#undef __LL_SC_ATOMIC64 + +#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) + +#define __CMPXCHG_CASE(w, sz, name, mb, cl...) 
\ +static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \ + unsigned long old, \ + unsigned long new) \ +{ \ + register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ + register unsigned long x1 asm ("x1") = old; \ + register unsigned long x2 asm ("x2") = new; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + " nop\n" \ + __LL_SC_CMPXCHG(name) \ + " nop", \ + /* LSE atomics */ \ + " mov " #w "30, %" #w "[old]\n" \ + " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \ + " mov %" #w "[ret], " #w "30") \ + : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ + : [old] "r" (x1), [new] "r" (x2) \ + : "x30" , ##cl); \ + \ + return x0; \ +} + +__CMPXCHG_CASE(w, b, 1, ) +__CMPXCHG_CASE(w, h, 2, ) +__CMPXCHG_CASE(w, , 4, ) +__CMPXCHG_CASE(x, , 8, ) +__CMPXCHG_CASE(w, b, mb_1, al, "memory") +__CMPXCHG_CASE(w, h, mb_2, al, "memory") +__CMPXCHG_CASE(w, , mb_4, al, "memory") +__CMPXCHG_CASE(x, , mb_8, al, "memory") + +#undef __LL_SC_CMPXCHG +#undef __CMPXCHG_CASE + +#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op) + +#define __CMPXCHG_DBL(name, mb, cl...) \ +static inline int __cmpxchg_double##name(unsigned long old1, \ + unsigned long old2, \ + unsigned long new1, \ + unsigned long new2, \ + volatile void *ptr) \ +{ \ + unsigned long oldval1 = old1; \ + unsigned long oldval2 = old2; \ + register unsigned long x0 asm ("x0") = old1; \ + register unsigned long x1 asm ("x1") = old2; \ + register unsigned long x2 asm ("x2") = new1; \ + register unsigned long x3 asm ("x3") = new2; \ + register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ + \ + asm volatile(ARM64_LSE_ATOMIC_INSN( \ + /* LL/SC */ \ + " nop\n" \ + " nop\n" \ + " nop\n" \ + __LL_SC_CMPXCHG_DBL(name), \ + /* LSE atomics */ \ + " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ + " eor %[old1], %[old1], %[oldval1]\n" \ + " eor %[old2], %[old2], %[oldval2]\n" \ + " orr %[old1], %[old1], %[old2]") \ + : [old1] "+r" (x0), [old2] "+r" (x1), \ + [v] "+Q" (*(unsigned long *)ptr) \ + : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ + [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ + : "x30" , ##cl); \ + \ + return x0; \ +} + +__CMPXCHG_DBL( , ) +__CMPXCHG_DBL(_mb, al, "memory") + +#undef __LL_SC_CMPXCHG_DBL +#undef __CMPXCHG_DBL + +#endif /* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 0fa47c4275cb..624f9679f4b0 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -35,28 +35,6 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) -#ifndef CONFIG_SMP -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() - -#define smp_store_release(p, v) \ -do { \ - compiletime_assert_atomic_type(*p); \ - barrier(); \ - ACCESS_ONCE(*p) = (v); \ -} while (0) - -#define smp_load_acquire(p) \ -({ \ - typeof(*p) ___p1 = ACCESS_ONCE(*p); \ - compiletime_assert_atomic_type(*p); \ - barrier(); \ - ___p1; \ -}) - -#else - #define smp_mb() dmb(ish) #define smp_rmb() dmb(ishld) #define smp_wmb() dmb(ishst) @@ -109,8 +87,6 @@ do { \ ___p1; \ }) -#endif - #define read_barrier_depends() do { } while(0) #define smp_read_barrier_depends() do { } while(0) diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h new file mode 100644 index 000000000000..4a748ce9ba1a --- /dev/null +++ b/arch/arm64/include/asm/bug.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2015 ARM Limited + * Author: Dave Martin <Dave.Martin@arm.com> + * + * This program is free software; you can 
redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _ARCH_ARM64_ASM_BUG_H +#define _ARCH_ARM64_ASM_BUG_H + +#include <asm/debug-monitors.h> + +#ifdef CONFIG_GENERIC_BUG +#define HAVE_ARCH_BUG + +#ifdef CONFIG_DEBUG_BUGVERBOSE +#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line) +#define __BUGVERBOSE_LOCATION(file, line) \ + ".pushsection .rodata.str,\"aMS\",@progbits,1\n" \ + "2: .string \"" file "\"\n\t" \ + ".popsection\n\t" \ + \ + ".long 2b - 0b\n\t" \ + ".short " #line "\n\t" +#else +#define _BUGVERBOSE_LOCATION(file, line) +#endif + +#define _BUG_FLAGS(flags) __BUG_FLAGS(flags) + +#define __BUG_FLAGS(flags) asm volatile ( \ + ".pushsection __bug_table,\"a\"\n\t" \ + ".align 2\n\t" \ + "0: .long 1f - 0b\n\t" \ +_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \ + ".short " #flags "\n\t" \ + ".popsection\n" \ + \ + "1: brk %[imm]" \ + :: [imm] "i" (BUG_BRK_IMM) \ +) + +#define BUG() do { \ + _BUG_FLAGS(0); \ + unreachable(); \ +} while (0) + +#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint)) + +#endif /* ! CONFIG_GENERIC_BUG */ + +#include <asm-generic/bug.h> + +#endif /* ! _ARCH_ARM64_ASM_BUG_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index d8c25b7b18fb..899e9f1d19e4 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -21,7 +21,9 @@ #include <linux/bug.h> #include <linux/mmdebug.h> +#include <asm/atomic.h> #include <asm/barrier.h> +#include <asm/lse.h> static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size) { @@ -29,37 +31,73 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size switch (size) { case 1: - asm volatile("// __xchg1\n" + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " prfm pstl1strm, %2\n" "1: ldxrb %w0, %2\n" " stlxrb %w1, %w3, %2\n" " cbnz %w1, 1b\n" + " dmb ish", + /* LSE atomics */ + " nop\n" + " nop\n" + " swpalb %w3, %w0, %2\n" + " nop\n" + " nop") : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) : "r" (x) : "memory"); break; case 2: - asm volatile("// __xchg2\n" + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " prfm pstl1strm, %2\n" "1: ldxrh %w0, %2\n" " stlxrh %w1, %w3, %2\n" " cbnz %w1, 1b\n" + " dmb ish", + /* LSE atomics */ + " nop\n" + " nop\n" + " swpalh %w3, %w0, %2\n" + " nop\n" + " nop") : "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr) : "r" (x) : "memory"); break; case 4: - asm volatile("// __xchg4\n" + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " prfm pstl1strm, %2\n" "1: ldxr %w0, %2\n" " stlxr %w1, %w3, %2\n" " cbnz %w1, 1b\n" + " dmb ish", + /* LSE atomics */ + " nop\n" + " nop\n" + " swpal %w3, %w0, %2\n" + " nop\n" + " nop") : "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr) : "r" (x) : "memory"); break; case 8: - asm volatile("// __xchg8\n" + asm volatile(ARM64_LSE_ATOMIC_INSN( + /* LL/SC */ + " prfm pstl1strm, %2\n" "1: ldxr %0, %2\n" " stlxr %w1, %3, %2\n" " cbnz %w1, 1b\n" + " dmb ish", + /* LSE atomics */ + " nop\n" + " nop\n" + " swpal %3, %0, %2\n" + " nop\n" + " nop") : 
"=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr) : "r" (x) : "memory"); @@ -68,7 +106,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size BUILD_BUG(); } - smp_mb(); return ret; } @@ -83,131 +120,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) { - unsigned long oldval = 0, res; - switch (size) { case 1: - do { - asm volatile("// __cmpxchg1\n" - " ldxrb %w1, %2\n" - " mov %w0, #0\n" - " cmp %w1, %w3\n" - " b.ne 1f\n" - " stxrb %w0, %w4, %2\n" - "1:\n" - : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr) - : "Ir" (old), "r" (new) - : "cc"); - } while (res); - break; - + return __cmpxchg_case_1(ptr, (u8)old, new); case 2: - do { - asm volatile("// __cmpxchg2\n" - " ldxrh %w1, %2\n" - " mov %w0, #0\n" - " cmp %w1, %w3\n" - " b.ne 1f\n" - " stxrh %w0, %w4, %2\n" - "1:\n" - : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr) - : "Ir" (old), "r" (new) - : "cc"); - } while (res); - break; - + return __cmpxchg_case_2(ptr, (u16)old, new); case 4: - do { - asm volatile("// __cmpxchg4\n" - " ldxr %w1, %2\n" - " mov %w0, #0\n" - " cmp %w1, %w3\n" - " b.ne 1f\n" - " stxr %w0, %w4, %2\n" - "1:\n" - : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr) - : "Ir" (old), "r" (new) - : "cc"); - } while (res); - break; - + return __cmpxchg_case_4(ptr, old, new); case 8: - do { - asm volatile("// __cmpxchg8\n" - " ldxr %1, %2\n" - " mov %w0, #0\n" - " cmp %1, %3\n" - " b.ne 1f\n" - " stxr %w0, %4, %2\n" - "1:\n" - : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr) - : "Ir" (old), "r" (new) - : "cc"); - } while (res); - break; - + return __cmpxchg_case_8(ptr, old, new); default: BUILD_BUG(); } - return oldval; + unreachable(); } -#define system_has_cmpxchg_double() 1 - -static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2, - unsigned long old1, unsigned long old2, - unsigned long new1, unsigned long new2, int size) +static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, + unsigned long new, int size) { - unsigned long loop, lost; - switch (size) { + case 1: + return __cmpxchg_case_mb_1(ptr, (u8)old, new); + case 2: + return __cmpxchg_case_mb_2(ptr, (u16)old, new); + case 4: + return __cmpxchg_case_mb_4(ptr, old, new); case 8: - VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1); - do { - asm volatile("// __cmpxchg_double8\n" - " ldxp %0, %1, %2\n" - " eor %0, %0, %3\n" - " eor %1, %1, %4\n" - " orr %1, %0, %1\n" - " mov %w0, #0\n" - " cbnz %1, 1f\n" - " stxp %w0, %5, %6, %2\n" - "1:\n" - : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1) - : "r" (old1), "r"(old2), "r"(new1), "r"(new2)); - } while (loop); - break; + return __cmpxchg_case_mb_8(ptr, old, new); default: BUILD_BUG(); } - return !lost; -} - -static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2, - unsigned long old1, unsigned long old2, - unsigned long new1, unsigned long new2, int size) -{ - int ret; - - smp_mb(); - ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size); - smp_mb(); - - return ret; -} - -static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, - unsigned long new, int size) -{ - unsigned long ret; - - smp_mb(); - ret = __cmpxchg(ptr, old, new, size); - smp_mb(); - - return ret; + unreachable(); } #define cmpxchg(ptr, o, n) \ @@ -228,21 +173,32 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, __ret; \ }) +#define 
system_has_cmpxchg_double() 1 + +#define __cmpxchg_double_check(ptr1, ptr2) \ +({ \ + if (sizeof(*(ptr1)) != 8) \ + BUILD_BUG(); \ + VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \ +}) + #define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \ ({\ int __ret;\ - __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \ - (unsigned long)(o2), (unsigned long)(n1), \ - (unsigned long)(n2), sizeof(*(ptr1)));\ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ __ret; \ }) #define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \ ({\ int __ret;\ - __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \ - (unsigned long)(o2), (unsigned long)(n1), \ - (unsigned long)(n2), sizeof(*(ptr1)));\ + __cmpxchg_double_check(ptr1, ptr2); \ + __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \ + (unsigned long)(n1), (unsigned long)(n2), \ + ptr1); \ __ret; \ }) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index c1044218a63a..171570702bb8 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -25,15 +25,20 @@ #define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1 #define ARM64_WORKAROUND_845719 2 #define ARM64_HAS_SYSREG_GIC_CPUIF 3 +#define ARM64_HAS_PAN 4 +#define ARM64_HAS_LSE_ATOMICS 5 -#define ARM64_NCAPS 4 +#define ARM64_NCAPS 6 #ifndef __ASSEMBLY__ +#include <linux/kernel.h> + struct arm64_cpu_capabilities { const char *desc; u16 capability; bool (*matches)(const struct arm64_cpu_capabilities *); + void (*enable)(void); union { struct { /* To be used for erratum handling only */ u32 midr_model; @@ -41,8 +46,8 @@ struct arm64_cpu_capabilities { }; struct { /* Feature register checking */ - u64 register_mask; - u64 register_value; + int field_pos; + int min_field_value; }; }; }; @@ -70,6 +75,13 @@ static inline void cpus_set_cap(unsigned int num) __set_bit(num, cpu_hwcaps); } +static inline int __attribute_const__ cpuid_feature_extract_field(u64 features, + int field) +{ + return (s64)(features << (64 - 4 - field)) >> (64 - 4); +} + + void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, const char *info); void check_local_cpu_errata(void); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index a84ec605bed8..ee6403df9fe4 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -81,9 +81,6 @@ #define ID_AA64MMFR0_BIGEND(mmfr0) \ (((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT) -#define SCTLR_EL1_CP15BEN (0x1 << 5) -#define SCTLR_EL1_SED (0x1 << 8) - #ifndef __ASSEMBLY__ /* diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 40ec68aa6870..279c85b5ec09 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -18,6 +18,12 @@ #ifdef __KERNEL__ +#include <linux/errno.h> +#include <linux/types.h> +#include <asm/esr.h> +#include <asm/insn.h> +#include <asm/ptrace.h> + /* Low-level stepping controls. 
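Returning to the cpufeature.h hunk above: cpuid_feature_extract_field() relies on a double shift to sign-extend a 4-bit ID register field (negative values mean "not implemented" in the signed ID scheme). A standalone sketch of the same arithmetic, plain C and not part of the patch:

```c
#include <assert.h>
#include <stdint.h>

/*
 * Same double-shift as cpuid_feature_extract_field(): the left shift
 * places the 4-bit field at the top of the word, the arithmetic right
 * shift then sign-extends it back down.
 */
static int extract_field(uint64_t features, int field)
{
	return (int64_t)(features << (64 - 4 - field)) >> (64 - 4);
}

int main(void)
{
	assert(extract_field(0x100, 8) == 1);	/* raw 0x1 at bit 8 ->  1 */
	assert(extract_field(0xf00, 8) == -1);	/* raw 0xf at bit 8 -> -1 */
	return 0;
}
```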
*/ #define DBG_MDSCR_SS (1 << 0) #define DBG_SPSR_SS (1 << 21) @@ -38,12 +44,7 @@ /* * Break point instruction encoding */ -#define BREAK_INSTR_SIZE 4 - -/* - * ESR values expected for dynamic and compile time BRK instruction - */ -#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff)) +#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE /* * #imm16 values used for BRK instruction generation @@ -51,10 +52,12 @@ * 0x100: for triggering a fault on purpose (reserved) * 0x400: for dynamic BRK instruction * 0x401: for compile time BRK instruction + * 0x800: kernel-mode BUG() and WARN() traps */ #define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 +#define BUG_BRK_IMM 0x800 /* * BRK instruction encoding @@ -68,25 +71,10 @@ */ #define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5)) -/* - * Extract byte from BRK instruction - */ -#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \ - ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff) - -/* - * Extract byte from BRK #imm16 - */ -#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \ - (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff) - -#define KGDB_DYN_DBG_BRK_BYTE(x) \ - (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x)) - -#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0) -#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1) -#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2) -#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3) +#define AARCH64_BREAK_KGDB_DYN_DBG \ + (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5)) +#define KGDB_DYN_BRK_INS_BYTE(x) \ + ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff) #define CACHE_FLUSH_IS_SAFE 1 @@ -127,13 +115,13 @@ void unregister_break_hook(struct break_hook *hook); u8 debug_monitors_arch(void); -enum debug_el { +enum dbg_active_el { DBG_ACTIVE_EL0 = 0, DBG_ACTIVE_EL1, }; -void enable_debug_monitors(enum debug_el el); -void disable_debug_monitors(enum debug_el el); +void enable_debug_monitors(enum dbg_active_el el); +void disable_debug_monitors(enum dbg_active_el el); void user_rewind_single_step(struct task_struct *task); void user_fastforward_single_step(struct task_struct *task); diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index f0d6d0bfe55c..cfdb34bedbcd 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -22,8 +22,6 @@ #include <linux/types.h> #include <linux/vmalloc.h> -#include <asm-generic/dma-coherent.h> - #include <xen/xen.h> #include <asm/xen/hypervisor.h> @@ -86,28 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) return (phys_addr_t)dev_addr; } -static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - debug_dma_mapping_error(dev, dev_addr); - return ops->mapping_error(dev, dev_addr); -} - -static inline int dma_supported(struct device *dev, u64 mask) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - return ops->dma_supported(dev, mask); -} - -static inline int dma_set_mask(struct device *dev, u64 mask) -{ - if (!dev->dma_mask || !dma_supported(dev, mask)) - return -EIO; - *dev->dma_mask = mask; - - return 0; -} - static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { if (!dev->dma_mask) @@ -120,50 +96,5 @@ static inline void dma_mark_clean(void *addr, size_t size) { } -#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL) -#define dma_free_coherent(d, s, h, f) 
dma_free_attrs(d, s, h, f, NULL) - -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - void *vaddr; - - if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr)) - return vaddr; - - vaddr = ops->alloc(dev, size, dma_handle, flags, attrs); - debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr); - return vaddr; -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *vaddr, dma_addr_t dev_addr, - struct dma_attrs *attrs) -{ - struct dma_map_ops *ops = get_dma_ops(dev); - - if (dma_release_from_coherent(dev, get_order(size), vaddr)) - return; - - debug_dma_free_coherent(dev, size, vaddr, dev_addr); - ops->free(dev, size, vaddr, dev_addr, attrs); -} - -/* - * There is no dma_cache_sync() implementation, so just return NULL here. - */ -static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t flags) -{ - return NULL; -} - -static inline void dma_free_noncoherent(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t handle) -{ -} - #endif /* __KERNEL__ */ #endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 70522450ca23..77eeb2cc648f 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -18,6 +18,8 @@ #ifndef __ASM_ESR_H #define __ASM_ESR_H +#include <asm/memory.h> + #define ESR_ELx_EC_UNKNOWN (0x00) #define ESR_ELx_EC_WFx (0x01) /* Unallocated EC: 0x02 */ @@ -99,6 +101,13 @@ #define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) #define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1) +/* ESR value templates for specific events */ + +/* BRK instruction trap from AArch64 state */ +#define ESR_ELx_VAL_BRK64(imm) \ + ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \ + ((imm) & 0xffff)) + #ifndef __ASSEMBLY__ #include <asm/types.h> diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index 0303705fcad6..6cb7e1a6bc02 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -18,7 +18,13 @@ #ifndef __ASM_EXCEPTION_H #define __ASM_EXCEPTION_H +#include <linux/ftrace.h> + #define __exception __attribute__((section(".exception.text"))) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +#define __exception_irq_entry __irq_entry +#else #define __exception_irq_entry __exception +#endif #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h index c0739187a920..8b9884c726ad 100644 --- a/arch/arm64/include/asm/fixmap.h +++ b/arch/arm64/include/asm/fixmap.h @@ -8,7 +8,7 @@ * Copyright (C) 1998 Ingo Molnar * Copyright (C) 2013 Mark Salter <msalter@redhat.com> * - * Adapted from arch/x86_64 version. + * Adapted from arch/x86 version. 
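The new ESR_ELx_VAL_BRK64() template in the esr.h hunk above composes a syndrome value from its parts. A worked check (values per asm/esr.h: EC_BRK64 = 0x3c in bits [31:26], IL = bit 25) showing it reproduces the 0xf2000000 constant that the removed DBG_ESR_VAL_BRK() macro in debug-monitors.h hard-coded:

```c
#include <assert.h>
#include <stdint.h>

#define ESR_ELx_EC_BRK64	0x3cUL		/* EC: BRK from AArch64 */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_IL		(1UL << 25)	/* 32-bit instruction */
#define BUG_BRK_IMM		0x800		/* new BUG()/WARN() immediate */

int main(void)
{
	uint64_t esr = (ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) |
		       ESR_ELx_IL | (BUG_BRK_IMM & 0xffff);

	assert(esr == 0xf2000800);	/* == old DBG_ESR_VAL_BRK(0x800) */
	return 0;
}
```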
* */ diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 74069b3bd919..007a69fc4f40 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -20,10 +20,17 @@ #include <linux/futex.h> #include <linux/uaccess.h> + +#include <asm/alternative.h> +#include <asm/cpufeature.h> #include <asm/errno.h> +#include <asm/sysreg.h> #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \ asm volatile( \ + ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \ + CONFIG_ARM64_PAN) \ +" prfm pstl1strm, %2\n" \ "1: ldxr %w1, %2\n" \ insn "\n" \ "2: stlxr %w3, %w0, %2\n" \ @@ -39,6 +46,8 @@ " .align 3\n" \ " .quad 1b, 4b, 2b, 4b\n" \ " .popsection\n" \ + ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \ + CONFIG_ARM64_PAN) \ : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \ : "r" (oparg), "Ir" (-EFAULT) \ : "memory") @@ -112,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; asm volatile("// futex_atomic_cmpxchg_inatomic\n" +" prfm pstl1strm, %2\n" "1: ldxr %w1, %2\n" " sub %w3, %w1, %w4\n" " cbnz %w3, 3f\n" diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h index 6aae421f4d73..2bb7009bdac7 100644 --- a/arch/arm64/include/asm/hardirq.h +++ b/arch/arm64/include/asm/hardirq.h @@ -24,9 +24,7 @@ typedef struct { unsigned int __softirq_pending; -#ifdef CONFIG_SMP unsigned int ipi_irqs[NR_IPI]; -#endif } ____cacheline_aligned irq_cpustat_t; #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ @@ -34,10 +32,8 @@ typedef struct { #define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++ #define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member) -#ifdef CONFIG_SMP u64 smp_irq_stat_cpu(unsigned int cpu); #define arch_irq_stat_cpu smp_irq_stat_cpu -#endif #define __ARCH_IRQ_EXIT_IRQS_DISABLED 1 diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 2fd9b14ca295..bb4052e85dba 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -13,10 +13,6 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifndef __ASM_HUGETLB_H diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index 52b484b6aa1a..4c47cb2fbb52 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -16,6 +16,8 @@ #ifndef __ASM_HW_BREAKPOINT_H #define __ASM_HW_BREAKPOINT_H +#include <asm/cputype.h> + #ifdef __KERNEL__ struct arch_hw_breakpoint_ctrl { @@ -132,5 +134,17 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task) extern struct pmu perf_ops_bp; +/* Determine number of BRP registers available. */ +static inline int get_num_brps(void) +{ + return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1; +} + +/* Determine number of WRP registers available. 
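The get_num_brps()/get_num_wrps() helpers here decode ID_AA64DFR0_EL1, which holds "count minus one" in BRPs[15:12] and WRPs[23:20]. A worked illustration with a made-up register value:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical CPU: BRPs field = 5, WRPs field = 3. */
	uint64_t dfr0 = 0x0000000000305000ULL;

	assert((((dfr0 >> 12) & 0xf) + 1) == 6);	/* 6 breakpoints */
	assert((((dfr0 >> 20) & 0xf) + 1) == 4);	/* 4 watchpoints */
	return 0;
}
```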
*/ +static inline int get_num_wrps(void) +{ + return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1; +} + #endif /* __KERNEL__ */ #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h index b4f6b19a8a68..8e24ef3f7c82 100644 --- a/arch/arm64/include/asm/irq_work.h +++ b/arch/arm64/include/asm/irq_work.h @@ -1,8 +1,6 @@ #ifndef __ASM_IRQ_WORK_H #define __ASM_IRQ_WORK_H -#ifdef CONFIG_SMP - #include <asm/smp.h> static inline bool arch_irq_work_has_interrupt(void) @@ -10,13 +8,4 @@ static inline bool arch_irq_work_has_interrupt(void) return !!__smp_cross_call; } -#else - -static inline bool arch_irq_work_has_interrupt(void) -{ - return false; -} - -#endif - #endif /* __ASM_IRQ_WORK_H */ diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h index c0e5165c2f76..1b5e0e843c3a 100644 --- a/arch/arm64/include/asm/jump_label.h +++ b/arch/arm64/include/asm/jump_label.h @@ -26,14 +26,28 @@ #define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE -static __always_inline bool arch_static_branch(struct static_key *key) +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) { asm goto("1: nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" ".align 3\n\t" ".quad 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" - : : "i"(key) : : l_yes); + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm goto("1: b %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %c0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); return false; l_yes: diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index ac6fafb95fe7..7605e095217f 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -171,10 +171,13 @@ #define HSTR_EL2_TTEE (1 << 16) #define HSTR_EL2_T(x) (1 << x) +/* Hyp Coproccessor Trap Register Shifts */ +#define CPTR_EL2_TFP_SHIFT 10 + /* Hyp Coprocessor Trap Register */ #define CPTR_EL2_TCPAC (1 << 31) #define CPTR_EL2_TTA (1 << 20) -#define CPTR_EL2_TFP (1 << 10) +#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) /* Hyp Debug Configuration Register bits */ #define MDCR_EL2_TDRA (1 << 11) diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 3c5fe685a2d6..67fa0de3d483 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -46,24 +46,16 @@ #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ #define PAR_EL1 21 /* Physical Address Register */ #define MDSCR_EL1 22 /* Monitor Debug System Control Register */ -#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */ -#define DBGBCR15_EL1 38 -#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */ -#define DBGBVR15_EL1 54 -#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */ -#define DBGWCR15_EL1 70 -#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */ -#define DBGWVR15_EL1 86 -#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */ +#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */ /* 32bit specific registers. 
Keep them at the end of the range */ -#define DACR32_EL2 88 /* Domain Access Control Register */ -#define IFSR32_EL2 89 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */ -#define TEECR32_EL1 92 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 94 +#define DACR32_EL2 24 /* Domain Access Control Register */ +#define IFSR32_EL2 25 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */ +#define TEECR32_EL1 28 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 29 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 30 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ @@ -132,6 +124,8 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_ich_vtr_el2(void); +extern u32 __kvm_get_mdcr_el2(void); + #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 2709db2a7eac..415938dc45cf 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -103,15 +103,34 @@ struct kvm_vcpu_arch { /* HYP configuration */ u64 hcr_el2; + u32 mdcr_el2; /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Debug state */ + /* Guest debug state */ u64 debug_flags; + /* + * We maintain more than a single set of debug registers to support + * debugging the guest from the host and to maintain separate host and + * guest state during world switches. vcpu_debug_state are the debug + * registers of the vcpu as the guest sees them. host_debug_state are + * the host registers which are saved and restored during + * world switches. external_debug_state contains the debug + * values we want to debug the guest. This is set via the + * KVM_SET_GUEST_DEBUG ioctl. + * + * debug_ptr points to the set of debug registers that should be loaded + * onto the hardware when running the guest. + */ + struct kvm_guest_debug_arch *debug_ptr; + struct kvm_guest_debug_arch vcpu_debug_state; + struct kvm_guest_debug_arch external_debug_state; + /* Pointer to host CPU context */ kvm_cpu_context_t *host_cpu_context; + struct kvm_guest_debug_arch host_debug_state; /* VGIC state */ struct vgic_cpu vgic_cpu; @@ -122,6 +141,17 @@ struct kvm_vcpu_arch { * here. */ + /* + * Guest registers we preserve during guest debugging. + * + * These shadow registers are updated by the kvm_handle_sys_reg + * trap handler if the guest accesses or updates them while we + * are using guest debug. 
+ */ + struct { + u32 mdscr_el1; + } guest_debug_preserved; + /* Don't run the guest */ bool pause; @@ -216,15 +246,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, hyp_stack_ptr, vector_ptr); } -struct vgic_sr_vectors { - void *save_vgic; - void *restore_vgic; -}; - static inline void kvm_arch_hardware_disable(void) {} static inline void kvm_arch_hardware_unsetup(void) {} static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +void kvm_arm_init_debug(void); +void kvm_arm_setup_debug(struct kvm_vcpu *vcpu); +void kvm_arm_clear_debug(struct kvm_vcpu *vcpu); +void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu); + #endif /* __ARM64_KVM_HOST_H__ */ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h new file mode 100644 index 000000000000..3de42d68611d --- /dev/null +++ b/arch/arm64/include/asm/lse.h @@ -0,0 +1,53 @@ +#ifndef __ASM_LSE_H +#define __ASM_LSE_H + +#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) + +#include <linux/stringify.h> +#include <asm/alternative.h> +#include <asm/cpufeature.h> + +#ifdef __ASSEMBLER__ + +.arch_extension lse + +.macro alt_lse, llsc, lse + alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS +.endm + +#else /* __ASSEMBLER__ */ + +__asm__(".arch_extension lse"); + +/* Move the ll/sc atomics out-of-line */ +#define __LL_SC_INLINE +#define __LL_SC_PREFIX(x) __ll_sc_##x +#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x)) + +/* Macro for constructing calls to out-of-line ll/sc atomics */ +#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n" + +/* In-line patching at runtime */ +#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ + ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) + +#endif /* __ASSEMBLER__ */ +#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ + +#ifdef __ASSEMBLER__ + +.macro alt_lse, llsc, lse + \llsc +.endm + +#else /* __ASSEMBLER__ */ + +#define __LL_SC_INLINE static inline +#define __LL_SC_PREFIX(x) x +#define __LL_SC_EXPORT(x) + +#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc + +#endif /* __ASSEMBLER__ */ +#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +#endif /* __ASM_LSE_H */ diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index f800d45ea226..6b4c3ad75a2a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -81,12 +81,6 @@ #define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET)) /* - * Convert a physical address to a Page Frame Number and back - */ -#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT)) -#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT) - -/* * Convert a page to/from a physical address */ #define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page))) @@ -114,6 +108,14 @@ extern phys_addr_t memstart_addr; #define PHYS_OFFSET ({ memstart_addr; }) /* + * The maximum physical address that the linear direct mapping + * of system RAM can cover. (PAGE_OFFSET can be interpreted as + * a 2's complement signed quantity and negated to derive the + * maximum size of the linear mapping.) + */ +#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; }) + +/* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. 
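The MAX_MEMBLOCK_ADDR definition in the memory.h hunk above leans on unsigned wrap-around: negating PAGE_OFFSET yields the size of the linear mapping. A worked example, assuming the 39-bit VA configuration (PAGE_OFFSET = 0xffffffc000000000) and a hypothetical memstart_addr of 0x80000000:

```c
/*
 *   -PAGE_OFFSET (mod 2^64)  = 0x0000004000000000  (256 GiB linear map)
 *
 *   memstart_addr - PAGE_OFFSET - 1
 *     = 0x80000000 + 0x4000000000 - 1
 *     = 0x000000407fffffff    (highest physical address the mapping covers)
 */
```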
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 79fcfb048884..030208767185 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,7 +28,6 @@ typedef struct {
 #define ASID(mm)	((mm)->context.id & 0xffff)
 
 extern void paging_init(void);
-extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 4fde8c1df97f..0a456bef8c79 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,8 +16,6 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
-#ifdef CONFIG_SMP
-
 static inline void set_my_cpu_offset(unsigned long off)
 {
 	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -38,12 +36,6 @@ static inline unsigned long __my_cpu_offset(void)
 }
 #define __my_cpu_offset __my_cpu_offset()
 
-#else	/* !CONFIG_SMP */
-
-#define set_my_cpu_offset(x)	do { } while (0)
-
-#endif /* CONFIG_SMP */
-
 #define PERCPU_OP(op, asm_op)						\
 static inline unsigned long __percpu_##op(void *ptr,			\
 		unsigned long val, int size)				\
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 6471773db6fd..7bd3cdb533ea 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,7 +17,7 @@
 #ifndef __ASM_PERF_EVENT_H
 #define __ASM_PERF_EVENT_H
 
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
 struct pt_regs;
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 59bfae75dc98..24154b055835 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -104,6 +104,7 @@
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
+#define PTE_DBM			(_AT(pteval_t, 1) << 51)	/* Dirty Bit Management */
 #define PTE_PXN			(_AT(pteval_t, 1) << 53)	/* Privileged XN */
 #define PTE_UXN			(_AT(pteval_t, 1) << 54)	/* User XN */
@@ -168,5 +169,7 @@
 #define TCR_TG1_64K		(UL(3) << 30)
 #define TCR_ASID16		(UL(1) << 36)
 #define TCR_TBI0		(UL(1) << 37)
+#define TCR_HA			(UL(1) << 39)
+#define TCR_HD			(UL(1) << 40)
 
 #endif
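TCR_HA and TCR_HD gate the hardware access-flag and dirty-state updates that the new PTE_DBM bit depends on. A minimal sketch of how they might be turned on at MMU setup time, assuming the CPU implements ARMv8.1 hardware AF/DBM (the function name and flow are illustrative, not this patch's code):

	static inline void enable_hw_afdbm_sketch(void)
	{
		unsigned long tcr;

		asm volatile("mrs %0, tcr_el1" : "=r" (tcr));
		tcr |= TCR_HA;		/* hardware Access flag updates */
		tcr |= TCR_HD;		/* hardware Dirty bit management */
		asm volatile("msr tcr_el1, %0" : : "r" (tcr));
		asm volatile("isb" : : : "memory");
	}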
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 56283f8a675c..6900b2d95371 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_PGTABLE_H
 #define __ASM_PGTABLE_H
 
+#include <asm/bug.h>
 #include <asm/proc-fns.h>
 
 #include <asm/memory.h>
@@ -27,7 +28,11 @@
 #define PTE_VALID		(_AT(pteval_t, 1) << 0)
 #define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
 #define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define PTE_WRITE		(PTE_DBM)		 /* same as DBM */
+#else
 #define PTE_WRITE		(_AT(pteval_t, 1) << 57)
+#endif
 #define PTE_PROT_NONE		(_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
 
 /*
@@ -48,18 +53,16 @@
 #define FIRST_USER_ADDRESS	0UL
 
 #ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
 extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#ifdef CONFIG_SMP
 #define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#else
-#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF)
-#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF)
-#endif
 
 #define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
@@ -137,12 +140,20 @@ extern struct page *empty_zero_page;
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
 #define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
-#define pte_dirty(pte)		(!!(pte_val(pte) & PTE_DIRTY))
 #define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
 #define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
 #define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)		(!(pte_val(pte) & PTE_UXN))
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define pte_hw_dirty(pte)	(!(pte_val(pte) & PTE_RDONLY))
+#else
+#define pte_hw_dirty(pte)	(0)
+#endif
+#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
+#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
 #define pte_valid_user(pte) \
 	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
@@ -209,20 +220,49 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 	}
 }
 
+struct mm_struct;
+struct vm_area_struct;
+
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 
+/*
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
+ * (PTE_WRITE == PTE_DBM):
+ *
+ * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
+ *   0      0      |     1          0          0
+ *   0      1      |     1          1          0
+ *   1      0      |     1          0          1
+ *   1      1      |     0          1          x
+ *
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
+ *
+ *   PTE_DIRTY || !PTE_RDONLY
+ */
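/*
 * Illustrative rendering of the dirty check described above; the helper
 * name is made up, the kernel versions are the pte_*dirty macros in this
 * hunk.
 */
static inline bool pte_dirty_sketch(pte_t pte)
{
	/* software dirty: set by the page fault mechanism */
	if (pte_val(pte) & PTE_DIRTY)
		return true;
	/* hardware dirty: a DBM-mediated write has cleared PTE_RDONLY */
	if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) && !(pte_val(pte) & PTE_RDONLY))
		return true;
	return false;
}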
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_valid_user(pte)) {
 		if (!pte_special(pte) && pte_exec(pte))
 			__sync_icache_dcache(pte, addr);
-		if (pte_dirty(pte) && pte_write(pte))
+		if (pte_sw_dirty(pte) && pte_write(pte))
 			pte_val(pte) &= ~PTE_RDONLY;
 		else
 			pte_val(pte) |= PTE_RDONLY;
 	}
 
+	/*
+	 * If the existing pte is valid, check for potential race with
+	 * hardware updates of the pte (ptep_set_access_flags safely changes
+	 * valid ptes without going through an invalid entry).
+	 */
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+	    pte_valid(*ptep)) {
+		BUG_ON(!pte_young(pte));
+		BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+	}
+
 	set_pte(ptep, pte);
 }
 
@@ -461,6 +501,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
 			      PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+	/* preserve the hardware dirty information */
+	if (pte_hw_dirty(pte))
+		newprot |= PTE_DIRTY;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }
@@ -470,6 +513,101 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
 }
 
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * Atomic pte/pmd modifications.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned int tmp, res;
+
+	asm volatile("//	ptep_test_and_clear_young\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	ubfx	%w3, %w0, %5, #1	// extract PTE_AF (young)\n"
+	"	and	%0, %0, %4		// clear PTE_AF\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
+	: "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+
+	return res;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+					    unsigned long address,
+					    pmd_t *pmdp)
+{
+	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pte_t *ptep)
+{
+	pteval_t old_pteval;
+	unsigned int tmp;
+
+	asm volatile("//	ptep_get_and_clear\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	stxr	%w1, xzr, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
+
+	return __pte(old_pteval);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+				       unsigned long address, pmd_t *pmdp)
+{
+	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+	pteval_t pteval;
+	unsigned long tmp;
+
+	asm volatile("//	ptep_set_wrprotect\n"
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldxr	%0, %2\n"
+	"	tst	%0, %4			// check for hw dirty (!PTE_RDONLY)\n"
+	"	csel	%1, %3, xzr, eq		// set PTE_DIRTY|PTE_RDONLY if dirty\n"
+	"	orr	%0, %0, %1		// if !dirty, PTE_RDONLY is already set\n"
+	"	and	%0, %0, %5		// clear PTE_WRITE/PTE_DBM\n"
+	"	stxr	%w1, %0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	: "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+	: "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
+	: "cc");
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+				      unsigned long address, pmd_t *pmdp)
+{
+	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#endif
+#endif /* CONFIG_ARM64_HW_AFDBM */
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
@@ -505,6 +643,21 @@ extern int kern_addr_valid(unsigned long addr);
 
 #define pgtable_cache_init() do { } while (0)
 
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+				    unsigned long addr, pte_t *ptep)
+{
+	/*
+	 * set_pte() does not have a DSB for user mappings, so make sure that
+	 * the page table write is visible.
+	 */
+	dsb(ishst);
+}
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e54f01..98f32355dc97 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
 
 #endif
 
+void cpu_enable_pan(void);
+
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1aaebdc..000000000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index d6dd9fdbc3be..536274ed292e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -183,11 +183,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
 
 #define instruction_pointer(regs)	((unsigned long)(regs)->pc)
 
-#ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
 
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index db02be81b90a..d9c3d6a6100a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -20,10 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/thread_info.h>
 
-#ifndef CONFIG_SMP
-# error "<asm/smp.h> included in non-SMP build"
-#endif
-
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 struct seq_file;
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 7abf7570c00f..af58dcdefb21 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,6 +56,4 @@ static inline int get_logical_index(u64 mpidr)
 	return -EINVAL;
 }
 
-void __init do_post_cpus_up_work(void);
-
 #endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee128732435..c85e96d174a5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_SPINLOCK_H
 #define __ASM_SPINLOCK_H
 
+#include <asm/lse.h>
 #include <asm/spinlock_types.h>
 #include <asm/processor.h>
 
@@ -38,11 +39,21 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	asm volatile(
 	/* Atomically increment the next ticket. */
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 "	prfm	pstl1strm, %3\n"
 "1:	ldaxr	%w0, %3\n"
 "	add	%w1, %w0, %w5\n"
 "	stxr	%w2, %w1, %3\n"
-"	cbnz	%w2, 1b\n"
+"	cbnz	%w2, 1b\n",
+	/* LSE atomics */
+"	mov	%w2, %w5\n"
+"	ldadda	%w2, %w0, %3\n"
+"	nop\n"
+"	nop\n"
+"	nop\n"
+	)
+
 	/* Did we get the lock? */
 "	eor	%w1, %w0, %w0, ror #16\n"
 "	cbz	%w1, 3f\n"
@@ -67,15 +78,25 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 	unsigned int tmp;
 	arch_spinlock_t lockval;
 
-	asm volatile(
-"	prfm	pstl1strm, %2\n"
-"1:	ldaxr	%w0, %2\n"
-"	eor	%w1, %w0, %w0, ror #16\n"
-"	cbnz	%w1, 2f\n"
-"	add	%w0, %w0, %3\n"
-"	stxr	%w1, %w0, %2\n"
-"	cbnz	%w1, 1b\n"
-"2:"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	prfm	pstl1strm, %2\n"
+	"1:	ldaxr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 2f\n"
+	"	add	%w0, %w0, %3\n"
+	"	stxr	%w1, %w0, %2\n"
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	eor	%w1, %w0, %w0, ror #16\n"
+	"	cbnz	%w1, 1f\n"
+	"	add	%w1, %w0, %3\n"
+	"	casa	%w0, %w1, %2\n"
+	"	and	%w1, %w1, #0xffff\n"
+	"	eor	%w1, %w1, %w0, lsr #16\n"
+	"1:")
 	: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
 	: "I" (1 << TICKET_SHIFT)
 	: "memory");
@@ -85,10 +106,19 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	asm volatile(
-"	stlrh	%w1, %0\n"
-	: "=Q" (lock->owner)
-	: "r" (lock->owner + 1)
+	unsigned long tmp;
+
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	ldrh	%w1, %0\n"
+	"	add	%w1, %w1, #1\n"
+	"	stlrh	%w1, %0",
+	/* LSE atomics */
+	"	mov	%w1, #1\n"
+	"	nop\n"
+	"	staddlh	%w1, %0")
+	: "=Q" (lock->owner), "=&r" (tmp)
+	:
 	: "memory");
 }
 
@@ -123,13 +153,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"	sevl\n"
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %1\n"
 	"	cbnz	%w0, 1b\n"
 	"	stxr	%w0, %w2, %1\n"
 	"	cbnz	%w0, 2b\n"
+	"	nop",
+	/* LSE atomics */
+	"1:	mov	%w0, wzr\n"
+	"2:	casa	%w0, %w2, %1\n"
+	"	cbz	%w0, 3f\n"
+	"	ldxr	%w0, %1\n"
+	"	cbz	%w0, 2b\n"
+	"	wfe\n"
+	"	b	1b\n"
+	"3:")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -139,11 +180,18 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	asm volatile(
-	"	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1f\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"1:	ldaxr	%w0, %1\n"
+	"	cbnz	%w0, 2f\n"
 	"	stxr	%w0, %w2, %1\n"
-	"1:\n"
+	"	cbnz	%w0, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	mov	%w0, wzr\n"
+	"	casa	%w0, %w2, %1\n"
+	"	nop\n"
+	"	nop")
 	: "=&r" (tmp), "+Q" (rw->lock)
 	: "r" (0x80000000)
 	: "memory");
@@ -153,9 +201,10 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 
 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
-	asm volatile(
-	"	stlr	%w1, %0\n"
-	: "=Q" (rw->lock) : "r" (0) : "memory");
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	"	stlr	wzr, %0",
+	"	swpl	wzr, wzr, %0")
+	: "=Q" (rw->lock) :: "memory");
 }
 
 /* write_can_lock - would write_trylock() succeed? */
@@ -172,6 +221,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
  *
  * The memory barriers are implicit with the load-acquire and store-release
  * instructions.
+ *
+ * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
+ * and LSE implementations may exhibit different behaviour (although this
+ * will have no effect on lockdep).
  */
 static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int tmp, tmp2;
 
 	asm volatile(
 	"	sevl\n"
+	ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	wfe\n"
 	"2:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
 	"	tbnz	%w0, #31, 1b\n"
 	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
+	"	nop\n"
+	"	cbnz	%w1, 2b",
+	/* LSE atomics */
+	"1:	wfe\n"
+	"2:	ldxr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1b\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w0, %w1, %w0\n"
+	"	cbnz	%w0, 2b")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp, tmp2;
 
-	asm volatile(
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
 	"1:	ldxr	%w0, %2\n"
 	"	sub	%w0, %w0, #1\n"
 	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
+	"	cbnz	%w1, 1b",
+	/* LSE atomics */
+	"	movn	%w0, #0\n"
+	"	nop\n"
+	"	nop\n"
+	"	staddl	%w0, %2")
 	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
 	: "memory");
@@ -206,17 +276,28 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned int tmp, tmp2 = 1;
+	unsigned int tmp, tmp2;
 
-	asm volatile(
-	"	ldaxr	%w0, %2\n"
+	asm volatile(ARM64_LSE_ATOMIC_INSN(
+	/* LL/SC */
+	"	mov	%w1, #1\n"
+	"1:	ldaxr	%w0, %2\n"
 	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1f\n"
+	"	tbnz	%w0, #31, 2f\n"
 	"	stxr	%w1, %w0, %2\n"
-	"1:\n"
-	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+	"	cbnz	%w1, 1b\n"
+	"2:",
+	/* LSE atomics */
+	"	ldr	%w0, %2\n"
+	"	adds	%w1, %w0, #1\n"
+	"	tbnz	%w1, #31, 1f\n"
+	"	casa	%w0, %w1, %2\n"
+	"	sbc	%w1, %w1, %w0\n"
+	"	nop\n"
+	"1:")
+	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
 	:
-	: "memory");
+	: "cc", "memory");
 
 	return !tmp2;
 }
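The ARM64_LSE_ATOMIC_INSN pattern used throughout the locking code above generalises: both alternatives must occupy the same number of instructions, which is why the shorter LSE sequences are padded with nops. A minimal sketch under the same constraints (the function name is made up, not part of the patch):

	#include <asm/lse.h>

	static inline void atomic_add_sketch(int i, int *counter)
	{
		unsigned long tmp;
		int result;

		asm volatile(ARM64_LSE_ATOMIC_INSN(
		/* LL/SC: four instructions */
		"1:	ldxr	%w0, %2\n"
		"	add	%w0, %w0, %w3\n"
		"	stxr	%w1, %w0, %2\n"
		"	cbnz	%w1, 1b",
		/* LSE: one instruction plus three nops of padding */
		"	stadd	%w3, %2\n"
		"	nop\n"
		"	nop\n"
		"	nop")
		: "=&r" (result), "=&r" (tmp), "+Q" (*counter)
		: "r" (i));
	}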
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d383665f56..55be59a35e3f 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,6 +20,8 @@
 # error "please don't include this file directly"
 #endif
 
+#include <linux/types.h>
+
 #define TICKET_SHIFT	16
 
 typedef struct {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5c89df0acbcb..a7f3d4b2514d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,8 +20,29 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include <asm/opcodes.h>
+
+#define SCTLR_EL1_CP15BEN	(0x1 << 5)
+#define SCTLR_EL1_SED		(0x1 << 8)
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ *  C5.2, version: ARM DDI 0487A.f)
+ *	[20-19] : Op0
+ *	[18-16] : Op1
+ *	[15-12] : CRn
+ *	[11-8]  : CRm
+ *	[7-5]   : Op2
+ */
 #define sys_reg(op0, op1, crn, crm, op2) \
-	((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+	((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#define REG_PSTATE_PAN_IMM	sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN		(1 << 23)
+
+#define SET_PSTATE_PAN(x)	__inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+				(!!x)<<8 | 0x1f)
 
 #ifdef __ASSEMBLY__
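/*
 * Worked example of the encoding above: REG_PSTATE_PAN_IMM is
 * sys_reg(0, 0, 4, 0, 4), i.e.
 *
 *   ((0 & 3) << 19) | (0 << 16) | (4 << 12) | (0 << 8) | (4 << 5) = 0x4080
 *
 * so SET_PSTATE_PAN(1) emits 0xd5000000 | 0x4080 | (1 << 8) | 0x1f
 * = 0xd500419f, the "msr pan, #1" instruction.
 */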
@@ -31,11 +52,11 @@
 	.equ	__reg_num_xzr, 31
 
 	.macro	mrs_s, rt, sreg
-	.inst	0xd5300000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5200000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 	.macro	msr_s, sreg, rt
-	.inst	0xd5100000|(\sreg)|(__reg_num_\rt)
+	.inst	0xd5000000|(\sreg)|(__reg_num_\rt)
 	.endm
 
 #else
 
@@ -47,14 +68,23 @@ asm(
 "	.equ	__reg_num_xzr, 31\n"
 "\n"
 "	.macro	mrs_s, rt, sreg\n"
-"	.inst	0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 "\n"
 "	.macro	msr_s, sreg, rt\n"
-"	.inst	0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"	.inst	0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
 "	.endm\n"
 );
 
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+	u32 val;
+
+	asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+	val &= ~clear;
+	val |= set;
+	asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
 #endif
 
 #endif	/* __ASM_SYSREG_H */
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c7eb8d..d6e6b6660380 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
 		flush_tlb_mm(tlb->mm);
 	} else {
 		struct vm_area_struct vma = { .vm_mm = tlb->mm, };
-		flush_tlb_range(&vma, tlb->start, tlb->end);
+		/*
+		 * The intermediate page table levels are already handled by
+		 * the __(pte|pmd|pud)_free_tlb() functions, so last level
+		 * TLBI is sufficient here.
+		 */
+		__flush_tlb_range(&vma, tlb->start, tlb->end, true);
 	}
 }
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 934815d45eda..7bd2da021658 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,27 +87,56 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 		((unsigned long)ASID(vma->vm_mm) << 48);
 
 	dsb(ishst);
-	asm("tlbi	vae1is, %0" : : "r" (addr));
+	asm("tlbi	vale1is, %0" : : "r" (addr));
 	dsb(ish);
 }
 
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
+
 static inline void __flush_tlb_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
+				     unsigned long start, unsigned long end,
+				     bool last_level)
 {
 	unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_mm(vma->vm_mm);
+		return;
+	}
+
 	start = asid | (start >> 12);
 	end = asid | (end >> 12);
 
 	dsb(ishst);
-	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
-		asm("tlbi	vae1is, %0" : : "r"(addr));
+	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+		if (last_level)
+			asm("tlbi	vale1is, %0" : : "r"(addr));
+		else
+			asm("tlbi	vae1is, %0" : : "r"(addr));
+	}
 	dsb(ish);
 }
 
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	__flush_tlb_range(vma, start, end, false);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
 	unsigned long addr;
+
+	if ((end - start) > MAX_TLB_RANGE) {
+		flush_tlb_all();
+		return;
+	}
+
 	start >>= 12;
 	end >>= 12;
 
@@ -119,29 +148,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long e
 }
 
 /*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_range(vma, start, end);
-	else
-		flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if ((end - start) <= MAX_TLB_RANGE)
-		__flush_tlb_kernel_range(start, end);
-	else
-		flush_tlb_all();
-}
-
-/*
  * Used to invalidate the TLB (walk caches) corresponding to intermediate page
  * table levels (pgd/pud/pmd).
  */
@@ -154,20 +160,6 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 	asm("tlbi	vae1is, %0" : : "r" (addr));
 	dsb(ish);
 }
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
-				    unsigned long addr, pte_t *ptep)
-{
-	/*
-	 * set_pte() does not have a DSB for user mappings, so make sure that
-	 * the page table write is visible.
-	 */
-	dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
 #endif
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 225ec3524fbf..a3e9d6fdbf21 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,8 +1,6 @@
 #ifndef __ASM_TOPOLOGY_H
 #define __ASM_TOPOLOGY_H
 
-#ifdef CONFIG_SMP
-
 #include <linux/cpumask.h>
 
 struct cpu_topology {
@@ -24,13 +22,6 @@ void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
 const struct cpumask *cpu_coregroup_mask(int cpu);
 
-#else
-
-static inline void init_cpu_topology(void) { }
-static inline void store_cpu_topology(unsigned int cpuid) { }
-
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 232e4ba5d314..0cc2f29bf9da 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,13 +34,32 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	extern char __irqentry_text_start[];
+	extern char __irqentry_text_end[];
+
+	return ptr >= (unsigned long)&__irqentry_text_start &&
+	       ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+	return 0;
+}
+#endif
+
 static inline int in_exception_text(unsigned long ptr)
 {
 	extern char __exception_text_start[];
 	extern char __exception_text_end[];
+	int in;
+
+	in = ptr >= (unsigned long)&__exception_text_start &&
+	     ptr < (unsigned long)&__exception_text_end;
 
-	return ptr >= (unsigned long)&__exception_text_start &&
-	       ptr < (unsigned long)&__exception_text_end;
+	return in ? : __in_irqentry_text(ptr);
 }
 
 #endif
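The `return in ? : __in_irqentry_text(ptr);` above is not a typo: it uses the GNU C conditional extension with an omitted middle operand, where `a ?: b` evaluates to `a` when `a` is non-zero and to `b` otherwise. It is equivalent to:

	return in ? in : __in_irqentry_text(ptr);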
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba449bf1..b2ede967fe7d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 #include <asm/errno.h>
 #include <asm/memory.h>
 #include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err));	\
@@ -148,6 +153,8 @@ do {									\
 		BUILD_BUG();						\
 	}								\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -194,6 +201,8 @@ do {									\
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "%w", __pu_val, (ptr), (err));	\
@@ -210,6 +219,8 @@ do {									\
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
+			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __put_user(x, ptr)						\
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 86553213c132..4318866d053c 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -18,4 +18,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 
 #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
 
+/* Rebind event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+	return true;
+}
+
 #endif /* _ASM_ARM64_XEN_EVENTS_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 73cf0f54d57c..361c8a8ef55f 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -27,5 +27,6 @@
 #define HWCAP_SHA1		(1 << 5)
 #define HWCAP_SHA2		(1 << 6)
 #define HWCAP_CRC32		(1 << 7)
+#define HWCAP_ATOMICS		(1 << 8)
 
 #endif /* _UAPI__ASM_HWCAP_H */
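The new HWCAP_ATOMICS bit lets userspace discover the LSE atomics at runtime through the auxiliary vector. A minimal detection sketch (the fallback define mirrors the value added above):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP_ATOMICS
	#define HWCAP_ATOMICS	(1 << 8)
	#endif

	int main(void)
	{
		/* AT_HWCAP mirrors the HWCAP_* bits exported by the kernel */
		if (getauxval(AT_HWCAP) & HWCAP_ATOMICS)
			printf("LSE atomics supported\n");
		else
			printf("LL/SC only\n");
		return 0;
	}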
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index d26832022127..0cd7b5947dfc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -53,14 +53,20 @@ struct kvm_regs {
 	struct user_fpsimd_state fp_regs;
 };
 
-/* Supported Processor Types */
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 sysreg table.
+ */
 #define KVM_ARM_TARGET_AEM_V8		0
 #define KVM_ARM_TARGET_FOUNDATION_V8	1
 #define KVM_ARM_TARGET_CORTEX_A57	2
 #define KVM_ARM_TARGET_XGENE_POTENZA	3
 #define KVM_ARM_TARGET_CORTEX_A53	4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8	5
 
-#define KVM_ARM_NUM_TARGETS		5
+#define KVM_ARM_NUM_TARGETS		6
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT	0
@@ -100,12 +106,39 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually fewer (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide, we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
 struct kvm_guest_debug_arch {
+	__u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
 };
 
 struct kvm_debug_exit_arch {
+	__u32 hsr;
+	__u64 far;	/* used for watchpoints */
 };
 
+/*
+ * Architecture-specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP		(1 << 16)
+#define KVM_GUESTDBG_USE_HW		(1 << 17)
+
 struct kvm_sync_regs {
 };
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643bbe54..208db3df135a 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define PSR_I_BIT	0x00000080
 #define PSR_A_BIT	0x00000100
 #define PSR_D_BIT	0x00000200
+#define PSR_PAN_BIT	0x00400000
 #define PSR_Q_BIT	0x08000000
 #define PSR_V_BIT	0x10000000
 #define PSR_C_BIT	0x20000000 |