From 541ec870ef31433018d245614254bd9d810a9ac3 Mon Sep 17 00:00:00 2001
From: Mark Rutland
Date: Tue, 31 May 2016 12:33:03 +0100
Subject: arm64: kill ESR_LNX_EXEC

Currently we treat ESR_EL1 bit 24 as software-defined for distinguishing
instruction aborts from data aborts, but this bit is architecturally RES0
for instruction aborts, and could be allocated for an arbitrary purpose in
future. Additionally, we hard-code the value in entry.S without the
mnemonic, making the code difficult to understand.

Instead, remove ESR_LNX_EXEC, and distinguish aborts based on the esr,
which we already pass to the sole user of ESR_LNX_EXEC. A new helper,
is_el0_instruction_abort(), is added to make the logic clear. Any
instruction aborts taken from EL1 will already have been handled by
bad_mode, so we need not handle that case in the helper.

For consistency, the existing permission_fault helper is renamed to
is_permission_fault, and its return type is changed to bool. There should
be no functional change, as the return value was already a boolean
expression and the result is only used in another boolean expression.

Signed-off-by: Mark Rutland
Cc: Dave P Martin
Cc: Huang Shijie
Cc: James Morse
Cc: Marc Zyngier
Cc: Will Deacon
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm64/kernel/entry.S')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 12e8d2bcb3f9..eefffa81c6df 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -532,7 +532,7 @@ el0_ia:
 	enable_dbg_and_irq
 	ct_user_exit
 	mov	x0, x26
-	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
+	mov	x1, x25
 	mov	x2, sp
 	bl	do_mem_abort
 	b	ret_to_user
--
cgit v1.2.3

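The fault.c side of the change above (the is_permission_fault rename and the
new helper) is outside this listing, which is limited to entry.S. As a rough
sketch of the helper the commit message describes, written from that
description rather than copied from the tree, it boils down to checking the
exception class field of the ESR:

	/*
	 * Sketch only: an instruction abort taken from EL0 reports
	 * EC = ESR_ELx_EC_IABT_LOW. Instruction aborts taken from EL1 never
	 * reach this path, since bad_mode has already handled them.
	 */
	static inline bool is_el0_instruction_abort(unsigned int esr)
	{
		return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
	}
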
From 7dd01aef055792260287c6708daf75aac3918f66 Mon Sep 17 00:00:00 2001
From: Andre Przywara
Date: Tue, 28 Jun 2016 18:07:32 +0100
Subject: arm64: trap userspace "dc cvau" cache operation on errata-affected core

The ARM errata 819472, 826319, 827319 and 824069 for affected
Cortex-A53 cores demand that "dc cvau" instructions be promoted to
"dc civac". Since we allow userspace to also emit those instructions,
we should make sure that "dc cvau" gets promoted there too.
So let's grasp the nettle here and actually trap every userland cache
maintenance instruction once we detect at least one affected core in
the system. We then emulate the instruction by executing it on behalf
of userland, promoting "dc cvau" to "dc civac" on the way and injecting
an access fault back into userspace.

Signed-off-by: Andre Przywara
[catalin.marinas@arm.com: s/set_segfault/arm64_notify_segfault/]
Signed-off-by: Catalin Marinas
---
 arch/arm64/include/asm/processor.h |  1 +
 arch/arm64/include/asm/sysreg.h    |  2 +-
 arch/arm64/kernel/cpu_errata.c     |  2 ++
 arch/arm64/kernel/entry.S          | 12 +++++++-
 arch/arm64/kernel/traps.c          | 60 ++++++++++++++++++++++++++++++++++++++
 5 files changed, 75 insertions(+), 2 deletions(-)

(limited to 'arch/arm64/kernel/entry.S')

diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index cef1cf398356..ace0a96e7d6e 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -192,5 +192,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 void cpu_enable_pan(void *__unused);
 void cpu_enable_uao(void *__unused);
+void cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 751e901c8d37..cc06794b7346 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -98,11 +98,11 @@
 			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_UCI		(1 << 26)
 #define SCTLR_EL1_SPAN		(1 << 23)
 #define SCTLR_EL1_SED		(1 << 8)
 #define SCTLR_EL1_CP15BEN	(1 << 5)
-
 
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_RDM_SHIFT		28
 #define ID_AA64ISAR0_ATOMICS_SHIFT	20
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index c2261a7a316e..af647d2cee22 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -46,6 +46,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 826319, 827319, 824069",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+		.enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -54,6 +55,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.desc = "ARM errata 819472",
 		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
 		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+		.enable = cpu_enable_cache_maint_trap,
 	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index eefffa81c6df..3eca5d34f7a6 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -451,7 +451,7 @@ el0_sync:
 	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
-	b.eq	el0_undef
+	b.eq	el0_sys
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el0_sp_pc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
@@ -579,6 +579,16 @@ el0_undef:
 	mov	x0, sp
 	bl	do_undefinstr
 	b	ret_to_user
+el0_sys:
+	/*
+	 * System instructions, for trapped cache maintenance instructions
+	 */
+	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x25
+	mov	x1, sp
+	bl	do_sysinstr
+	b	ret_to_user
 el0_dbg:
 	/*
 	 * Debug exception handling
\ + " .pushsection .fixup,\"ax\"\n" \ + " .align 2\n" \ + "3: mov %w0, %w2\n" \ + " b 2b\n" \ + " .popsection\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (res) \ + : "r" (address), "i" (-EFAULT) ) + +asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) +{ + unsigned long address; + int ret; + + /* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */ + if ((esr & 0x01fffc01) == 0x0012dc00) { + int rt = (esr >> 5) & 0x1f; + int crm = (esr >> 1) & 0x0f; + + address = (rt == 31) ? 0 : regs->regs[rt]; + + switch (crm) { + case 11: /* DC CVAU, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case 10: /* DC CVAC, gets promoted */ + __user_cache_maint("dc civac", address, ret); + break; + case 14: /* DC CIVAC */ + __user_cache_maint("dc civac", address, ret); + break; + case 5: /* IC IVAU */ + __user_cache_maint("ic ivau", address, ret); + break; + default: + force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); + return; + } + } else { + force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); + return; + } + + if (ret) + arm64_notify_segfault(regs, address); + else + regs->pc += 4; +} + long compat_arm_syscall(struct pt_regs *regs); asmlinkage long do_ni_syscall(struct pt_regs *regs) -- cgit v1.2.3 From 888b3c8720e0a4033db09ba2364afde6a4763638 Mon Sep 17 00:00:00 2001 From: Pratyush Anand Date: Fri, 8 Jul 2016 12:35:50 -0400 Subject: arm64: Treat all entry code as non-kprobe-able Entry symbols are not kprobe safe. So blacklist them for kprobing. Signed-off-by: Pratyush Anand Signed-off-by: David A. Long Acked-by: Masami Hiramatsu [catalin.marinas@arm.com: Do not include syscall wrappers in .entry.text] Signed-off-by: Catalin Marinas --- arch/arm64/kernel/entry.S | 3 +++ arch/arm64/kernel/probes/kprobes.c | 26 ++++++++++++++++++++++++++ arch/arm64/kernel/vmlinux.lds.S | 1 + 3 files changed, 30 insertions(+) (limited to 'arch/arm64/kernel/entry.S') diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 12e8d2bcb3f9..492a2655fea4 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -242,6 +242,7 @@ tsk .req x28 // current thread_info /* * Exception vectors. */ + .pushsection ".entry.text", "ax" .align 11 ENTRY(vectors) @@ -774,6 +775,8 @@ __ni_sys_trace: bl do_ni_syscall b __sys_trace_return + .popsection // .entry.text + /* * Special system call wrappers. 
From 888b3c8720e0a4033db09ba2364afde6a4763638 Mon Sep 17 00:00:00 2001
From: Pratyush Anand
Date: Fri, 8 Jul 2016 12:35:50 -0400
Subject: arm64: Treat all entry code as non-kprobe-able

Entry symbols are not kprobe safe. So blacklist them for kprobing.

Signed-off-by: Pratyush Anand
Signed-off-by: David A. Long
Acked-by: Masami Hiramatsu
[catalin.marinas@arm.com: Do not include syscall wrappers in .entry.text]
Signed-off-by: Catalin Marinas
---
 arch/arm64/kernel/entry.S          |  3 +++
 arch/arm64/kernel/probes/kprobes.c | 26 ++++++++++++++++++++++++++
 arch/arm64/kernel/vmlinux.lds.S    |  1 +
 3 files changed, 30 insertions(+)

(limited to 'arch/arm64/kernel/entry.S')

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 12e8d2bcb3f9..492a2655fea4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -242,6 +242,7 @@ tsk	.req	x28		// current thread_info
 /*
  * Exception vectors.
  */
+	.pushsection ".entry.text", "ax"
 
 	.align	11
 ENTRY(vectors)
@@ -774,6 +775,8 @@ __ni_sys_trace:
 	bl	do_ni_syscall
 	b	__sys_trace_return
 
+	.popsection				// .entry.text
+
 /*
  * Special system call wrappers.
  */
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 44968019ce0d..0fe2b6578163 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -30,6 +30,7 @@
 #include
 #include
 #include
+#include
 
 #include "decode-insn.h"
 
@@ -519,6 +520,31 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+	extern char __idmap_text_start[], __idmap_text_end[];
+	extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+	if ((addr >= (unsigned long)__kprobes_text_start &&
+	    addr < (unsigned long)__kprobes_text_end) ||
+	    (addr >= (unsigned long)__entry_text_start &&
+	    addr < (unsigned long)__entry_text_end) ||
+	    (addr >= (unsigned long)__idmap_text_start &&
+	    addr < (unsigned long)__idmap_text_end) ||
+	    !!search_exception_tables(addr))
+		return true;
+
+	if (!is_kernel_in_hyp_mode()) {
+		if ((addr >= (unsigned long)__hyp_text_start &&
+		    addr < (unsigned long)__hyp_text_end) ||
+		    (addr >= (unsigned long)__hyp_idmap_text_start &&
+		    addr < (unsigned long)__hyp_idmap_text_end))
+			return true;
+	}
+
+	return false;
+}
+
 int __init arch_init_kprobes(void)
 {
 	return 0;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 075ce322e82b..9f5939469ef6 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -118,6 +118,7 @@ SECTIONS
 		__exception_text_end = .;
 		IRQENTRY_TEXT
 		SOFTIRQENTRY_TEXT
+		ENTRY_TEXT
 		TEXT_TEXT
 		SCHED_TEXT
 		LOCK_TEXT
--
cgit v1.2.3
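
With the blacklist above in place, probe registration on entry code is
rejected up front by register_kprobe(). A hypothetical test-module fragment
illustrating the effect (the symbol choice, names and messages are
illustrative only, not part of the patch):

	#include <linux/kprobes.h>
	#include <linux/module.h>

	static struct kprobe kp = {
		.symbol_name = "vectors",	/* lives in .entry.text after this patch */
	};

	static int __init blacklist_demo_init(void)
	{
		int ret = register_kprobe(&kp);

		/* Expected: -EINVAL, since the address now falls in the kprobe blacklist */
		pr_info("register_kprobe(%s) = %d\n", kp.symbol_name, ret);
		if (!ret)
			unregister_kprobe(&kp);	/* unexpected success: clean up */
		return 0;
	}

	static void __exit blacklist_demo_exit(void)
	{
	}

	module_init(blacklist_demo_init);
	module_exit(blacklist_demo_exit);
	MODULE_LICENSE("GPL");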