From 2c962369d72f286659e6446919f88d69b943cb4d Mon Sep 17 00:00:00 2001 From: Łukasz Stelmach Date: Mon, 27 Apr 2020 20:36:11 +0100 Subject: ARM: 8970/1: decompressor: increase tag size MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The size field of the tag header structure is supposed to be set to the size of a tag structure including the header. Fixes: c772568788b5f0 ("ARM: add additional table to compressed kernel") Signed-off-by: Łukasz Stelmach Signed-off-by: Russell King --- arch/arm/boot/compressed/vmlinux.lds.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S index b247f399de71..f82b5962d97e 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.S +++ b/arch/arm/boot/compressed/vmlinux.lds.S @@ -42,7 +42,7 @@ SECTIONS } .table : ALIGN(4) { _table_start = .; - LONG(ZIMAGE_MAGIC(2)) + LONG(ZIMAGE_MAGIC(4)) LONG(ZIMAGE_MAGIC(0x5a534c4b)) LONG(ZIMAGE_MAGIC(__piggy_size_addr - _start)) LONG(ZIMAGE_MAGIC(_kernel_bss_size)) -- cgit v1.2.3 From 747ffc2fcf969eff9309d7f2d1d61cb8b9e1bb40 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 May 2020 13:03:54 +0100 Subject: ARM: uaccess: consolidate uaccess asm to asm/uaccess-asm.h Consolidate the user access assembly code to asm/uaccess-asm.h. This moves the csdb, check_uaccess, uaccess_mask_range_ptr, uaccess_enable, uaccess_disable, uaccess_save, uaccess_restore macros, and creates two new ones for exception entry and exit - uaccess_entry and uaccess_exit. This makes the uaccess_save and uaccess_restore macros private to asm/uaccess-asm.h. Signed-off-by: Russell King --- arch/arm/include/asm/assembler.h | 75 +------------------------- arch/arm/include/asm/uaccess-asm.h | 106 +++++++++++++++++++++++++++++++++++++ arch/arm/kernel/entry-armv.S | 11 +--- arch/arm/kernel/entry-header.S | 9 ++-- 4 files changed, 112 insertions(+), 89 deletions(-) create mode 100644 arch/arm/include/asm/uaccess-asm.h diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 99929122dad7..3546d294d55f 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -18,11 +18,11 @@ #endif #include -#include #include #include #include #include +#include #define IOMEM(x) (x) @@ -446,79 +446,6 @@ THUMB( orr \reg , \reg , #PSR_T_BIT ) .size \name , . - \name .endm - .macro csdb -#ifdef CONFIG_THUMB2_KERNEL - .inst.w 0xf3af8014 -#else - .inst 0xe320f014 -#endif - .endm - - .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req -#ifndef CONFIG_CPU_USE_DOMAINS - adds \tmp, \addr, #\size - 1 - sbcscc \tmp, \tmp, \limit - bcs \bad -#ifdef CONFIG_CPU_SPECTRE - movcs \addr, #0 - csdb -#endif -#endif - .endm - - .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req -#ifdef CONFIG_CPU_SPECTRE - sub \tmp, \limit, #1 - subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr - addhs \tmp, \tmp, #1 @ if (tmp >= 0) { - subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } - movlo \addr, #0 @ if (tmp < 0) addr = NULL - csdb -#endif - .endm - - .macro uaccess_disable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. 
- */ - mov \tmp, #DACR_UACCESS_DISABLE - mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_enable, tmp, isb=1 -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - /* - * Whenever we re-enter userspace, the domains should always be - * set appropriately. - */ - mov \tmp, #DACR_UACCESS_ENABLE - mcr p15, 0, \tmp, c3, c0, 0 - .if \isb - instr_sync - .endif -#endif - .endm - - .macro uaccess_save, tmp -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #SVC_DACR] -#endif - .endm - - .macro uaccess_restore -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #SVC_DACR] - mcr p15, 0, r0, c3, c0, 0 -#endif - .endm - .irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo .macro ret\c, reg #if __LINUX_ARM_ARCH__ < 6 diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h new file mode 100644 index 000000000000..d475e3e8145d --- /dev/null +++ b/arch/arm/include/asm/uaccess-asm.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_UACCESS_ASM_H__ +#define __ASM_UACCESS_ASM_H__ + +#include +#include +#include +#include + + .macro csdb +#ifdef CONFIG_THUMB2_KERNEL + .inst.w 0xf3af8014 +#else + .inst 0xe320f014 +#endif + .endm + + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcscc \tmp, \tmp, \limit + bcs \bad +#ifdef CONFIG_CPU_SPECTRE + movcs \addr, #0 + csdb +#endif +#endif + .endm + + .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req +#ifdef CONFIG_CPU_SPECTRE + sub \tmp, \limit, #1 + subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr + addhs \tmp, \tmp, #1 @ if (tmp >= 0) { + subshs \tmp, \tmp, \size @ tmp = limit - (addr + size) } + movlo \addr, #0 @ if (tmp < 0) addr = NULL + csdb +#endif + .endm + + .macro uaccess_disable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_enable, tmp, isb=1 +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + /* + * Whenever we re-enter userspace, the domains should always be + * set appropriately. + */ + mov \tmp, #DACR_UACCESS_ENABLE + mcr p15, 0, \tmp, c3, c0, 0 + .if \isb + instr_sync + .endif +#endif + .endm + + .macro uaccess_save, tmp +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + mrc p15, 0, \tmp, c3, c0, 0 + str \tmp, [sp, #SVC_DACR] +#endif + .endm + + .macro uaccess_restore +#ifdef CONFIG_CPU_SW_DOMAIN_PAN + ldr r0, [sp, #SVC_DACR] + mcr p15, 0, r0, c3, c0, 0 +#endif + .endm + + /* + * Save the address limit on entry to a privileged exception and + * if using PAN, save and disable usermode access. 
+ */ + .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable + ldr \tmp0, [\tsk, #TI_ADDR_LIMIT] + mov \tmp1, #TASK_SIZE + str \tmp1, [\tsk, #TI_ADDR_LIMIT] + str \tmp0, [sp, #SVC_ADDR_LIMIT] + uaccess_save \tmp0 + .if \disable + uaccess_disable \tmp0 + .endif + .endm + + /* Restore the user access state previously saved by uaccess_entry */ + .macro uaccess_exit, tsk, tmp0, tmp1 + ldr \tmp1, [sp, #SVC_ADDR_LIMIT] + uaccess_restore + str \tmp1, [\tsk, #TI_ADDR_LIMIT] + .endm + +#endif /* __ASM_UACCESS_ASM_H__ */ diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 77f54830554c..55a47df04773 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -27,6 +27,7 @@ #include #include #include +#include #include "entry-header.S" #include @@ -179,15 +180,7 @@ ENDPROC(__und_invalid) stmia r7, {r2 - r6} get_thread_info tsk - ldr r0, [tsk, #TI_ADDR_LIMIT] - mov r1, #TASK_SIZE - str r1, [tsk, #TI_ADDR_LIMIT] - str r0, [sp, #SVC_ADDR_LIMIT] - - uaccess_save r0 - .if \uaccess - uaccess_disable r0 - .endif + uaccess_entry tsk, r0, r1, r2, \uaccess .if \trace #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 32051ec5b33f..40db0f9188b6 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -6,6 +6,7 @@ #include #include #include +#include #include @ Bad Abort numbers @@ -217,9 +218,7 @@ blne trace_hardirqs_off #endif .endif - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode SVC restore @@ -263,9 +262,7 @@ @ on the stack remains correct). @ .macro svc_exit_via_fiq - ldr r1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore - str r1, [tsk, #TI_ADDR_LIMIT] + uaccess_exit tsk, r0, r1 #ifndef CONFIG_THUMB2_KERNEL @ ARM mode restore mov r0, sp -- cgit v1.2.3 From 8ede890b0bcebe8c760aacfe20e934d98c3dc6aa Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 May 2020 13:14:09 +0100 Subject: ARM: uaccess: integrate uaccess_save and uaccess_restore Integrate uaccess_save / uaccess_restore macros into the new uaccess_entry / uaccess_exit macros respectively. Signed-off-by: Russell King --- arch/arm/include/asm/uaccess-asm.h | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h index d475e3e8145d..e46468b91eaa 100644 --- a/arch/arm/include/asm/uaccess-asm.h +++ b/arch/arm/include/asm/uaccess-asm.h @@ -67,30 +67,23 @@ #endif .endm - .macro uaccess_save, tmp #ifdef CONFIG_CPU_SW_DOMAIN_PAN - mrc p15, 0, \tmp, c3, c0, 0 - str \tmp, [sp, #SVC_DACR] -#endif - .endm - - .macro uaccess_restore -#ifdef CONFIG_CPU_SW_DOMAIN_PAN - ldr r0, [sp, #SVC_DACR] - mcr p15, 0, r0, c3, c0, 0 +#define DACR(x...) x +#else +#define DACR(x...) #endif - .endm /* * Save the address limit on entry to a privileged exception and * if using PAN, save and disable usermode access. 
*/ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable - ldr \tmp0, [\tsk, #TI_ADDR_LIMIT] - mov \tmp1, #TASK_SIZE - str \tmp1, [\tsk, #TI_ADDR_LIMIT] - str \tmp0, [sp, #SVC_ADDR_LIMIT] - uaccess_save \tmp0 + ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] + mov \tmp2, #TASK_SIZE + str \tmp2, [\tsk, #TI_ADDR_LIMIT] + DACR( mrc p15, 0, \tmp0, c3, c0, 0) + DACR( str \tmp0, [sp, #SVC_DACR]) + str \tmp1, [sp, #SVC_ADDR_LIMIT] .if \disable uaccess_disable \tmp0 .endif @@ -99,8 +92,11 @@ /* Restore the user access state previously saved by uaccess_entry */ .macro uaccess_exit, tsk, tmp0, tmp1 ldr \tmp1, [sp, #SVC_ADDR_LIMIT] - uaccess_restore + DACR( ldr \tmp0, [sp, #SVC_DACR]) str \tmp1, [\tsk, #TI_ADDR_LIMIT] + DACR( mcr p15, 0, \tmp0, c3, c0, 0) .endm +#undef DACR + #endif /* __ASM_UACCESS_ASM_H__ */ -- cgit v1.2.3 From 71f8af1110101facfad68989ff91f88f8e2c3e22 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 3 May 2020 13:24:07 +0100 Subject: ARM: uaccess: fix DACR mismatch with nested exceptions Tomas Paukrt reports that his SAM9X60 based system (ARM926, ARMv5TJ) fails to fix up alignment faults, eventually resulting in a kernel oops. The problem occurs when using CONFIG_CPU_USE_DOMAINS with commit e6978e4bf181 ("ARM: save and reset the address limit when entering an exception"). This is because the address limit is set back to TASK_SIZE on exception entry, and, although it is restored on exception exit, the domain register is not. Hence, this sequence can occur: interrupt pt_regs->addr_limit = addr_limit // USER_DS addr_limit = USER_DS alignment exception __probe_kernel_read() old_fs = get_fs() // USER_DS set_fs(KERNEL_DS) addr_limit = KERNEL_DS dacr.kernel = DOMAIN_MANAGER interrupt pt_regs->addr_limit = addr_limit // KERNEL_DS addr_limit = USER_DS alignment exception __probe_kernel_read() old_fs = get_fs() // USER_DS set_fs(KERNEL_DS) addr_limit = KERNEL_DS dacr.kernel = DOMAIN_MANAGER ... set_fs(old_fs) addr_limit = USER_DS dacr.kernel = DOMAIN_CLIENT ... addr_limit = pt_regs->addr_limit // KERNEL_DS interrupt returns At this point, addr_limit is correctly restored to KERNEL_DS for __probe_kernel_read() to continue execution, but dacr.kernel is not, it has been reset by the set_fs(old_fs) to DOMAIN_CLIENT. This would not have happened prior to the mentioned commit, because addr_limit would remain KERNEL_DS, so get_fs() would have returned KERNEL_DS, and so would correctly nest. This commit fixes the problem by also saving the DACR on exception entry if either CONFIG_CPU_SW_DOMAIN_PAN or CONFIG_CPU_USE_DOMAINS are enabled, and resetting the DACR appropriately on exception entry to match addr_limit and PAN settings. Fixes: e6978e4bf181 ("ARM: save and reset the address limit when entering an exception") Reported-by: Tomas Paukrt Signed-off-by: Russell King --- arch/arm/include/asm/uaccess-asm.h | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/arch/arm/include/asm/uaccess-asm.h b/arch/arm/include/asm/uaccess-asm.h index e46468b91eaa..907571fd05c6 100644 --- a/arch/arm/include/asm/uaccess-asm.h +++ b/arch/arm/include/asm/uaccess-asm.h @@ -67,15 +67,21 @@ #endif .endm -#ifdef CONFIG_CPU_SW_DOMAIN_PAN +#if defined(CONFIG_CPU_SW_DOMAIN_PAN) || defined(CONFIG_CPU_USE_DOMAINS) #define DACR(x...) x #else #define DACR(x...) #endif /* - * Save the address limit on entry to a privileged exception and - * if using PAN, save and disable usermode access. + * Save the address limit on entry to a privileged exception. 
+ * + * If we are using the DACR for kernel access by the user accessors + * (CONFIG_CPU_USE_DOMAINS=y), always reset the DACR kernel domain + * back to client mode, whether or not \disable is set. + * + * If we are using SW PAN, set the DACR user domain to no access + * if \disable is set. */ .macro uaccess_entry, tsk, tmp0, tmp1, tmp2, disable ldr \tmp1, [\tsk, #TI_ADDR_LIMIT] @@ -84,8 +90,17 @@ DACR( mrc p15, 0, \tmp0, c3, c0, 0) DACR( str \tmp0, [sp, #SVC_DACR]) str \tmp1, [sp, #SVC_ADDR_LIMIT] - .if \disable - uaccess_disable \tmp0 + .if \disable && IS_ENABLED(CONFIG_CPU_SW_DOMAIN_PAN) + /* kernel=client, user=no access */ + mov \tmp2, #DACR_UACCESS_DISABLE + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync + .elseif IS_ENABLED(CONFIG_CPU_USE_DOMAINS) + /* kernel=client */ + bic \tmp2, \tmp0, #domain_mask(DOMAIN_KERNEL) + orr \tmp2, \tmp2, #domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) + mcr p15, 0, \tmp2, c3, c0, 0 + instr_sync .endif .endm -- cgit v1.2.3 From 1f13aa4d5194cbbc7934012069c05a60fc697df9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 5 May 2020 13:54:36 +0100 Subject: ARM: 8973/1: Add missing newline terminator to kernel message Before commit 874f9c7da9a4acbc ("printk: create pr_ functions"), pr_*() calls without a trailing newline characters would be printed with a newline character appended, both on the console and in the output of the dmesg command. After that commit, no new line character is appended, and the output of the next pr_*() call of the same type may be appended: -No ATAGs? -hw-breakpoint: found 5 (+1 reserved) breakpoint and 4 watchpoint registers. +No ATAGs?hw-breakpoint: found 5 (+1 reserved) breakpoint and 4 watchpoint registers. While this commit has been reverted in commit a0cba2179ea4c182 ("Revert "printk: create pr_ functions""), it's still good practice to terminate kernel messages with newlines. Signed-off-by: Geert Uytterhoeven Signed-off-by: Russell King --- arch/arm/kernel/atags_proc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c index 4247ebf4b893..3c2faf2bd124 100644 --- a/arch/arm/kernel/atags_proc.c +++ b/arch/arm/kernel/atags_proc.c @@ -42,7 +42,7 @@ static int __init init_atags_procfs(void) size_t size; if (tag->hdr.tag != ATAG_CORE) { - pr_info("No ATAGs?"); + pr_info("No ATAGs?\n"); return -EINVAL; } -- cgit v1.2.3 From 3866f217aaa81bf7165c7f27362eee5d7919c496 Mon Sep 17 00:00:00 2001 From: Fredrik Strupe Date: Mon, 18 May 2020 19:41:11 +0100 Subject: ARM: 8977/1: ptrace: Fix mask for thumb breakpoint hook call_undef_hook() in traps.c applies the same instr_mask for both 16-bit and 32-bit thumb instructions. If instr_mask then is only 16 bits wide (0xffff as opposed to 0xffffffff), the first half-word of 32-bit thumb instructions will be masked out. This makes the function match 32-bit thumb instructions where the second half-word is equal to instr_val, regardless of the first half-word. The result in this case is that all undefined 32-bit thumb instructions with the second half-word equal to 0xde01 (udf #1) work as breakpoints and will raise a SIGTRAP instead of a SIGILL, instead of just the one intended 16-bit instruction. An example of such an instruction is 0xeaa0de01, which is unallocated according to Arm ARM and should raise a SIGILL, but instead raises a SIGTRAP. 
This patch fixes the issue by setting all the bits in instr_mask, which will still match the intended 16-bit thumb instruction (where the upper half is always 0), but not any 32-bit thumb instructions. Cc: Oleg Nesterov Signed-off-by: Fredrik Strupe Signed-off-by: Russell King --- arch/arm/kernel/ptrace.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index b606cded90cd..4cc6a7eff635 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -219,8 +219,8 @@ static struct undef_hook arm_break_hook = { }; static struct undef_hook thumb_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xde01, + .instr_mask = 0xffffffff, + .instr_val = 0x0000de01, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, .fn = break_trap, -- cgit v1.2.3
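The undef-hook matching described in the last commit message boils down to a masked compare of the trapped instruction against instr_val. The self-contained C sketch below models that check to show why the old 16-bit mask also caught 32-bit Thumb encodings such as 0xeaa0de01, while the widened mask only catches the intended 16-bit udf #1. The struct and the hook_matches() helper are illustrative stand-ins, not the actual matching code in arch/arm/kernel/traps.c (which also compares the cpsr mask/value pair, as the hook definitions in the diff show).

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for struct undef_hook: only the instruction
     * mask/value pair is modelled; the real hook also carries cpsr_mask,
     * cpsr_val and a handler function. */
    struct undef_hook_model {
            uint32_t instr_mask;
            uint32_t instr_val;
    };

    /* Paraphrase of the masked compare applied when an undefined
     * instruction traps; not the kernel's call_undef_hook() itself. */
    static int hook_matches(const struct undef_hook_model *h, uint32_t instr)
    {
            return (instr & h->instr_mask) == h->instr_val;
    }

    int main(void)
    {
            /* Old and new thumb_break_hook values from the diff above. */
            struct undef_hook_model old_hook = { .instr_mask = 0xffff,     .instr_val = 0xde01 };
            struct undef_hook_model new_hook = { .instr_mask = 0xffffffff, .instr_val = 0x0000de01 };

            uint32_t udf1 = 0x0000de01;    /* intended 16-bit Thumb "udf #1" breakpoint */
            uint32_t unalloc = 0xeaa0de01; /* unallocated 32-bit Thumb encoding from the report */

            printf("old mask: udf#1=%d unallocated=%d\n",
                   hook_matches(&old_hook, udf1), hook_matches(&old_hook, unalloc));
            printf("new mask: udf#1=%d unallocated=%d\n",
                   hook_matches(&new_hook, udf1), hook_matches(&new_hook, unalloc));
            return 0;
    }

Built with any C compiler, this prints "old mask: udf#1=1 unallocated=1" followed by "new mask: udf#1=1 unallocated=0": the widened mask keeps the intended breakpoint match while letting the unallocated 32-bit encoding fall through to SIGILL handling, which is the behaviour change the patch describes.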