-rw-r--r-- | Documentation/arm64/memory.rst               |  9
-rw-r--r-- | arch/arm64/Kconfig                           | 15
-rw-r--r-- | arch/arm64/Makefile                          | 16
-rw-r--r-- | arch/arm64/include/asm/asm-uaccess.h         |  7
-rw-r--r-- | arch/arm64/include/asm/atomic_lse.h          |  6
-rw-r--r-- | arch/arm64/include/asm/memory.h              | 10
-rw-r--r-- | arch/arm64/include/asm/pgtable.h             |  3
-rw-r--r-- | arch/arm64/include/asm/sysreg.h              |  2
-rw-r--r-- | arch/arm64/include/asm/vdso/compat_barrier.h |  2
-rw-r--r-- | arch/arm64/include/asm/vdso_datapage.h       | 33
-rw-r--r-- | arch/arm64/kernel/armv8_deprecated.c         |  5
-rw-r--r-- | arch/arm64/kernel/cpu_errata.c               |  4
-rw-r--r-- | arch/arm64/kernel/cpufeature.c               | 16
-rw-r--r-- | arch/arm64/kernel/entry.S                    |  7
-rw-r--r-- | arch/arm64/kernel/ftrace.c                   | 12
-rw-r--r-- | arch/arm64/kernel/hibernate.c                |  9
-rw-r--r-- | arch/arm64/kernel/process.c                  | 50
-rw-r--r-- | arch/arm64/kernel/vdso/gettimeofday.S        |  0
-rw-r--r-- | arch/arm64/kernel/vdso32/Makefile            | 44
-rw-r--r-- | arch/arm64/mm/fault.c                        | 19
-rw-r--r-- | include/linux/sched.h                        |  1
-rw-r--r-- | lib/vdso/Kconfig                             |  9
22 files changed, 155 insertions(+), 124 deletions(-)
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index b040909e45f8..02e02175e6f5 100644
--- a/Documentation/arm64/memory.rst
+++ b/Documentation/arm64/memory.rst
@@ -154,11 +154,18 @@ return virtual addresses to userspace from a 48-bit range.
 
 Software can "opt-in" to receiving VAs from a 52-bit space by
 specifying an mmap hint parameter that is larger than 48-bit.
+
 For example:
-  maybe_high_address = mmap(~0UL, size, prot, flags,...);
+
+.. code-block:: c
+
+   maybe_high_address = mmap(~0UL, size, prot, flags,...);
 
 It is also possible to build a debug kernel that returns addresses
 from a 52-bit space by enabling the following kernel config options:
+
+.. code-block:: sh
+
    CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
 
 Note that this option is only intended for debugging applications
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7d36fd95ae5a..3f047afb982c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -110,7 +110,6 @@ config ARM64
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
 	select GENERIC_GETTIMEOFDAY
-	select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT)
 	select HANDLE_DOMAIN_IRQ
 	select HARDIRQS_SW_RESEND
 	select HAVE_PCI
@@ -1176,7 +1175,7 @@ menuconfig COMPAT
 if COMPAT
 
 config KUSER_HELPERS
-	bool "Enable kuser helpers page for 32 bit applications"
+	bool "Enable kuser helpers page for 32-bit applications"
 	default y
 	help
 	  Warning: disabling this option may break 32-bit user programs.
@@ -1202,6 +1201,18 @@ config KUSER_HELPERS
 	  Say N here only if you are absolutely certain that you do not need
 	  these helpers; otherwise, the safe option is to say Y.
 
+config COMPAT_VDSO
+	bool "Enable vDSO for 32-bit applications"
+	depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != ""
+	select GENERIC_COMPAT_VDSO
+	default y
+	help
+	  Place in the process address space of 32-bit applications an
+	  ELF shared object providing fast implementations of gettimeofday
+	  and clock_gettime.
+
+	  You must have a 32-bit build of glibc 2.22 or later for programs
+	  to seamlessly take advantage of this.
+
 menuconfig ARMV8_DEPRECATED
 	bool "Emulate deprecated/obsolete ARMv8 instructions"
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 84a3d502c5a5..2c0238ce0551 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -53,22 +53,6 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable)
   endif
 endif
 
-ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
-  CROSS_COMPILE_COMPAT ?= $(CONFIG_CROSS_COMPILE_COMPAT_VDSO:"%"=%)
-
-  ifeq ($(CONFIG_CC_IS_CLANG), y)
-    $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
-  else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
-    $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
-  else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
-    $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
-  else
-    export CROSS_COMPILE_COMPAT
-    export CONFIG_COMPAT_VDSO := y
-    compat_vdso := -DCONFIG_COMPAT_VDSO=1
-  endif
-endif
-
 KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)	\
 		   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
index f74909ba29bd..5bf963830b17 100644
--- a/arch/arm64/include/asm/asm-uaccess.h
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -78,10 +78,9 @@ alternative_else_nop_endif
 /*
  * Remove the address tag from a virtual address, if present.
  */
-	.macro	clear_address_tag, dst, addr
-	tst	\addr, #(1 << 55)
-	bic	\dst, \addr, #(0xff << 56)
-	csel	\dst, \dst, \addr, eq
+	.macro	untagged_addr, dst, addr
+	sbfx	\dst, \addr, #0, #56
+	and	\dst, \dst, \addr
 	.endm
 
 #endif
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index c6bd87d2915b..574808b9df4c 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr,	\
+static __always_inline u##sz						\
+__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
 					      u##sz old,		\
 					      u##sz new)		\
 {									\
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, cl...)					\
-static inline long __lse__cmpxchg_double##name(unsigned long old1,	\
+static __always_inline long						\
+__lse__cmpxchg_double##name(unsigned long old1,				\
 					unsigned long old2,		\
 					unsigned long new1,		\
 					unsigned long new2,		\
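Note: the atomic_lse.h hunk above only strengthens the inline hint on the generated cmpxchg helpers. These helpers sit behind size-dispatching wrappers, a pattern that only works when the helper is folded into its caller; a merely-"inline" function may be emitted out of line, where the size is no longer a compile-time constant. Below is a minimal userspace sketch of that pattern — the names and the GCC __atomic builtins are illustrative stand-ins, not the kernel's implementation or the commit's stated rationale:

```c
#include <stdio.h>

#define __always_inline inline __attribute__((__always_inline__))

/*
 * Illustrative stand-in for a size-dispatched cmpxchg wrapper:
 * 'size' is a compile-time constant at every call site, so the
 * switch is expected to fold to a single case after inlining.
 * __always_inline removes the compiler's freedom to keep an
 * out-of-line copy where that folding cannot happen.
 */
static __always_inline unsigned long
cmpxchg_sketch(volatile void *ptr, unsigned long old,
	       unsigned long new, int size)
{
	switch (size) {
	case 4: {
		unsigned int o = old;
		__atomic_compare_exchange_n((volatile unsigned int *)ptr, &o,
					    (unsigned int)new, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return o;
	}
	case 8: {
		unsigned long o = old;
		__atomic_compare_exchange_n((volatile unsigned long *)ptr, &o,
					    new, 0,
					    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
		return o;
	}
	}
	return 0;
}

int main(void)
{
	volatile unsigned long v = 1;

	/* sizeof(v) is constant, so only 'case 8' survives inlining */
	printf("old=%lu\n", cmpxchg_sketch(&v, 1, 2, sizeof(v)));
	printf("new=%lu\n", v);
	return 0;
}
```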
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index b61b50bf68b1..c23c47360664 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.
 */
-#define untagged_addr(addr)	\
+#define __untagged_addr(addr)	\
	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
+#define untagged_addr(addr)	({				\
+	u64 __addr = (__force u64)addr;				\
+	__addr &= __untagged_addr(__addr);			\
+	(__force __typeof__(addr))__addr;			\
+})
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)	((u64)(tag) << 56)
-#define __tag_reset(addr)	untagged_addr(addr)
+#define __tag_reset(addr)	__untagged_addr(addr)
 #define __tag_get(addr)	(__u8)((u64)(addr) >> 56)
 #else
 #define __tag_shifted(tag)	0UL
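Note: the new untagged_addr asm macro (asm-uaccess.h above) and this C macro implement the same two-step trick: sign-extend from bit 55 (sbfx / sign_extend64), then AND with the original pointer. User (TTBR0) addresses have bit 55 clear, so their tag byte is zeroed; kernel (TTBR1) addresses have bit 55 set and pass through unchanged, tags included, whereas the old pure sign-extension rewrote their top byte. A standalone sketch of the arithmetic, in plain userspace C with made-up example values:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Userspace model of untagged_addr(): sign-extending from bit 55
 * clears the tag byte of a user pointer but forces the top byte
 * of a kernel pointer to 0xff; ANDing with the original address
 * therefore strips tags only from the user half of the address
 * space. (The cast/shift idiom below assumes the usual two's-
 * complement arithmetic right shift.)
 */
static uint64_t untag_sketch(uint64_t addr)
{
	int64_t extended = (int64_t)(addr << 8) >> 8;	/* sign_extend64(addr, 55) */

	return addr & (uint64_t)extended;
}

int main(void)
{
	uint64_t user = 0x2a00007fdeadbeefULL;	/* tag 0x2a, bit 55 clear */
	uint64_t kern = 0xffff000012345678ULL;	/* TTBR1 address, bit 55 set */

	printf("%016" PRIx64 " -> %016" PRIx64 "\n", user, untag_sketch(user));
	printf("%016" PRIx64 " -> %016" PRIx64 "\n", kern, untag_sketch(kern));
	return 0;
}
```

The first print shows the tag byte dropping out (0x0000007fdeadbeef); the second shows the kernel address surviving untouched.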
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 7576df00eb50..8330810f699e 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)	((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o)	((o) | PAGE_END)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 972d196c7714..6e919fafb43d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -212,7 +212,7 @@
 #define SYS_FAR_EL1			sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1			sys_reg(3, 0, 7, 4, 0)
 
-#define SYS_PAR_EL1_F			BIT(1)
+#define SYS_PAR_EL1_F			BIT(0)
 #define SYS_PAR_EL1_FST			GENMASK(6, 1)
 
 /*** Statistical Profiling Extension ***/
diff --git a/arch/arm64/include/asm/vdso/compat_barrier.h b/arch/arm64/include/asm/vdso/compat_barrier.h
index fb60a88b5ed4..3fd8fd6d8fc2 100644
--- a/arch/arm64/include/asm/vdso/compat_barrier.h
+++ b/arch/arm64/include/asm/vdso/compat_barrier.h
@@ -20,7 +20,7 @@
 
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 
-#if __LINUX_ARM_ARCH__ >= 8
+#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
 #define aarch32_smp_mb()	dmb(ish)
 #define aarch32_smp_rmb()	dmb(ishld)
 #define aarch32_smp_wmb()	dmb(ishst)
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
deleted file mode 100644
index 1f38bf330a6e..000000000000
--- a/arch/arm64/include/asm/vdso_datapage.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Limited
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifndef __ASSEMBLY__
-
-struct vdso_data {
-	__u64 cs_cycle_last;	/* Timebase at clocksource init */
-	__u64 raw_time_sec;	/* Raw time */
-	__u64 raw_time_nsec;
-	__u64 xtime_clock_sec;	/* Kernel time */
-	__u64 xtime_clock_nsec;
-	__u64 xtime_coarse_sec;	/* Coarse time */
-	__u64 xtime_coarse_nsec;
-	__u64 wtm_clock_sec;	/* Wall to monotonic time */
-	__u64 wtm_clock_nsec;
-	__u32 tb_seq_count;	/* Timebase sequence counter */
-	/* cs_* members must be adjacent and in this order (ldp accesses) */
-	__u32 cs_mono_mult;	/* NTP-adjusted clocksource multiplier */
-	__u32 cs_shift;		/* Clocksource shift (mono = raw) */
-	__u32 cs_raw_mult;	/* Raw clocksource multiplier */
-	__u32 tz_minuteswest;	/* Whacky timezone stuff */
-	__u32 tz_dsttime;
-	__u32 use_syscall;
-	__u32 hrtimer_res;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 2ec09debc2bb..ca158be21f83 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
 	struct insn_emulation *insn;
 
 	insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+	if (!insn)
+		return;
+
 	insn->ops = ops;
 	insn->min = INSN_UNDEF;
 
@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void)
 
 	insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
 			       GFP_KERNEL);
+	if (!insns_sysctl)
+		return;
 
 	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
 	list_for_each_entry(insn, &insn_emulation, node) {
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a19bb3e4bcfb..6c3b10a41bd8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -129,8 +129,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
 	int cpu, slot = -1;
 
 	/*
-	 * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
-	 * start/end if we're a guest. Skip the hyp-vectors work.
+	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+	 * we're a guest. Skip the hyp-vectors work.
 	 */
 	if (!hyp_vecs_start) {
 		__this_cpu_write(bp_hardening_data.fn, fn);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 9323bcc40a58..80f459ad0190 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
 		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -175,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
 	ARM64_FTR_END,
 };
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 109894bd3194..cf3bd2976e57 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -604,7 +604,7 @@ el1_da:
 	 */
 	mrs	x3, far_el1
 	inherit_daif	pstate=x23, tmp=x2
-	clear_address_tag x0, x3
+	untagged_addr	x0, x3
 	mov	x2, sp				// struct pt_regs
 	bl	do_mem_abort
 
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	orr	x24, x24, x0
 alternative_else_nop_endif
 	cbnz	x24, 1f				// preempt count != 0 || NMI return path
-	bl	preempt_schedule_irq		// irq en/disable is done inside
+	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
 
@@ -775,6 +775,7 @@ el0_sync_compat:
 	b.ge	el0_dbg
 	b	el0_inv
 el0_svc_compat:
+	gic_prio_kentry_setup	tmp=x1
 	mov	x0, sp
 	bl	el0_svc_compat_handler
 	b	ret_to_user
@@ -807,7 +808,7 @@ el0_da:
 	mrs	x26, far_el1
 	ct_user_exit_irqoff
 	enable_daif
-	clear_address_tag x0, x26
+	untagged_addr	x0, x26
 	mov	x1, x25
 	mov	x2, sp
 	bl	do_mem_abort
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 171773257974..06e56b470315 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
 		/*
 		 * Ensure updated trampoline is visible to instruction
-		 * fetch before we patch in the branch.
+		 * fetch before we patch in the branch. Although the
+		 * architecture doesn't require an IPI in this case,
+		 * Neoverse-N1 erratum #1542419 does require one
+		 * if the TLB maintenance in module_enable_ro() is
+		 * skipped due to rodata_enabled. It doesn't seem worth
+		 * it to make it conditional given that this is
+		 * certainly not a fast-path.
 		 */
-		__flush_icache_range((unsigned long)&dst[0],
-				     (unsigned long)&dst[1]);
+		flush_icache_range((unsigned long)&dst[0],
+				   (unsigned long)&dst[1]);
 	}
 	addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index e0a7fce0e01c..a96b2921d22c 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 				 gfp_t mask)
 {
 	int rc = 0;
+	pgd_t *trans_pgd;
 	pgd_t *pgdp;
 	pud_t *pudp;
 	pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	memcpy((void *)dst, src_start, length);
 	__flush_icache_range(dst, dst + length);
 
-	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+	trans_pgd = allocator(mask);
+	if (!trans_pgd) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
 	if (pgd_none(READ_ONCE(*pgdp))) {
 		pudp = allocator(mask);
 		if (!pudp) {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index a47462def04b..71f788cd2b18 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -332,22 +334,27 @@ void arch_release_task_struct(struct task_struct *tsk)
 	fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied. We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead. This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	if (current->mm)
 		fpsimd_preserve_current_state();
 	*dst = *src;
 
+	/* We rely on the above assignment to initialize dst's thread_flags: */
+	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+	/*
+	 * Detach src's sve_state (if any) from dst so that it does not
+	 * get erroneously used or freed prematurely. dst's sve_state
+	 * will be allocated on demand later on if dst uses SVE.
+	 * For consistency, also clear TIF_SVE here: this could be done
+	 * later in copy_process(), but to avoid tripping up future
+	 * maintainers it is best not to leave TIF_SVE and sve_state in
+	 * an inconsistent state, even temporarily.
+	 */
+	dst->thread.sve_state = NULL;
+	clear_tsk_thread_flag(dst, TIF_SVE);
+
 	return 0;
 }
 
@@ -361,13 +368,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
 	/*
-	 * Unalias p->thread.sve_state (if any) from the parent task
-	 * and disable discard SVE state for p:
-	 */
-	clear_tsk_thread_flag(p, TIF_SVE);
-	p->thread.sve_state = NULL;
-
-	/*
 	 * In case p was allocated the same task_struct pointer as some
 	 * other recently-exited task, make sure p is disassociated from
 	 * any cpu that may have run that now-exited task recently.
@@ -633,3 +633,19 @@ static int __init tagged_addr_init(void)
 
 core_initcall(tagged_addr_init);
 #endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * Preempting a task from an IRQ means we leave copies of PSTATE
+	 * on the stack. cpufeature's enable calls may modify PSTATE, but
+	 * resuming one of these preempted tasks would undo those changes.
+	 *
+	 * Only allow a task to be preempted once cpufeatures have been
+	 * enabled.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
+		preempt_schedule_irq();
+}
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ /dev/null
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 1fba0776ed40..76b327f88fbb 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -8,15 +8,21 @@ ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
 include $(srctree)/lib/vdso/Makefile
 
-COMPATCC := $(CROSS_COMPILE_COMPAT)gcc
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
 
-# Same as cc-*option, but using COMPATCC instead of CC
 cc32-option = $(call try-run,\
-        $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 cc32-disable-warning = $(call try-run,\
-	$(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+	$(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 cc32-ldoption = $(call try-run,\
-        $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+	printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # We cannot use the global flags to compile the vDSO files, the main reason
 # being that the 32-bit compiler may be older than the main (64-bit) compiler
@@ -25,22 +31,21 @@ cc32-ldoption = $(call try-run,\
 # arm64 one.
 # As a result we set our own flags here.
 
-# From top-level Makefile
-# NOSTDINC_FLAGS
-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include)
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
 VDSO_CPPFLAGS += $(LINUXINCLUDE)
-VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS)
 
 # Common C and assembly flags
 # From top-level Makefile
 VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
+VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+endif
+
 VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
 ifdef CONFIG_DEBUG_INFO
 VDSO_CAFLAGS += -g
 endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(COMPATCC)), y)
-VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
-endif
 
 # From arm Makefile
 VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
@@ -55,6 +60,7 @@ endif
 VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
 VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
 
+
 # Try to compile for ARMv8. If the compiler is too old and doesn't support it,
 # fall back to v7. There is no easy way to check for what architecture the code
 # is being compiled, so define a macro specifying that (see arch/arm/Makefile).
@@ -91,6 +97,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
 
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
 VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
 # From arm vDSO Makefile
 VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
@@ -159,14 +171,14 @@ quiet_cmd_vdsold_and_vdso_check = LD32    $@
       cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
 
 quiet_cmd_vdsold = LD32    $@
-      cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+      cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 quiet_cmd_vdsocc = CC32    $@
-      cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+      cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
 quiet_cmd_vdsocc_gettimeofday = CC32    $@
-      cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+      cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
 quiet_cmd_vdsoas = AS32    $@
-      cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+      cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
 
 quiet_cmd_vdsomunge = MUNGE   $@
       cmd_vdsomunge = $(obj)/$(munge) $< $@
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 115d7a0e4b08..9fc6db0bcbad 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -113,6 +113,15 @@ static inline bool is_ttbr1_addr(unsigned long addr)
 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
 }
 
+static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
+{
+	/* Either init_pg_dir or swapper_pg_dir */
+	if (mm == &init_mm)
+		return __pa_symbol(mm->pgd);
+
+	return (unsigned long)virt_to_phys(mm->pgd);
+}
+
 /*
  * Dump out the page tables associated with 'addr' in the currently active mm.
  */
@@ -141,7 +150,7 @@ static void show_pte(unsigned long addr)
 
 	pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-		 vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
+		 vabits_actual, mm_to_pgd_phys(mm));
 	pgdp = pgd_offset(mm, addr);
 	pgd = READ_ONCE(*pgdp);
 	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -259,14 +268,18 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 	par = read_sysreg(par_el1);
 	local_irq_restore(flags);
 
+	/*
+	 * If we now have a valid translation, treat the translation fault as
+	 * spurious.
+	 */
 	if (!(par & SYS_PAR_EL1_F))
-		return false;
+		return true;
 
 	/*
 	 * If we got a different type of fault from the AT instruction,
 	 * treat the translation fault as spurious.
 	 */
-	dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par);
+	dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
 	return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
 }
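Note: the fault.c hunk above (together with the sysreg.h hunk earlier) fixes two bugs in the spurious-fault check: SYS_PAR_EL1_F is bit 0, not BIT(1), and a clear F bit after the AT retry means the translation now succeeds, so the earlier fault was spurious and the function must return true; separately, the fault status code has to be extracted from PAR_EL1 with FIELD_GET(), not packed with FIELD_PREP(). A userspace sketch of the prep/get distinction, using simplified stand-ins for the kernel's bits.h/bitfield.h macros:

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-ins for GENMASK()/FIELD_PREP()/FIELD_GET().
 * FIELD_PREP() shifts a value *into* a field mask; FIELD_GET()
 * pulls a field *out* of a register value. Using FIELD_PREP()
 * to decode PAR_EL1.FST therefore returned nonsense.
 */
#define GENMASK_SKETCH(h, l) \
	(((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))
#define FIELD_PREP_SKETCH(mask, val) \
	(((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET_SKETCH(mask, reg) \
	(((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

#define PAR_EL1_F	(1ULL << 0)		/* fault bit: bit 0, not bit 1 */
#define PAR_EL1_FST	GENMASK_SKETCH(6, 1)	/* fault status: bits 6:1 */

int main(void)
{
	/* Fault reported, with a made-up status code 0x25 packed into FST */
	uint64_t par = PAR_EL1_F | FIELD_PREP_SKETCH(PAR_EL1_FST, 0x25);

	printf("F   = %llu\n", (unsigned long long)(par & PAR_EL1_F));
	printf("FST = 0x%llx\n",
	       (unsigned long long)FIELD_GET_SKETCH(PAR_EL1_FST, par));
	return 0;
}
```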
"swapper" : "user", PAGE_SIZE / SZ_1K, - vabits_actual, (unsigned long)virt_to_phys(mm->pgd)); + vabits_actual, mm_to_pgd_phys(mm)); pgdp = pgd_offset(mm, addr); pgd = READ_ONCE(*pgdp); pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); @@ -259,14 +268,18 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, par = read_sysreg(par_el1); local_irq_restore(flags); + /* + * If we now have a valid translation, treat the translation fault as + * spurious. + */ if (!(par & SYS_PAR_EL1_F)) - return false; + return true; /* * If we got a different type of fault from the AT instruction, * treat the translation fault as spurious. */ - dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par); + dfsc = FIELD_GET(SYS_PAR_EL1_FST, par); return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; } diff --git a/include/linux/sched.h b/include/linux/sched.h index 2c2e56bd8913..67a1d86981a9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout); extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); +asmlinkage void preempt_schedule_irq(void); extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig index cc00364bd2c2..9fe698ff62ec 100644 --- a/lib/vdso/Kconfig +++ b/lib/vdso/Kconfig @@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO help This config option enables the compat VDSO layer. -config CROSS_COMPILE_COMPAT_VDSO - string "32 bit Toolchain prefix for compat vDSO" - default "" - depends on GENERIC_COMPAT_VDSO - help - Defines the cross-compiler prefix for compiling compat vDSO. - If a 64 bit compiler (i.e. x86_64) can compile the VDSO for - 32 bit, it does not need to define this parameter. - endif |