author     Linus Torvalds <torvalds@linux-foundation.org>  2019-07-26 11:20:42 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-07-26 11:20:42 -0700
commit     0ed288665686a52781c0ff04ddfe402c7a5397e1
tree       7f00fac87227c85eb13cf26ac295de1ecbe3254d /arch
parent     4792ba1f1ff0db30369f7016c1611fda3f84b895
parent     5a46d3f71d5e5a9f82eabc682f996f1281705ac7
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
"There's more here than we usually have at this stage, but that's
mainly down to the stacktrace changes which came in slightly too late
for the merge window.
Summary:
- Big bad batch of MAINTAINERS updates
- Fix handling of SP alignment fault exceptions
- Fix PSTATE.SSBS handling on heterogeneous systems
- Fix fallout from moving to the generic vDSO implementation
- Fix stack unwinding in the face of frame corruption
- Fix off-by-one in IORT code
- Minor SVE cleanups"
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
ACPI/IORT: Fix off-by-one check in iort_dev_find_its_id()
arm64: entry: SP Alignment Fault doesn't write to FAR_EL1
arm64: Force SSBS on context switch
MAINTAINERS: Update my email address
MAINTAINERS: Update my email address
MAINTAINERS: Fix spelling mistake in my name
MAINTAINERS: Update my email address to @kernel.org
arm64: mm: Drop pte_huge()
arm64/sve: Fix a couple of magic numbers for the Z-reg count
arm64/sve: Factor out FPSIMD to SVE state conversion
arm64: stacktrace: Better handle corrupted stacks
arm64: stacktrace: Factor out backtrace initialisation
arm64: stacktrace: Constify stacktrace.h functions
arm64: vdso: Cleanup Makefiles
arm64: vdso: fix flip/flop vdso build bug
arm64: vdso: Fix population of AT_SYSINFO_EHDR for compat vdso
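
The headline change in this pull is the hardened unwinder. Its termination rules are simple to state: a frame record must lie on an accessible stack, records on the same stack must appear at strictly increasing addresses, and once the unwinder transitions off a stack it may never return to it. What follows is a minimal standalone C model of those rules; the types, the classify() helper and the fake stack memory are simplified stand-ins invented for illustration, not the kernel's implementation:

/* unwind_model.c - build with `cc -std=c11 unwind_model.c`. */
#include <stdio.h>

enum stack_type { STACK_UNKNOWN, STACK_TASK, NR_STACK_TYPES };

/* A fake 'task stack': each frame record is two slots, {next_fp, lr}. */
static _Alignas(16) unsigned long stack_mem[64];

static enum stack_type classify(unsigned long fp)
{
	unsigned long base = (unsigned long)&stack_mem[0];
	unsigned long top = (unsigned long)&stack_mem[64];

	/* Needs room for a two-slot frame record. */
	return (fp >= base && fp + 16 <= top) ? STACK_TASK : STACK_UNKNOWN;
}

struct frame {
	unsigned long fp, pc;
	unsigned long stacks_done;	/* bitmap: stacks fully unwound */
	unsigned long prev_fp;
	enum stack_type prev_type;
};

static int unwind_one(struct frame *f)
{
	unsigned long fp = f->fp;
	enum stack_type type = classify(fp);

	if ((fp & 0xf) || type == STACK_UNKNOWN)
		return -1;
	if (f->stacks_done & (1UL << type))
		return -1;		/* never revisit a finished stack */
	if (type == f->prev_type) {
		/* Same stack: records must move strictly upward. */
		if (fp <= f->prev_fp)
			return -1;	/* cycle or corruption */
	} else {
		/* Stack transition: the previous stack is now off limits. */
		f->stacks_done |= 1UL << f->prev_type;
	}
	f->fp = *(unsigned long *)fp;
	f->pc = *(unsigned long *)(fp + 8);
	f->prev_fp = fp;
	f->prev_type = type;
	return 0;
}

int main(void)
{
	struct frame f = { .prev_type = STACK_UNKNOWN };
	int depth = 0;

	/* Build a deliberately cyclic chain: record A -> record B -> A. */
	stack_mem[0] = (unsigned long)&stack_mem[4];	/* A.next_fp = B */
	stack_mem[1] = 0x1111;				/* A.lr */
	stack_mem[4] = (unsigned long)&stack_mem[0];	/* B.next_fp = A! */
	stack_mem[5] = 0x2222;				/* B.lr */

	f.fp = (unsigned long)&stack_mem[0];
	while (unwind_one(&f) == 0)
		printf("frame %d: pc=%#lx\n", depth++, f.pc);
	printf("unwind terminated after %d frames\n", depth);
	return 0;
}

Against the cyclic chain built in main(), the loop stops after two frames instead of spinning forever, which is precisely the corrupted-stack failure mode this series closes.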
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/include/asm/elf.h         |  2
-rw-r--r--  arch/arm64/include/asm/pgtable.h     |  1
-rw-r--r--  arch/arm64/include/asm/processor.h   | 14
-rw-r--r--  arch/arm64/include/asm/stacktrace.h  | 78
-rw-r--r--  arch/arm64/kernel/entry.S            | 22
-rw-r--r--  arch/arm64/kernel/fpsimd.c           | 29
-rw-r--r--  arch/arm64/kernel/perf_callchain.c   |  7
-rw-r--r--  arch/arm64/kernel/process.c          | 36
-rw-r--r--  arch/arm64/kernel/return_address.c   |  9
-rw-r--r--  arch/arm64/kernel/stacktrace.c       | 59
-rw-r--r--  arch/arm64/kernel/time.c             |  7
-rw-r--r--  arch/arm64/kernel/traps.c            | 13
-rw-r--r--  arch/arm64/kernel/vdso/Makefile      | 13
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile    | 14
14 files changed, 207 insertions(+), 97 deletions(-)
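
A note on the SSBS fix before the diff itself: PSTATE.SSBS is only implemented on CPUs with the Speculative Store Bypass Safe extension, so on a heterogeneous system a task can migrate through a CPU that treats the bit as RES0 and silently lose it, leaving the task running with the mitigation unintentionally enabled from then on. The process.c hunk below fixes this by reasserting the bit at context switch. As a minimal standalone model of that decision (the task struct and mitigation enum here are simplified stand-ins, not kernel definitions):

/* ssbs_model.c - a standalone model of ssbs_thread_switch() below. */
#include <stdbool.h>
#include <stdio.h>

#define PSR_SSBS_BIT	(1UL << 12)	/* AArch64 PSTATE.SSBS */

enum ssbd_state { SSBD_KERNEL, SSBD_FORCE_ENABLE, SSBD_FORCE_DISABLE };

struct task {
	bool kthread;		/* kernel thread: saved user regs may be junk */
	bool wants_mitigation;	/* per-task TIF_SSBD equivalent */
	unsigned long pstate;	/* saved user PSTATE */
};

static void ssbs_switch(struct task *next, enum ssbd_state global)
{
	if (next->kthread)
		return;		/* nothing to restore for kernel threads */

	/* If the mitigation is in force, SSBS must stay clear. */
	if (global == SSBD_FORCE_ENABLE || next->wants_mitigation)
		return;

	/*
	 * Otherwise force SSBS back on: the task may have just run on a
	 * CPU that treats the bit as RES0 and dropped it.
	 */
	next->pstate |= PSR_SSBS_BIT;
}

int main(void)
{
	/* A user task whose SSBS bit was lost while migrated. */
	struct task t = { .kthread = false, .wants_mitigation = false };

	ssbs_switch(&t, SSBD_KERNEL);
	printf("SSBS %s\n", (t.pstate & PSR_SSBS_BIT) ? "restored" : "clear");
	return 0;
}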
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 3c7037c6ba9b..b618017205a3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -202,7 +202,7 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 ({									\
 	set_thread_flag(TIF_32BIT);					\
 })
-#ifdef CONFIG_GENERIC_COMPAT_VDSO
+#ifdef CONFIG_COMPAT_VDSO
 #define COMPAT_ARCH_DLINFO					\
 do {								\
 	/*							\
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 87a4b2ddc1a1..3f5461f7b560 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -301,7 +301,6 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
 /*
  * Huge pte definitions.
  */
-#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
 #define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))
 
 /*
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fd5b1a4efc70..844e2964b0f5 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
 		regs->pmr_save = GIC_PRIO_IRQON;
 }
 
+static inline void set_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_SSBS_BIT;
+}
+
+static inline void set_compat_ssbs_bit(struct pt_regs *regs)
+{
+	regs->pstate |= PSR_AA32_SSBS_BIT;
+}
+
 static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 				unsigned long sp)
 {
@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
 	regs->pstate = PSR_MODE_EL0t;
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_SSBS_BIT;
+		set_ssbs_bit(regs);
 
 	regs->sp = sp;
 }
@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 #endif
 
 	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
-		regs->pstate |= PSR_AA32_SSBS_BIT;
+		set_compat_ssbs_bit(regs);
 
 	regs->compat_sp = sp;
 }
diff --git a/arch/arm64/include/asm/stacktrace.h b/arch/arm64/include/asm/stacktrace.h
index df45af931459..4d9b1f48dc39 100644
--- a/arch/arm64/include/asm/stacktrace.h
+++ b/arch/arm64/include/asm/stacktrace.h
@@ -8,19 +8,12 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
+#include <linux/types.h>
 
 #include <asm/memory.h>
 #include <asm/ptrace.h>
 #include <asm/sdei.h>
 
-struct stackframe {
-	unsigned long fp;
-	unsigned long pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	int graph;
-#endif
-};
-
 enum stack_type {
 	STACK_TYPE_UNKNOWN,
 	STACK_TYPE_TASK,
@@ -28,6 +21,7 @@ enum stack_type {
 	STACK_TYPE_OVERFLOW,
 	STACK_TYPE_SDEI_NORMAL,
 	STACK_TYPE_SDEI_CRITICAL,
+	__NR_STACK_TYPES
 };
 
 struct stack_info {
@@ -36,6 +30,37 @@ struct stack_info {
 	enum stack_type type;
 };
 
+/*
+ * A snapshot of a frame record or fp/lr register values, along with some
+ * accounting information necessary for robust unwinding.
+ *
+ * @fp:          The fp value in the frame record (or the real fp)
+ * @pc:          The lr value in the frame record (or the real lr)
+ *
+ * @stacks_done: Stacks which have been entirely unwound, to which it is no
+ *               longer valid to unwind.
+ *
+ * @prev_fp:     The fp that pointed to this frame record, or a synthetic value
+ *               of 0. This is used to ensure that within a stack, each
+ *               subsequent frame record is at an increasing address.
+ * @prev_type:   The type of stack this frame record was on, or a synthetic
+ *               value of STACK_TYPE_UNKNOWN. This is used to detect a
+ *               transition from one stack to another.
+ *
+ * @graph:       When FUNCTION_GRAPH_TRACER is selected, holds the index of a
+ *               replacement lr value in the ftrace graph stack.
+ */
+struct stackframe {
+	unsigned long fp;
+	unsigned long pc;
+	DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
+	unsigned long prev_fp;
+	enum stack_type prev_type;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	int graph;
+#endif
+};
+
 extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame);
 extern void walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
 			    int (*fn)(struct stackframe *, void *), void *data);
@@ -64,8 +89,9 @@ static inline bool on_irq_stack(unsigned long sp,
 	return true;
 }
 
-static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp,
-				 struct stack_info *info)
+static inline bool on_task_stack(const struct task_struct *tsk,
+				 unsigned long sp,
+				 struct stack_info *info)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
@@ -112,10 +138,13 @@ static inline bool on_overflow_stack(unsigned long sp,
  * We can only safely access per-cpu stacks from current in a non-preemptible
  * context.
  */
-static inline bool on_accessible_stack(struct task_struct *tsk,
-				       unsigned long sp,
-				       struct stack_info *info)
+static inline bool on_accessible_stack(const struct task_struct *tsk,
+				       unsigned long sp,
+				       struct stack_info *info)
 {
+	if (info)
+		info->type = STACK_TYPE_UNKNOWN;
+
 	if (on_task_stack(tsk, sp, info))
 		return true;
 	if (tsk != current || preemptible())
@@ -130,4 +159,27 @@ static inline bool on_accessible_stack(struct task_struct *tsk,
 	return false;
 }
 
+static inline void start_backtrace(struct stackframe *frame,
+				   unsigned long fp, unsigned long pc)
+{
+	frame->fp = fp;
+	frame->pc = pc;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	frame->graph = 0;
+#endif
+
+	/*
+	 * Prime the first unwind.
+	 *
+	 * In unwind_frame() we'll check that the FP points to a valid stack,
+	 * which can't be STACK_TYPE_UNKNOWN, and the first unwind will be
+	 * treated as a transition to whichever stack that happens to be. The
+	 * prev_fp value won't be used, but we set it to 0 such that it is
+	 * definitely not an accessible stack address.
+	 */
+	bitmap_zero(frame->stacks_done, __NR_STACK_TYPES);
+	frame->prev_fp = 0;
+	frame->prev_type = STACK_TYPE_UNKNOWN;
+}
+
 #endif	/* __ASM_STACKTRACE_H */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 9cdc4592da3e..320a30dbe35e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -586,10 +586,8 @@ el1_sync:
 	b.eq	el1_ia
 	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
-	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
-	b.eq	el1_sp_pc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el1_sp_pc
+	b.eq	el1_pc
 	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
 	b.eq	el1_undef
 	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
@@ -611,9 +609,11 @@ el1_da:
 	bl	do_mem_abort
 
 	kernel_exit 1
-el1_sp_pc:
+el1_pc:
 	/*
-	 * Stack or PC alignment exception handling
+	 * PC alignment exception handling. We don't handle SP alignment faults,
+	 * since we will have hit a recursive exception when trying to push the
+	 * initial pt_regs.
 	 */
 	mrs	x0, far_el1
 	inherit_daif	pstate=x23, tmp=x2
@@ -732,9 +732,9 @@ el0_sync:
 	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
 	b.eq	el0_sys
 	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_sp
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_pc
 	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
 	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
@@ -758,7 +758,7 @@ el0_sync_compat:
 	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
 	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
-	b.eq	el0_sp_pc
+	b.eq	el0_pc
 	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
 	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
@@ -858,11 +858,15 @@ el0_fpsimd_exc:
 	mov	x1, sp
 	bl	do_fpsimd_exc
 	b	ret_to_user
+el0_sp:
+	ldr	x26, [sp, #S_SP]
+	b	el0_sp_pc
+el0_pc:
+	mrs	x26, far_el1
 el0_sp_pc:
 	/*
 	 * Stack or PC alignment exception handling
 	 */
-	mrs	x26, far_el1
 	gic_prio_kentry_setup tmp=x0
 	enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index eec4776ae5f0..37d3912cfe06 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -406,6 +406,18 @@ static __uint128_t arm64_cpu_to_le128(__uint128_t x)
 
 #define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
 
+static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
+			    unsigned int vq)
+{
+	unsigned int i;
+	__uint128_t *p;
+
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
+}
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
@@ -423,17 +435,12 @@ static void fpsimd_to_sve(struct task_struct *task)
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
-	__uint128_t *p;
 
 	if (!system_supports_sve())
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i) {
-		p = (__uint128_t *)ZREG(sst, vq, i);
-		*p = arm64_cpu_to_le128(fst->vregs[i]);
-	}
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 /*
@@ -459,7 +466,7 @@ static void sve_to_fpsimd(struct task_struct *task)
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i) {
+	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 		p = (__uint128_t const *)ZREG(sst, vq, i);
 		fst->vregs[i] = arm64_le128_to_cpu(*p);
 	}
@@ -550,8 +557,6 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
-	unsigned int i;
-	__uint128_t *p;
 
 	if (!test_tsk_thread_flag(task, TIF_SVE))
 		return;
@@ -559,11 +564,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
-
-	for (i = 0; i < 32; ++i) {
-		p = (__uint128_t *)ZREG(sst, vq, i);
-		*p = arm64_cpu_to_le128(fst->vregs[i]);
-	}
+	__fpsimd_to_sve(sst, fst, vq);
 }
 
 int sve_set_vector_length(struct task_struct *task,
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index 9d63514b9836..b0e03e052dd1 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -154,12 +154,7 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
 		return;
 	}
 
-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame, regs->regs[29], regs->pc);
 	walk_stackframe(current, &frame, callchain_trace, entry);
 }
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6a869d9f304f..f674f28df663 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 			childregs->pstate |= PSR_UAO_BIT;
 
 		if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
-			childregs->pstate |= PSR_SSBS_BIT;
+			set_ssbs_bit(childregs);
 
 		if (system_uses_irq_prio_masking())
 			childregs->pmr_save = GIC_PRIO_IRQON;
@@ -443,6 +443,32 @@ void uao_thread_switch(struct task_struct *next)
 }
 
 /*
+ * Force SSBS state on context-switch, since it may be lost after migrating
+ * from a CPU which treats the bit as RES0 in a heterogeneous system.
+ */
+static void ssbs_thread_switch(struct task_struct *next)
+{
+	struct pt_regs *regs = task_pt_regs(next);
+
+	/*
+	 * Nothing to do for kernel threads, but 'regs' may be junk
+	 * (e.g. idle task) so check the flags and bail early.
+	 */
+	if (unlikely(next->flags & PF_KTHREAD))
+		return;
+
+	/* If the mitigation is enabled, then we leave SSBS clear. */
+	if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
+	    test_tsk_thread_flag(next, TIF_SSBD))
+		return;
+
+	if (compat_user_mode(regs))
+		set_compat_ssbs_bit(regs);
+	else if (user_mode(regs))
+		set_ssbs_bit(regs);
+}
+
+/*
  * We store our current task in sp_el0, which is clobbered by userspace. Keep a
  * shadow copy so that we can restore this upon entry from userspace.
  *
@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	entry_task_switch(next);
 	uao_thread_switch(next);
 	ptrauth_thread_switch(next);
+	ssbs_thread_switch(next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -498,11 +525,8 @@ unsigned long get_wchan(struct task_struct *p)
 	if (!stack_page)
 		return 0;
 
-	frame.fp = thread_saved_fp(p);
-	frame.pc = thread_saved_pc(p);
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
+	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));
+
 	do {
 		if (unwind_frame(p, &frame))
 			goto out;
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index b21cba90f82d..c4ae647d2306 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -38,12 +38,9 @@ void *return_address(unsigned int level)
 	data.level = level + 2;
 	data.addr = NULL;
 
-	frame.fp = (unsigned long)__builtin_frame_address(0);
-	frame.pc = (unsigned long)return_address; /* dummy */
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame,
+			(unsigned long)__builtin_frame_address(0),
+			(unsigned long)return_address);
 	walk_stackframe(current, &frame, save_return_addr, &data);
 
 	if (!data.level)
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 62d395151abe..2b160ae594eb 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -29,9 +29,18 @@
  *	ldp	x29, x30, [sp]
  *	add	sp, sp, #0x10
  */
+
+/*
+ * Unwind from one frame record (A) to the next frame record (B).
+ *
+ * We terminate early if the location of B indicates a malformed chain of frame
+ * records (e.g. a cycle), determined based on the location and fp value of A
+ * and the location (but not the fp value) of B.
+ */
 int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 {
 	unsigned long fp = frame->fp;
+	struct stack_info info;
 
 	if (fp & 0xf)
 		return -EINVAL;
@@ -39,11 +48,40 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 	if (!tsk)
 		tsk = current;
 
-	if (!on_accessible_stack(tsk, fp, NULL))
+	if (!on_accessible_stack(tsk, fp, &info))
+		return -EINVAL;
+
+	if (test_bit(info.type, frame->stacks_done))
 		return -EINVAL;
 
+	/*
+	 * As stacks grow downward, any valid record on the same stack must be
+	 * at a strictly higher address than the prior record.
+	 *
+	 * Stacks can nest in several valid orders, e.g.
+	 *
+	 * TASK -> IRQ -> OVERFLOW -> SDEI_NORMAL
+	 * TASK -> SDEI_NORMAL -> SDEI_CRITICAL -> OVERFLOW
+	 *
+	 * ... but the nesting itself is strict. Once we transition from one
+	 * stack to another, it's never valid to unwind back to that first
+	 * stack.
+	 */
+	if (info.type == frame->prev_type) {
+		if (fp <= frame->prev_fp)
+			return -EINVAL;
+	} else {
+		set_bit(frame->prev_type, frame->stacks_done);
+	}
+
+	/*
+	 * Record this frame record's values and location. The prev_fp and
+	 * prev_type are only meaningful to the next unwind_frame() invocation.
+	 */
 	frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
 	frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
+	frame->prev_fp = fp;
+	frame->prev_type = info.type;
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
@@ -122,12 +160,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	data.skip = trace->skip;
 	data.no_sched_functions = 0;
 
-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
-
+	start_backtrace(&frame, regs->regs[29], regs->pc);
 	walk_stackframe(current, &frame, save_trace, &data);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_regs);
@@ -146,17 +179,15 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
 	data.no_sched_functions = nosched;
 
 	if (tsk != current) {
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
+		start_backtrace(&frame, thread_saved_fp(tsk),
+				thread_saved_pc(tsk));
 	} else {
 		/* We don't want this function nor the caller */
 		data.skip += 2;
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)__save_stack_trace;
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)__save_stack_trace);
 	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
 
 	walk_stackframe(tsk, &frame, save_trace, &data);
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 9f25aedeac9d..0b2946414dc9 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -38,11 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 	if (!in_lock_functions(regs->pc))
 		return regs->pc;
 
-	frame.fp = regs->regs[29];
-	frame.pc = regs->pc;
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
+	start_backtrace(&frame, regs->regs[29], regs->pc);
+
 	do {
 		int ret = unwind_frame(NULL, &frame);
 		if (ret < 0)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 8c03456dade6..d3313797cca9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -100,18 +100,17 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 		return;
 
 	if (tsk == current) {
-		frame.fp = (unsigned long)__builtin_frame_address(0);
-		frame.pc = (unsigned long)dump_backtrace;
+		start_backtrace(&frame,
+				(unsigned long)__builtin_frame_address(0),
+				(unsigned long)dump_backtrace);
 	} else {
 		/*
 		 * task blocked in __switch_to
 		 */
-		frame.fp = thread_saved_fp(tsk);
-		frame.pc = thread_saved_pc(tsk);
+		start_backtrace(&frame,
+				thread_saved_fp(tsk),
+				thread_saved_pc(tsk));
 	}
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = 0;
-#endif
 
 	printk("Call trace:\n");
 	do {
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 4ab863045188..dd2514bb1511 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -32,10 +32,10 @@ UBSAN_SANITIZE := n
 OBJECT_FILES_NON_STANDARD := y
 KCOV_INSTRUMENT := n
 
-ifeq ($(c-gettimeofday-y),)
 CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny
-else
-CFLAGS_vgettimeofday.o = -O2 -mcmodel=tiny -include $(c-gettimeofday-y)
+
+ifneq ($(c-gettimeofday-y),)
+  CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
 endif
 
 # Clang versions less than 8 do not support -mcmodel=tiny
@@ -57,8 +57,7 @@ $(obj)/vdso.o : $(obj)/vdso.so
 
 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
-	$(call if_changed,ld)
-	$(call if_changed,vdso_check)
+	$(call if_changed,vdsold_and_vdso_check)
 
 # Strip rule for the .so file
 $(obj)/%.so: OBJCOPYFLAGS := -S
@@ -74,8 +73,8 @@ include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
 	$(call if_changed,vdsosym)
 
 # Actual build commands
-quiet_cmd_vdsocc = VDSOCC   $@
-      cmd_vdsocc = $(CC) $(a_flags) $(c_flags) -c -o $@ $<
+quiet_cmd_vdsold_and_vdso_check = LD      $@
+      cmd_vdsold_and_vdso_check = $(cmd_ld); $(cmd_vdso_check)
 
 # Install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 60a4c6239712..1fba0776ed40 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -144,8 +144,7 @@ $(obj)/vdso.so.dbg: $(obj)/vdso.so.raw $(obj)/$(munge) FORCE
 
 # Link rule for the .so file, .lds has to be first
 $(obj)/vdso.so.raw: $(src)/vdso.lds $(obj-vdso) FORCE
-	$(call if_changed,vdsold)
-	$(call if_changed,vdso_check)
+	$(call if_changed,vdsold_and_vdso_check)
 
 # Compilation rules for the vDSO sources
 $(c-obj-vdso): %.o: %.c FORCE
@@ -156,14 +155,17 @@ $(asm-obj-vdso): %.o: %.S FORCE
 	$(call if_changed_dep,vdsoas)
 
 # Actual build commands
-quiet_cmd_vdsold = VDSOL   $@
+quiet_cmd_vdsold_and_vdso_check = LD32    $@
+      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+
+quiet_cmd_vdsold = LD32    $@
       cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
-quiet_cmd_vdsocc = VDSOC   $@
+quiet_cmd_vdsocc = CC32    $@
       cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
-quiet_cmd_vdsocc_gettimeofday = VDSOC_GTD   $@
+quiet_cmd_vdsocc_gettimeofday = CC32    $@
      cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
-quiet_cmd_vdsoas = VDSOA   $@
+quiet_cmd_vdsoas = AS32    $@
       cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
 
 quiet_cmd_vdsomunge = MUNGE   $@
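
Taken together, the backtrace changes converge every unwinding call site on a single pattern: prime the frame with start_backtrace(), then loop on unwind_frame() until it reports an error. A sketch of a typical caller follows; dump_my_trace() is hypothetical, but the helpers it uses match the declarations visible in the diff above:

/* Hypothetical caller showing the post-series unwind pattern. */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <asm/stacktrace.h>

static void dump_my_trace(struct task_struct *tsk)
{
	struct stackframe frame;

	if (tsk == current) {
		/* Live task: start from our own fp/pc. */
		start_backtrace(&frame,
				(unsigned long)__builtin_frame_address(0),
				(unsigned long)dump_my_trace);
	} else {
		/* Blocked task: start from the state saved in __switch_to(). */
		start_backtrace(&frame, thread_saved_fp(tsk),
				thread_saved_pc(tsk));
	}

	/*
	 * unwind_frame() now fails as soon as the chain of frame records
	 * leaves the accessible stacks, revisits a finished stack, or stops
	 * making progress, so this loop terminates even if the stack has
	 * been corrupted.
	 */
	do {
		printk(" pc: %pS\n", (void *)frame.pc);
	} while (!unwind_frame(tsk, &frame));
}

The same shape appears in dump_backtrace(), get_wchan(), profile_pc() and the perf and ftrace callers in the diff above, which is what made factoring out start_backtrace() worthwhile.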