author     Nicholas Piggin <npiggin@gmail.com>      2019-02-26 18:51:07 +1000
committer  Michael Ellerman <mpe@ellerman.id.au>    2019-02-26 23:28:24 +1100
commit     ccd477028a202993b9ddca5d2404fdaca3b7a55c
tree       075f65a46de61f8f45856cd0bf8fa71e85b9262c /arch/powerpc
parent     3b4d07d2674f6b4a9281031f99d1f7efd325b16d
powerpc/64s: Fix HV NMI vs HV interrupt recoverability test
HV interrupts that use HSRR registers do not enter with MSR[RI] clear,
but their entry code is not recoverable vs NMI, due to shared use of
HSPRG1 as a scratch register to save r13.
This means that a system reset or machine check that hits in HSRR
interrupt entry can cause r13 to be silently corrupted.
Fix this by marking NMIs non-recoverable if they land in HV interrupt
ranges.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
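To make the failure mode concrete, here is a toy model in plain C (an editor's sketch, not kernel code: r13 and hsprg1 are ordinary variables standing in for the GPR and the scratch SPR). It shows how an NMI that parks its own r13 in the same scratch slot destroys the value the interrupted HV entry had saved there:

#include <stdio.h>

static unsigned long r13 = 0xAAAA;      /* interrupted context's r13 */
static unsigned long hsprg1;            /* shared scratch "register" */

static void nmi_entry(void)
{
        hsprg1 = r13;   /* NMI parks its r13 in the same slot,
                           overwriting the value HV entry saved */
        /* ... NMI handler body runs ... */
        r13 = hsprg1;   /* NMI restores symmetrically and returns */
}

int main(void)
{
        hsprg1 = r13;           /* HV interrupt entry: park r13 */
        r13 = 0xBBBB;           /* entry code repurposes r13 (e.g. to
                                   hold the PACA pointer) */
        nmi_entry();            /* NMI lands inside this window */

        /* HV entry reads back what it believes is the original r13 */
        printf("restored %#lx, expected 0xaaaa\n", hsprg1); /* 0xbbbb */
        return 0;
}

Note that the NMI itself comes and goes cleanly; only the interrupted HV entry observes the corruption, which is why the fix makes the NMI side detect the window and declare itself unrecoverable rather than return.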
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/asm-prototypes.h |  8
-rw-r--r--  arch/powerpc/include/asm/nmi.h            |  2
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S      |  8
-rw-r--r--  arch/powerpc/kernel/mce.c                 |  3
-rw-r--r--  arch/powerpc/kernel/traps.c               | 66
5 files changed, 87 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 1484df6779ab..e01f31fb0865 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -51,6 +51,14 @@ int exit_vmx_usercopy(void);
 int enter_vmx_ops(void);
 void *exit_vmx_ops(void *dest);
 
+/* Exceptions */
+#ifdef CONFIG_PPC_POWERNV
+extern unsigned long real_trampolines_start;
+extern unsigned long real_trampolines_end;
+extern unsigned long virt_trampolines_start;
+extern unsigned long virt_trampolines_end;
+#endif
+
 /* Traps */
 long machine_check_early(struct pt_regs *regs);
 long hmi_exception_realmode(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index bd9ba8defd72..84b4cfe73edd 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -14,4 +14,6 @@ extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
 #define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
 #endif
 
+extern void hv_nmi_check_nonrecoverable(struct pt_regs *regs);
+
 #endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index b179b8b5d3f0..76442af8c191 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -68,6 +68,14 @@ OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
 OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
 OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
 OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
+
+#ifdef CONFIG_PPC_POWERNV
+	.globl real_trampolines_start
+	.globl real_trampolines_end
+	.globl virt_trampolines_start
+	.globl virt_trampolines_end
+#endif
+
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
  * Data area reserved for FWNMI option.
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index d501b48f287e..b5fec1f9751a 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -31,6 +31,7 @@
 
 #include <asm/machdep.h>
 #include <asm/mce.h>
+#include <asm/nmi.h>
 
 static DEFINE_PER_CPU(int, mce_nest_count);
 static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
@@ -490,6 +491,8 @@ long machine_check_early(struct pt_regs *regs)
 {
 	long handled = 0;
 
+	hv_nmi_check_nonrecoverable(regs);
+
 	/*
 	 * See if platform is capable of handling machine check.
 	 */
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index b25bc8af7d38..eee8f843f3d6 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -369,6 +369,70 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 	force_sig_fault(signr, code, (void __user *)addr, current);
 }
 
+/*
+ * The interrupt architecture has a quirk in that the HV interrupts excluding
+ * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
+ * that an interrupt handler must do is save off a GPR into a scratch register,
+ * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
+ * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
+ * that it is non-reentrant, which leads to random data corruption.
+ *
+ * The solution is for NMI interrupts in HV mode to check if they originated
+ * from these critical HV interrupt regions. If so, then mark them not
+ * recoverable.
+ *
+ * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
+ * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
+ * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
+ * that would work. However any other guest OS that may have the SPRG live
+ * and MSR[RI]=1 could encounter silent corruption.
+ *
+ * Builds that do not support KVM could take this second option to increase
+ * the recoverability of NMIs.
+ */
+void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
+{
+#ifdef CONFIG_PPC_POWERNV
+	unsigned long kbase = (unsigned long)_stext;
+	unsigned long nip = regs->nip;
+
+	if (!(regs->msr & MSR_RI))
+		return;
+	if (!(regs->msr & MSR_HV))
+		return;
+	if (regs->msr & MSR_PR)
+		return;
+
+	/*
+	 * Now test if the interrupt has hit a range that may be using
+	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
+	 * problem ranges all run un-relocated. Test real and virt modes
+	 * at the same time by dropping the high bit of the nip (virt mode
+	 * entry points still have the +0x4000 offset).
+	 */
+	nip &= ~0xc000000000000000ULL;
+	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
+		goto nonrecoverable;
+	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
+		goto nonrecoverable;
+	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
+		goto nonrecoverable;
+	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
+		goto nonrecoverable;
+	/* Trampoline code runs un-relocated, so subtract kbase. */
+	if (nip >= real_trampolines_start - kbase &&
+	    nip < real_trampolines_end - kbase)
+		goto nonrecoverable;
+	if (nip >= virt_trampolines_start - kbase &&
+	    nip < virt_trampolines_end - kbase)
+		goto nonrecoverable;
+	return;
+
+nonrecoverable:
+	regs->msr &= ~MSR_RI;
+#endif
+}
+
 void system_reset_exception(struct pt_regs *regs)
 {
 	/*
@@ -379,6 +443,8 @@ void system_reset_exception(struct pt_regs *regs)
 	if (!nested)
 		nmi_enter();
 
+	hv_nmi_check_nonrecoverable(regs);
+
 	__this_cpu_inc(irq_stat.sreset_irqs);
 
 	/* See if any machine dependent calls */