Diffstat (limited to 'arch')
85 files changed, 818 insertions, 305 deletions
diff --git a/arch/Kconfig b/arch/Kconfig index cae0958a2298..21d0089117fe 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -198,9 +198,6 @@ config HAVE_KPROBES_ON_FTRACE config HAVE_NMI bool -config HAVE_NMI_WATCHDOG - depends on HAVE_NMI - bool # # An arch should select this if it provides all these things: # @@ -226,6 +223,12 @@ config GENERIC_SMP_IDLE_THREAD config GENERIC_IDLE_POLL_SETUP bool +config ARCH_HAS_FORTIFY_SOURCE + bool + help + An architecture should select this when it can successfully + build and run with CONFIG_FORTIFY_SOURCE. + # Select if arch has all set_memory_ro/rw/x/nx() functions in asm/cacheflush.h config ARCH_HAS_SET_MEMORY bool @@ -288,6 +291,28 @@ config HAVE_PERF_EVENTS_NMI subsystem. Also has support for calculating CPU cycle events to determine how many clock cycles in a given period. +config HAVE_HARDLOCKUP_DETECTOR_PERF + bool + depends on HAVE_PERF_EVENTS_NMI + help + The arch chooses to use the generic perf-NMI-based hardlockup + detector. Must define HAVE_PERF_EVENTS_NMI. + +config HAVE_NMI_WATCHDOG + depends on HAVE_NMI + bool + help + The arch provides a low level NMI watchdog. It provides + asm/nmi.h, and defines its own arch_touch_nmi_watchdog(). + +config HAVE_HARDLOCKUP_DETECTOR_ARCH + bool + select HAVE_NMI_WATCHDOG + help + The arch chooses to provide its own hardlockup detector, which is + a superset of the HAVE_NMI_WATCHDOG. It also conforms to config + interfaces and parameters provided by hardlockup detector subsystem. + config HAVE_PERF_REGS bool help diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index ea7832702a8f..f3a4bedd1afc 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c @@ -33,6 +33,7 @@ extern void error(char *); /* Not needed, but used in some headers pulled in by decompressors */ extern char * strstr(const char * s1, const char *s2); extern size_t strlen(const char *s); +extern int memcmp(const void *cs, const void *ct, size_t count); #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index d2315ffd8f12..f13ae153fb24 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -112,12 +112,8 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs); #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* This is the base location for PIE (ET_DYN with INTERP) loads. */ +#define ELF_ET_DYN_BASE 0x400000UL /* When the program starts, a1 contains a pointer to a function to be registered with atexit, as per the SVR4 ABI. 
A value of 0 means we diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index 58508900c4bb..14b5903f0224 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -110,8 +110,8 @@ void __sysreg_restore_state(struct kvm_cpu_context *ctxt); void __vgic_v3_save_state(struct kvm_vcpu *vcpu); void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); -void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); -void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp); +asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp); static inline bool __vfp_enabled(void) { return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); @@ -120,8 +120,8 @@ static inline bool __vfp_enabled(void) void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); -int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, +asmlinkage int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host); -int asmlinkage __hyp_do_panic(const char *, int, u32); +asmlinkage int __hyp_do_panic(const char *, int, u32); #endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c index 68c6ae0b9e4c..98fbfd235ac8 100644 --- a/arch/arm/kernel/atags_parse.c +++ b/arch/arm/kernel/atags_parse.c @@ -18,6 +18,7 @@ */ #include <linux/init.h> +#include <linux/initrd.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/root_dev.h> @@ -91,8 +92,6 @@ __tagtable(ATAG_VIDEOTEXT, parse_tag_videotext); #ifdef CONFIG_BLK_DEV_RAM static int __init parse_tag_ramdisk(const struct tag *tag) { - extern int rd_size, rd_image_start, rd_prompt, rd_doload; - rd_image_start = tag->u.ramdisk.start; rd_doload = (tag->u.ramdisk.flags & 1) == 0; rd_prompt = (tag->u.ramdisk.flags & 2) == 0; diff --git a/arch/arm/mach-sa1100/jornada720_ssp.c b/arch/arm/mach-sa1100/jornada720_ssp.c index b143c4659346..7fc11a3c17b4 100644 --- a/arch/arm/mach-sa1100/jornada720_ssp.c +++ b/arch/arm/mach-sa1100/jornada720_ssp.c @@ -33,7 +33,7 @@ static unsigned long jornada_ssp_flags; * we need to reverse all data we receive from the mcu due to its physical location * returns : 01110111 -> 11101110 */ -u8 inline jornada_ssp_reverse(u8 byte) +inline u8 jornada_ssp_reverse(u8 byte) { return ((0x80 & byte) >> 7) | diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 8addb851ab5e..dfd908630631 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -12,6 +12,7 @@ config ARM64 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_GIGANTIC_PAGE if (MEMORY_ISOLATION && COMPACTION) || CMA select ARCH_HAS_KCOV diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h index ac3fb7441510..acae781f7359 100644 --- a/arch/arm64/include/asm/elf.h +++ b/arch/arm64/include/asm/elf.h @@ -113,12 +113,11 @@ #define ELF_EXEC_PAGESIZE PAGE_SIZE /* - * This is the location that an ET_DYN program is loaded if exec'ed. Typical - * use of this is to invoke "./ld.so someprog" to test out a new version of - * the loader. We need to make sure that it is out of the way of the program - * that it will "exec", and that there is sufficient room for the brk. + * This is the base location for PIE (ET_DYN with INTERP) loads. 
On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. */ -#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3) +#define ELF_ET_DYN_BASE 0x100000000UL #ifndef __ASSEMBLY__ @@ -174,7 +173,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #ifdef CONFIG_COMPAT -#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3) +/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */ +#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL /* AArch32 registers. */ #define COMPAT_ELF_NGREG 18 diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h index fe5e287dc56b..b86a0865ddf1 100644 --- a/arch/arm64/include/asm/stackprotector.h +++ b/arch/arm64/include/asm/stackprotector.h @@ -30,6 +30,7 @@ static __always_inline void boot_init_stack_canary(void) /* Try to get a semi random initial value. */ get_random_bytes(&canary, sizeof(canary)); canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; current->stack_canary = canary; __stack_chk_guard = current->stack_canary; diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h index 2eb714c4639f..d0aa42907569 100644 --- a/arch/arm64/include/asm/string.h +++ b/arch/arm64/include/asm/string.h @@ -63,6 +63,11 @@ extern int memcmp(const void *, const void *, size_t); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #endif diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 687a358a3733..81f03959a4ab 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -191,14 +191,8 @@ void __init kasan_init(void) if (start >= end) break; - /* - * end + 1 here is intentional. We check several shadow bytes in - * advance to slightly speed up fastpath. In some rare cases - * we could cross boundary of mapped shadow, so we just map - * some more here. - */ vmemmap_populate((unsigned long)kasan_mem_to_shadow(start), - (unsigned long)kasan_mem_to_shadow(end) + 1, + (unsigned long)kasan_mem_to_shadow(end), pfn_to_nid(virt_to_pfn(start))); } diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index adc208c2ae9c..decccffb03ca 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -35,7 +35,7 @@ * Leave enough space between the mmap area and the stack to honour ulimit in * the face of randomisation. */ -#define MIN_GAP (SZ_128M + ((STACK_RND_MASK << PAGE_SHIFT) + 1)) +#define MIN_GAP (SZ_128M) #define MAX_GAP (STACK_TOP/6*5) static int mmap_is_legacy(void) @@ -65,6 +65,11 @@ unsigned long arch_mmap_rnd(void) static unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap; + + /* Values close to RLIM_INFINITY can overflow. 
*/ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; diff --git a/arch/blackfin/include/asm/nmi.h b/arch/blackfin/include/asm/nmi.h index b9caac4fcfd8..107d23705f46 100644 --- a/arch/blackfin/include/asm/nmi.h +++ b/arch/blackfin/include/asm/nmi.h @@ -9,4 +9,6 @@ #include <linux/nmi.h> +extern void arch_touch_nmi_watchdog(void); + #endif diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c index 633c37083e87..1e714329fe8a 100644 --- a/arch/blackfin/kernel/nmi.c +++ b/arch/blackfin/kernel/nmi.c @@ -190,7 +190,7 @@ static int __init init_nmi_wdt(void) } device_initcall(init_nmi_wdt); -void touch_nmi_watchdog(void) +void arch_touch_nmi_watchdog(void) { atomic_set(&nmi_touched[smp_processor_id()], 1); } diff --git a/arch/cris/arch-v10/drivers/gpio.c b/arch/cris/arch-v10/drivers/gpio.c index 64285e0d3481..dfd3b3ba5e4e 100644 --- a/arch/cris/arch-v10/drivers/gpio.c +++ b/arch/cris/arch-v10/drivers/gpio.c @@ -399,7 +399,7 @@ out: /* Main device API. ioctl's to read/set/clear bits, as well as to * set alarms to wait for using a subsequent select(). */ -unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg) +inline unsigned long setget_input(struct gpio_private *priv, unsigned long arg) { /* Set direction 0=unchanged 1=input, * return mask with 1=input */ @@ -450,7 +450,7 @@ unsigned long inline setget_input(struct gpio_private *priv, unsigned long arg) return dir_g_in_bits; } /* setget_input */ -unsigned long inline setget_output(struct gpio_private *priv, unsigned long arg) +inline unsigned long setget_output(struct gpio_private *priv, unsigned long arg) { if (USE_PORTS(priv)) { *priv->dir = *priv->dir_shadow |= diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index cce3bc3603ea..2cf7648787b2 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -1,7 +1,9 @@ generic-y += clkdev.h +generic-y += device.h generic-y += exec.h generic-y += extable.h +generic-y += fb.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/frv/include/asm/cmpxchg.h b/arch/frv/include/asm/cmpxchg.h index a899765102ea..ad1f11cfa92a 100644 --- a/arch/frv/include/asm/cmpxchg.h +++ b/arch/frv/include/asm/cmpxchg.h @@ -76,6 +76,7 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v); * - if (*ptr != test) then orig = *ptr; */ extern uint64_t __cmpxchg_64(uint64_t test, uint64_t new, volatile uint64_t *v); +#define cmpxchg64(p, o, n) __cmpxchg_64((o), (n), (p)) #ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS diff --git a/arch/frv/include/asm/device.h b/arch/frv/include/asm/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/arch/frv/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include <asm-generic/device.h> - diff --git a/arch/frv/include/asm/fb.h b/arch/frv/include/asm/fb.h deleted file mode 100644 index c7df38030992..000000000000 --- a/arch/frv/include/asm/fb.h +++ /dev/null @@ -1,12 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ -#include <linux/fb.h> - -#define fb_pgprotect(...) 
do {} while (0) - -static inline int fb_is_primary_device(struct fb_info *info) -{ - return 0; -} - -#endif /* _ASM_FB_H_ */ diff --git a/arch/frv/include/asm/tlbflush.h b/arch/frv/include/asm/tlbflush.h index 7ac5eafc5d98..75879420f578 100644 --- a/arch/frv/include/asm/tlbflush.h +++ b/arch/frv/include/asm/tlbflush.h @@ -18,10 +18,10 @@ #ifdef CONFIG_MMU #ifndef __ASSEMBLY__ -extern void asmlinkage __flush_tlb_all(void); -extern void asmlinkage __flush_tlb_mm(unsigned long contextid); -extern void asmlinkage __flush_tlb_page(unsigned long contextid, unsigned long start); -extern void asmlinkage __flush_tlb_range(unsigned long contextid, +extern asmlinkage void __flush_tlb_all(void); +extern asmlinkage void __flush_tlb_mm(unsigned long contextid); +extern asmlinkage void __flush_tlb_page(unsigned long contextid, unsigned long start); +extern asmlinkage void __flush_tlb_range(unsigned long contextid, unsigned long start, unsigned long end); #endif /* !__ASSEMBLY__ */ diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index 599507bcec91..c14815dca747 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -163,8 +163,3 @@ void arch_crash_save_vmcoreinfo(void) #endif } -phys_addr_t paddr_vmcoreinfo_note(void) -{ - return ia64_tpa((unsigned long)(char *)&vmcoreinfo_note); -} - diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 79c7c46d7dc1..555b11180156 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -334,7 +334,7 @@ static void ia64_mlogbuf_dump_from_init(void) ia64_mlogbuf_dump(); } -static void inline +static inline void ia64_mca_spin(const char *func) { if (monarch_cpu == smp_processor_id()) diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 5bc34eac9e01..b67bb4cb73ff 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c @@ -140,7 +140,7 @@ static inline u64 __iomem *pcibr_ate_addr(struct pcibus_info *pcibus_info, /* * Update the ate. */ -void inline +inline void ate_write(struct pcibus_info *pcibus_info, int ate_index, int count, volatile u64 ate) { diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 46d3df4b03a1..3bd9abc35485 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c @@ -52,7 +52,7 @@ * All registers defined in struct tioce will meet that criteria. 
*/ -static void inline +static inline void tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr) { u64 mmr_base; @@ -78,7 +78,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void __iomem *mmr_addr) } } -static void inline +static inline void tioce_mmr_war_post(struct tioce_kernel *kern, void __iomem *mmr_addr) { u64 mmr_base; diff --git a/arch/m68k/coldfire/intc-simr.c b/arch/m68k/coldfire/intc-simr.c index 7cf2c156f72d..15c4b7a6e38f 100644 --- a/arch/m68k/coldfire/intc-simr.c +++ b/arch/m68k/coldfire/intc-simr.c @@ -35,7 +35,7 @@ #define EINT7 67 /* EDGE Port interrupt 7 */ static unsigned int irqebitmap[] = { 0, 1, 4, 7 }; -static unsigned int inline irq2ebit(unsigned int irq) +static inline unsigned int irq2ebit(unsigned int irq) { return irqebitmap[irq - EINT0]; } @@ -51,7 +51,7 @@ static unsigned int inline irq2ebit(unsigned int irq) #define EINT1 65 /* EDGE Port interrupt 1 */ #define EINT7 71 /* EDGE Port interrupt 7 */ -static unsigned int inline irq2ebit(unsigned int irq) +static inline unsigned int irq2ebit(unsigned int irq) { return irq - EINT0; } diff --git a/arch/m68k/configs/m5208evb_defconfig b/arch/m68k/configs/m5208evb_defconfig index 4c7b7938d53a..a3102ff7e5ed 100644 --- a/arch/m68k/configs/m5208evb_defconfig +++ b/arch/m68k/configs/m5208evb_defconfig @@ -26,7 +26,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git a/arch/m68k/configs/m5249evb_defconfig b/arch/m68k/configs/m5249evb_defconfig index a782f368650f..f7bb9ed3efa8 100644 --- a/arch/m68k/configs/m5249evb_defconfig +++ b/arch/m68k/configs/m5249evb_defconfig @@ -27,7 +27,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git a/arch/m68k/configs/m5272c3_defconfig b/arch/m68k/configs/m5272c3_defconfig index 6f5fb92f5cbf..1e679f6a400f 100644 --- a/arch/m68k/configs/m5272c3_defconfig +++ b/arch/m68k/configs/m5272c3_defconfig @@ -27,7 +27,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig index b5d7cd1ce856..d2987b40423e 100644 --- a/arch/m68k/configs/m5275evb_defconfig +++ b/arch/m68k/configs/m5275evb_defconfig @@ -27,7 +27,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git a/arch/m68k/configs/m5307c3_defconfig b/arch/m68k/configs/m5307c3_defconfig index 1b4c09461c40..97a78c99eeee 100644 --- a/arch/m68k/configs/m5307c3_defconfig +++ b/arch/m68k/configs/m5307c3_defconfig @@ -27,7 +27,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git 
a/arch/m68k/configs/m5407c3_defconfig b/arch/m68k/configs/m5407c3_defconfig index 275ad543d4bc..766a97f39a3a 100644 --- a/arch/m68k/configs/m5407c3_defconfig +++ b/arch/m68k/configs/m5407c3_defconfig @@ -28,7 +28,6 @@ CONFIG_INET=y # CONFIG_INET_XFRM_MODE_TRANSPORT is not set # CONFIG_INET_XFRM_MODE_TUNNEL is not set # CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set # CONFIG_INET_DIAG is not set # CONFIG_IPV6 is not set # CONFIG_FW_LOADER is not set diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index a1bdb1ea5234..39b9f311c4ef 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -116,7 +116,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { pud_t *pud; - pud = (pud_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PUD_ORDER); + pud = (pud_t *) __get_free_pages(GFP_KERNEL, PUD_ORDER); if (pud) pud_init((unsigned long)pud, (unsigned long)invalid_pmd_table); return pud; diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 98a117a05fbc..bab3d41e5987 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -47,7 +47,7 @@ extern int __cpu_logical_map[NR_CPUS]; /* Mask of CPUs which are currently definitely operating coherently */ extern cpumask_t cpu_coherent_mask; -extern void asmlinkage smp_bootstrap(void); +extern asmlinkage void smp_bootstrap(void); extern void calculate_cpu_foreign_map(void); diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 94627a3a6a0d..50c020c47e54 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -317,7 +317,8 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr) spin_lock_irqsave(&dbe_lock, flags); list_for_each_entry(dbe, &dbe_list, dbe_list) { - e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr); + e = search_extable(dbe->dbe_start, + dbe->dbe_end - dbe->dbe_start, addr); if (e) break; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 38dfa27730ff..b68b4d0726d3 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -429,7 +429,8 @@ static const struct exception_table_entry *search_dbe_tables(unsigned long addr) { const struct exception_table_entry *e; - e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr); + e = search_extable(__start___dbe_table, + __stop___dbe_table - __start___dbe_table, addr); if (!e) e = search_module_dbetables(addr); return e; diff --git a/arch/mn10300/include/asm/nmi.h b/arch/mn10300/include/asm/nmi.h index f3671cbbc117..b05627597b1b 100644 --- a/arch/mn10300/include/asm/nmi.h +++ b/arch/mn10300/include/asm/nmi.h @@ -11,4 +11,6 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H +extern void arch_touch_nmi_watchdog(void); + #endif /* _ASM_NMI_H */ diff --git a/arch/mn10300/kernel/mn10300-watchdog-low.S b/arch/mn10300/kernel/mn10300-watchdog-low.S index f2f5c9cfaabd..34f8773de7d0 100644 --- a/arch/mn10300/kernel/mn10300-watchdog-low.S +++ b/arch/mn10300/kernel/mn10300-watchdog-low.S @@ -50,9 +50,9 @@ watchdog_handler: # we can't inline it) # ############################################################################### - .globl touch_nmi_watchdog - .type touch_nmi_watchdog,@function -touch_nmi_watchdog: + .globl arch_touch_nmi_watchdog + .type arch_touch_nmi_watchdog,@function +arch_touch_nmi_watchdog: clr d0 clr d1 mov watchdog_alert_counter, a0 @@ -63,4 +63,4 @@ touch_nmi_watchdog: lne ret [],0 - .size touch_nmi_watchdog,.-touch_nmi_watchdog + .size 
arch_touch_nmi_watchdog,.-arch_touch_nmi_watchdog diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index a2d8e6938d67..0d5641beadf5 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c @@ -31,7 +31,7 @@ static unsigned int watchdog; static unsigned int watchdog_hz = 1; unsigned int watchdog_alert_counter[NR_CPUS]; -EXPORT_SYMBOL(touch_nmi_watchdog); +EXPORT_SYMBOL(arch_touch_nmi_watchdog); /* * the best way to detect whether a CPU has a 'hard lockup' problem diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 7177a3f4f418..36f858c37ca7 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -82,7 +82,7 @@ config NR_IRQS config NMI_IPI bool - depends on SMP && (DEBUGGER || KEXEC_CORE) + depends on SMP && (DEBUGGER || KEXEC_CORE || HARDLOCKUP_DETECTOR) default y config STACKTRACE_SUPPORT @@ -125,6 +125,7 @@ config PPC select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select ARCH_HAS_SG_CHAIN @@ -192,11 +193,13 @@ config PPC select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_MOD_ARCH_SPECIFIC - select HAVE_NMI if PERF_EVENTS + select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) + select HAVE_HARDLOCKUP_DETECTOR_ARCH if (PPC64 && PPC_BOOK3S) select HAVE_OPROFILE select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI if PPC64 + select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_RCU_TABLE_FREE if SMP diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h index 20b1485ff1e8..e2329db9d6f4 100644 --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h @@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm) return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP)); #else struct page *page; - page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_REPEAT), + page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL), 4); if (!page) return NULL; diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 09bde6e34f5d..548d9a411a0d 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -23,12 +23,13 @@ #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE PAGE_SIZE -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE 0x20000000 +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \ + 0x100000000UL) #define ELF_CORE_EFLAGS (is_elf2_task() ? 
2 : 0) diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h index ff1ccb375e60..6f8e79cd35d8 100644 --- a/arch/powerpc/include/asm/nmi.h +++ b/arch/powerpc/include/asm/nmi.h @@ -1,4 +1,15 @@ #ifndef _ASM_NMI_H #define _ASM_NMI_H +#ifdef CONFIG_HARDLOCKUP_DETECTOR +extern void arch_touch_nmi_watchdog(void); + +extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask, + bool exclude_self); +#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace + +#else +static inline void arch_touch_nmi_watchdog(void) {} +#endif + #endif /* _ASM_NMI_H */ diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index ebddb2111d87..8ea98504f900 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -55,6 +55,8 @@ struct smp_ops_t { int (*cpu_bootable)(unsigned int nr); }; +extern void smp_flush_nmi_ipi(u64 delay_us); +extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us); extern void smp_send_debugger_break(void); extern void start_secondary_resume(void); extern void smp_generic_give_timebase(void); diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 0845eebc5af3..4aa7c147e447 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -38,6 +38,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ signal_64.o ptrace32.o \ paca.o nvram_64.o firmware.o obj-$(CONFIG_VDSO32) += vdso32/ +obj-$(CONFIG_HARDLOCKUP_DETECTOR) += watchdog.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4c18a5fbb4bb..e6d8354d79ef 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1314,6 +1314,31 @@ EXC_REAL_NONE(0x1800, 0x100) EXC_VIRT_NONE(0x5800, 0x100) #endif +#if defined(CONFIG_HARDLOCKUP_DETECTOR) && defined(CONFIG_HAVE_HARDLOCKUP_DETECTOR_ARCH) + +#define MASKED_DEC_HANDLER_LABEL 3f + +#define MASKED_DEC_HANDLER(_H) \ +3: /* soft-nmi */ \ + std r12,PACA_EXGEN+EX_R12(r13); \ + GET_SCRATCH0(r10); \ + std r10,PACA_EXGEN+EX_R13(r13); \ + EXCEPTION_PROLOG_PSERIES_1(soft_nmi_common, _H) + +EXC_COMMON_BEGIN(soft_nmi_common) + mr r10,r1 + ld r1,PACAEMERGSP(r13) + ld r1,PACA_NMI_EMERG_SP(r13) + subi r1,r1,INT_FRAME_SIZE + EXCEPTION_COMMON_NORET_STACK(PACA_EXGEN, 0x900, + system_reset, soft_nmi_interrupt, + ADD_NVGPRS;ADD_RECONCILE) + b ret_from_except + +#else +#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */ +#define MASKED_DEC_HANDLER(_H) +#endif /* * An interrupt came in while soft-disabled. We set paca->irq_happened, then: @@ -1336,7 +1361,7 @@ masked_##_H##interrupt: \ lis r10,0x7fff; \ ori r10,r10,0xffff; \ mtspr SPRN_DEC,r10; \ - b 2f; \ + b MASKED_DEC_HANDLER_LABEL; \ 1: cmpwi r10,PACA_IRQ_DBELL; \ beq 2f; \ cmpwi r10,PACA_IRQ_HMI; \ @@ -1351,7 +1376,8 @@ masked_##_H##interrupt: \ ld r11,PACA_EXGEN+EX_R11(r13); \ GET_SCRATCH0(r13); \ ##_H##rfid; \ - b . 
+ b .; \ + MASKED_DEC_HANDLER(_H) /* * Real mode exceptions actually use this too, but alternate diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index 3079518f2245..dc0c49cfd90a 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -999,8 +999,7 @@ static int fadump_create_elfcore_headers(char *bufp) phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note()); phdr->p_offset = phdr->p_paddr; - phdr->p_memsz = vmcoreinfo_max_size; - phdr->p_filesz = vmcoreinfo_max_size; + phdr->p_memsz = phdr->p_filesz = VMCOREINFO_NOTE_SIZE; /* Increment number of program headers. */ (elf->e_phnum)++; diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index 9ad37f827a97..1086ea37c832 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -25,6 +25,7 @@ #include <linux/kvm_para.h> #include <linux/slab.h> #include <linux/of.h> +#include <linux/nmi.h> /* hardlockup_detector_disable() */ #include <asm/reg.h> #include <asm/sections.h> @@ -718,6 +719,12 @@ static __init void kvm_free_tmp(void) static int __init kvm_guest_init(void) { + /* + * The hardlockup detector is likely to get false positives in + * KVM guests, so disable it by default. + */ + hardlockup_detector_disable(); + if (!kvm_para_available()) goto free_tmp; diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index dd8a04f3053a..613f79f03877 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -15,6 +15,9 @@ #undef DEBUG_PROM +/* we cannot use FORTIFY as it brings in new symbols */ +#define __NO_FORTIFY + #include <stdarg.h> #include <linux/kernel.h> #include <linux/string.h> diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 4640f6d64f8b..af23d4b576ec 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -751,22 +751,3 @@ unsigned long memory_block_size_bytes(void) struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif - -#ifdef CONFIG_HARDLOCKUP_DETECTOR -u64 hw_nmi_get_sample_period(int watchdog_thresh) -{ - return ppc_proc_freq * watchdog_thresh; -} - -/* - * The hardlockup detector breaks PMU event based branches and is likely - * to get false positives in KVM guests, so disable it by default. - */ -static int __init disable_hardlockup_detector(void) -{ - hardlockup_detector_disable(); - - return 0; -} -early_initcall(disable_hardlockup_detector); -#endif diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index c6b8bace1766..997c88d54acf 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -435,13 +435,31 @@ static void do_smp_send_nmi_ipi(int cpu) } } +void smp_flush_nmi_ipi(u64 delay_us) +{ + unsigned long flags; + + nmi_ipi_lock_start(&flags); + while (nmi_ipi_busy_count) { + nmi_ipi_unlock_end(&flags); + udelay(1); + if (delay_us) { + delay_us--; + if (!delay_us) + return; + } + nmi_ipi_lock_start(&flags); + } + nmi_ipi_unlock_end(&flags); +} + /* * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS. * - fn is the target callback function. * - delay_us > 0 is the delay before giving up waiting for targets to * enter the handler, == 0 specifies indefinite delay. 
*/ -static int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) +int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) { unsigned long flags; int me = raw_smp_processor_id(); diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c new file mode 100644 index 000000000000..b67f8b03a32d --- /dev/null +++ b/arch/powerpc/kernel/watchdog.c @@ -0,0 +1,386 @@ +/* + * Watchdog support on powerpc systems. + * + * Copyright 2017, IBM Corporation. + * + * This uses code from arch/sparc/kernel/nmi.c and kernel/watchdog.c + */ +#include <linux/kernel.h> +#include <linux/param.h> +#include <linux/init.h> +#include <linux/percpu.h> +#include <linux/cpu.h> +#include <linux/nmi.h> +#include <linux/module.h> +#include <linux/export.h> +#include <linux/kprobes.h> +#include <linux/hardirq.h> +#include <linux/reboot.h> +#include <linux/slab.h> +#include <linux/kdebug.h> +#include <linux/sched/debug.h> +#include <linux/delay.h> +#include <linux/smp.h> + +#include <asm/paca.h> + +/* + * The watchdog has a simple timer that runs on each CPU, once per timer + * period. This is the heartbeat. + * + * Then there are checks to see if the heartbeat has not triggered on a CPU + * for the panic timeout period. Currently the watchdog only supports an + * SMP check, so the heartbeat only turns on when we have 2 or more CPUs. + * + * This is not an NMI watchdog, but Linux uses that name for a generic + * watchdog in some cases, so NMI gets used in some places. + */ + +static cpumask_t wd_cpus_enabled __read_mostly; + +static u64 wd_panic_timeout_tb __read_mostly; /* timebase ticks until panic */ +static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */ + +static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */ + +static DEFINE_PER_CPU(struct timer_list, wd_timer); +static DEFINE_PER_CPU(u64, wd_timer_tb); + +/* + * These are for the SMP checker. CPUs clear their pending bit in their + * heartbeat. If the bitmask becomes empty, the time is noted and the + * bitmask is refilled. + * + * All CPUs clear their bit in the pending mask every timer period. + * Once all have cleared, the time is noted and the bits are reset. + * If the time since all clear was greater than the panic timeout, + * we can panic with the list of stuck CPUs. + * + * This will work best with NMI IPIs for crash code so the stuck CPUs + * can be pulled out to get their backtraces. + */ +static unsigned long __wd_smp_lock; +static cpumask_t wd_smp_cpus_pending; +static cpumask_t wd_smp_cpus_stuck; +static u64 wd_smp_last_reset_tb; + +static inline void wd_smp_lock(unsigned long *flags) +{ + /* + * Avoid locking layers if possible. + * This may be called from low level interrupt handlers at some + * point in future. 
+ */ + local_irq_save(*flags); + while (unlikely(test_and_set_bit_lock(0, &__wd_smp_lock))) + cpu_relax(); +} + +static inline void wd_smp_unlock(unsigned long *flags) +{ + clear_bit_unlock(0, &__wd_smp_lock); + local_irq_restore(*flags); +} + +static void wd_lockup_ipi(struct pt_regs *regs) +{ + pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", raw_smp_processor_id()); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); +} + +static void set_cpu_stuck(int cpu, u64 tb) +{ + cpumask_set_cpu(cpu, &wd_smp_cpus_stuck); + cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + if (cpumask_empty(&wd_smp_cpus_pending)) { + wd_smp_last_reset_tb = tb; + cpumask_andnot(&wd_smp_cpus_pending, + &wd_cpus_enabled, + &wd_smp_cpus_stuck); + } +} + +static void watchdog_smp_panic(int cpu, u64 tb) +{ + unsigned long flags; + int c; + + wd_smp_lock(&flags); + /* Double check some things under lock */ + if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb) + goto out; + if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) + goto out; + if (cpumask_weight(&wd_smp_cpus_pending) == 0) + goto out; + + pr_emerg("Watchdog CPU:%d detected Hard LOCKUP other CPUS:%*pbl\n", + cpu, cpumask_pr_args(&wd_smp_cpus_pending)); + + /* + * Try to trigger the stuck CPUs. + */ + for_each_cpu(c, &wd_smp_cpus_pending) { + if (c == cpu) + continue; + smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000); + } + smp_flush_nmi_ipi(1000000); + + /* Take the stuck CPU out of the watch group */ + for_each_cpu(c, &wd_smp_cpus_pending) + set_cpu_stuck(c, tb); + +out: + wd_smp_unlock(&flags); + + printk_safe_flush(); + /* + * printk_safe_flush() seems to require another print + * before anything actually goes out to console. 
+ */ + if (sysctl_hardlockup_all_cpu_backtrace) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(NULL, "Hard LOCKUP"); +} + +static void wd_smp_clear_cpu_pending(int cpu, u64 tb) +{ + if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) { + if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) { + unsigned long flags; + + pr_emerg("Watchdog CPU:%d became unstuck\n", cpu); + wd_smp_lock(&flags); + cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck); + wd_smp_unlock(&flags); + } + return; + } + cpumask_clear_cpu(cpu, &wd_smp_cpus_pending); + if (cpumask_empty(&wd_smp_cpus_pending)) { + unsigned long flags; + + wd_smp_lock(&flags); + if (cpumask_empty(&wd_smp_cpus_pending)) { + wd_smp_last_reset_tb = tb; + cpumask_andnot(&wd_smp_cpus_pending, + &wd_cpus_enabled, + &wd_smp_cpus_stuck); + } + wd_smp_unlock(&flags); + } +} + +static void watchdog_timer_interrupt(int cpu) +{ + u64 tb = get_tb(); + + per_cpu(wd_timer_tb, cpu) = tb; + + wd_smp_clear_cpu_pending(cpu, tb); + + if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb) + watchdog_smp_panic(cpu, tb); +} + +void soft_nmi_interrupt(struct pt_regs *regs) +{ + unsigned long flags; + int cpu = raw_smp_processor_id(); + u64 tb; + + if (!cpumask_test_cpu(cpu, &wd_cpus_enabled)) + return; + + nmi_enter(); + tb = get_tb(); + if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) { + per_cpu(wd_timer_tb, cpu) = tb; + + wd_smp_lock(&flags); + if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) { + wd_smp_unlock(&flags); + goto out; + } + set_cpu_stuck(cpu, tb); + + pr_emerg("Watchdog CPU:%d Hard LOCKUP\n", cpu); + print_modules(); + print_irqtrace_events(current); + if (regs) + show_regs(regs); + else + dump_stack(); + + wd_smp_unlock(&flags); + + if (sysctl_hardlockup_all_cpu_backtrace) + trigger_allbutself_cpu_backtrace(); + + if (hardlockup_panic) + nmi_panic(regs, "Hard LOCKUP"); + } + if (wd_panic_timeout_tb < 0x7fffffff) + mtspr(SPRN_DEC, wd_panic_timeout_tb); + +out: + nmi_exit(); +} + +static void wd_timer_reset(unsigned int cpu, struct timer_list *t) +{ + t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms); + if (wd_timer_period_ms > 1000) + t->expires = __round_jiffies_up(t->expires, cpu); + add_timer_on(t, cpu); +} + +static void wd_timer_fn(unsigned long data) +{ + struct timer_list *t = this_cpu_ptr(&wd_timer); + int cpu = smp_processor_id(); + + watchdog_timer_interrupt(cpu); + + wd_timer_reset(cpu, t); +} + +void arch_touch_nmi_watchdog(void) +{ + int cpu = smp_processor_id(); + + watchdog_timer_interrupt(cpu); +} +EXPORT_SYMBOL(arch_touch_nmi_watchdog); + +static void start_watchdog_timer_on(unsigned int cpu) +{ + struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); + + per_cpu(wd_timer_tb, cpu) = get_tb(); + + setup_pinned_timer(t, wd_timer_fn, 0); + wd_timer_reset(cpu, t); +} + +static void stop_watchdog_timer_on(unsigned int cpu) +{ + struct timer_list *t = per_cpu_ptr(&wd_timer, cpu); + + del_timer_sync(t); +} + +static int start_wd_on_cpu(unsigned int cpu) +{ + if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) { + WARN_ON(1); + return 0; + } + + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) + return 0; + + if (watchdog_suspended) + return 0; + + if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) + return 0; + + cpumask_set_cpu(cpu, &wd_cpus_enabled); + if (cpumask_weight(&wd_cpus_enabled) == 1) { + cpumask_set_cpu(cpu, &wd_smp_cpus_pending); + wd_smp_last_reset_tb = get_tb(); + } + smp_wmb(); + start_watchdog_timer_on(cpu); + + return 0; +} + +static int stop_wd_on_cpu(unsigned int cpu) +{ + if 
(!cpumask_test_cpu(cpu, &wd_cpus_enabled)) + return 0; /* Can happen in CPU unplug case */ + + stop_watchdog_timer_on(cpu); + + cpumask_clear_cpu(cpu, &wd_cpus_enabled); + wd_smp_clear_cpu_pending(cpu, get_tb()); + + return 0; +} + +static void watchdog_calc_timeouts(void) +{ + wd_panic_timeout_tb = watchdog_thresh * ppc_tb_freq; + + /* Have the SMP detector trigger a bit later */ + wd_smp_panic_timeout_tb = wd_panic_timeout_tb * 3 / 2; + + /* 2/5 is the factor that the perf based detector uses */ + wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; +} + +void watchdog_nmi_reconfigure(void) +{ + int cpu; + + watchdog_calc_timeouts(); + + for_each_cpu(cpu, &wd_cpus_enabled) + stop_wd_on_cpu(cpu); + + for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) + start_wd_on_cpu(cpu); +} + +/* + * This runs after lockup_detector_init() which sets up watchdog_cpumask. + */ +static int __init powerpc_watchdog_init(void) +{ + int err; + + watchdog_calc_timeouts(); + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", + start_wd_on_cpu, stop_wd_on_cpu); + if (err < 0) + pr_warn("Watchdog could not be initialized"); + + return 0; +} +arch_initcall(powerpc_watchdog_init); + +static void handle_backtrace_ipi(struct pt_regs *regs) +{ + nmi_cpu_backtrace(regs); +} + +static void raise_backtrace_ipi(cpumask_t *mask) +{ + unsigned int cpu; + + for_each_cpu(cpu, mask) { + if (cpu == smp_processor_id()) + handle_backtrace_ipi(NULL); + else + smp_send_nmi_ipi(cpu, handle_backtrace_ipi, 1000000); + } +} + +void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) +{ + nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi); +} diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 710e491206ed..8cb0190e2a73 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -93,7 +93,7 @@ int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) } if (!hpt) - hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT + hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL |__GFP_NOWARN, order - PAGE_SHIFT); if (!hpt) diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c index f3917705c686..41cf5ae273cf 100644 --- a/arch/powerpc/lib/feature-fixups.c +++ b/arch/powerpc/lib/feature-fixups.c @@ -233,192 +233,192 @@ static long calc_offset(struct fixup_entry *entry, unsigned int *p) static void test_basic_patching(void) { - extern unsigned int ftr_fixup_test1; - extern unsigned int end_ftr_fixup_test1; - extern unsigned int ftr_fixup_test1_orig; - extern unsigned int ftr_fixup_test1_expected; - int size = &end_ftr_fixup_test1 - &ftr_fixup_test1; + extern unsigned int ftr_fixup_test1[]; + extern unsigned int end_ftr_fixup_test1[]; + extern unsigned int ftr_fixup_test1_orig[]; + extern unsigned int ftr_fixup_test1_expected[]; + int size = end_ftr_fixup_test1 - ftr_fixup_test1; fixup.value = fixup.mask = 8; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test1 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test1 + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test1 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test1 + 2); fixup.alt_start_off = fixup.alt_end_off = 0; /* Sanity check */ - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(8, &fixup); - 
check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test1, &ftr_fixup_test1_orig, size); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_orig, size) == 0); + memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0); patch_feature_section(~8, &fixup); - check(memcmp(&ftr_fixup_test1, &ftr_fixup_test1_expected, size) == 0); + check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0); } static void test_alternative_patching(void) { - extern unsigned int ftr_fixup_test2; - extern unsigned int end_ftr_fixup_test2; - extern unsigned int ftr_fixup_test2_orig; - extern unsigned int ftr_fixup_test2_alt; - extern unsigned int ftr_fixup_test2_expected; - int size = &end_ftr_fixup_test2 - &ftr_fixup_test2; + extern unsigned int ftr_fixup_test2[]; + extern unsigned int end_ftr_fixup_test2[]; + extern unsigned int ftr_fixup_test2_orig[]; + extern unsigned int ftr_fixup_test2_alt[]; + extern unsigned int ftr_fixup_test2_expected[]; + int size = end_ftr_fixup_test2 - ftr_fixup_test2; fixup.value = fixup.mask = 0xF; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test2 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test2 + 2); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test2_alt); - fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test2_alt + 1); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test2 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test2 + 2); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test2_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test2_alt + 1); /* Sanity check */ - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(0xF, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test2, &ftr_fixup_test2_orig, size); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_orig, size) == 0); + memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0); patch_feature_section(~0xF, &fixup); - check(memcmp(&ftr_fixup_test2, &ftr_fixup_test2_expected, size) == 0); + check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0); } static void test_alternative_case_too_big(void) { - extern unsigned int ftr_fixup_test3; - extern unsigned int end_ftr_fixup_test3; - extern unsigned int ftr_fixup_test3_orig; - extern unsigned int ftr_fixup_test3_alt; - int size = &end_ftr_fixup_test3 - &ftr_fixup_test3; + extern unsigned int ftr_fixup_test3[]; + extern unsigned int end_ftr_fixup_test3[]; + extern unsigned int ftr_fixup_test3_orig[]; + extern unsigned int ftr_fixup_test3_alt[]; + int size = end_ftr_fixup_test3 
- ftr_fixup_test3; fixup.value = fixup.mask = 0xC; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test3 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test3 + 2); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test3_alt); - fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test3_alt + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test3 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test3 + 2); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test3_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test3_alt + 2); /* Sanity check */ - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); /* Expect nothing to be patched, and the error returned to us */ check(patch_feature_section(0xF, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(0, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); check(patch_feature_section(~0xF, &fixup) == 1); - check(memcmp(&ftr_fixup_test3, &ftr_fixup_test3_orig, size) == 0); + check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0); } static void test_alternative_case_too_small(void) { - extern unsigned int ftr_fixup_test4; - extern unsigned int end_ftr_fixup_test4; - extern unsigned int ftr_fixup_test4_orig; - extern unsigned int ftr_fixup_test4_alt; - extern unsigned int ftr_fixup_test4_expected; - int size = &end_ftr_fixup_test4 - &ftr_fixup_test4; + extern unsigned int ftr_fixup_test4[]; + extern unsigned int end_ftr_fixup_test4[]; + extern unsigned int ftr_fixup_test4_orig[]; + extern unsigned int ftr_fixup_test4_alt[]; + extern unsigned int ftr_fixup_test4_expected[]; + int size = end_ftr_fixup_test4 - ftr_fixup_test4; unsigned long flag; /* Check a high-bit flag */ flag = 1UL << ((sizeof(unsigned long) - 1) * 8); fixup.value = fixup.mask = flag; - fixup.start_off = calc_offset(&fixup, &ftr_fixup_test4 + 1); - fixup.end_off = calc_offset(&fixup, &ftr_fixup_test4 + 5); - fixup.alt_start_off = calc_offset(&fixup, &ftr_fixup_test4_alt); - fixup.alt_end_off = calc_offset(&fixup, &ftr_fixup_test4_alt + 2); + fixup.start_off = calc_offset(&fixup, ftr_fixup_test4 + 1); + fixup.end_off = calc_offset(&fixup, ftr_fixup_test4 + 5); + fixup.alt_start_off = calc_offset(&fixup, ftr_fixup_test4_alt); + fixup.alt_end_off = calc_offset(&fixup, ftr_fixup_test4_alt + 2); /* Sanity check */ - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we don't patch if the value matches */ patch_feature_section(flag, &fixup); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); /* Check we do patch if the value doesn't match */ patch_feature_section(0, &fixup); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); /* Check we do patch if the mask doesn't match */ - memcpy(&ftr_fixup_test4, &ftr_fixup_test4_orig, size); - check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_orig, size) == 0); + memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0); patch_feature_section(~flag, &fixup); - 
check(memcmp(&ftr_fixup_test4, &ftr_fixup_test4_expected, size) == 0); + check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0); } static void test_alternative_case_with_branch(void) { - extern unsigned int ftr_fixup_test5; - extern unsigned int end_ftr_fixup_test5; - extern unsigned int ftr_fixup_test5_expected; - int size = &end_ftr_fixup_test5 - &ftr_fixup_test5; + extern unsigned int ftr_fixup_test5[]; + extern unsigned int end_ftr_fixup_test5[]; + extern unsigned int ftr_fixup_test5_expected[]; + int size = end_ftr_fixup_test5 - ftr_fixup_test5; - check(memcmp(&ftr_fixup_test5, &ftr_fixup_test5_expected, size) == 0); + check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0); } static void test_alternative_case_with_external_branch(void) { - extern unsigned int ftr_fixup_test6; - extern unsigned int end_ftr_fixup_test6; - extern unsigned int ftr_fixup_test6_expected; - int size = &end_ftr_fixup_test6 - &ftr_fixup_test6; + extern unsigned int ftr_fixup_test6[]; + extern unsigned int end_ftr_fixup_test6[]; + extern unsigned int ftr_fixup_test6_expected[]; + int size = end_ftr_fixup_test6 - ftr_fixup_test6; - check(memcmp(&ftr_fixup_test6, &ftr_fixup_test6_expected, size) == 0); + check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0); } static void test_cpu_macros(void) { - extern u8 ftr_fixup_test_FTR_macros; - extern u8 ftr_fixup_test_FTR_macros_expected; - unsigned long size = &ftr_fixup_test_FTR_macros_expected - - &ftr_fixup_test_FTR_macros; + extern u8 ftr_fixup_test_FTR_macros[]; + extern u8 ftr_fixup_test_FTR_macros_expected[]; + unsigned long size = ftr_fixup_test_FTR_macros_expected - + ftr_fixup_test_FTR_macros; /* The fixups have already been done for us during boot */ - check(memcmp(&ftr_fixup_test_FTR_macros, - &ftr_fixup_test_FTR_macros_expected, size) == 0); + check(memcmp(ftr_fixup_test_FTR_macros, + ftr_fixup_test_FTR_macros_expected, size) == 0); } static void test_fw_macros(void) { #ifdef CONFIG_PPC64 - extern u8 ftr_fixup_test_FW_FTR_macros; - extern u8 ftr_fixup_test_FW_FTR_macros_expected; - unsigned long size = &ftr_fixup_test_FW_FTR_macros_expected - - &ftr_fixup_test_FW_FTR_macros; + extern u8 ftr_fixup_test_FW_FTR_macros[]; + extern u8 ftr_fixup_test_FW_FTR_macros_expected[]; + unsigned long size = ftr_fixup_test_FW_FTR_macros_expected - + ftr_fixup_test_FW_FTR_macros; /* The fixups have already been done for us during boot */ - check(memcmp(&ftr_fixup_test_FW_FTR_macros, - &ftr_fixup_test_FW_FTR_macros_expected, size) == 0); + check(memcmp(ftr_fixup_test_FW_FTR_macros, + ftr_fixup_test_FW_FTR_macros_expected, size) == 0); #endif } static void test_lwsync_macros(void) { - extern u8 lwsync_fixup_test; - extern u8 end_lwsync_fixup_test; - extern u8 lwsync_fixup_test_expected_LWSYNC; - extern u8 lwsync_fixup_test_expected_SYNC; - unsigned long size = &end_lwsync_fixup_test - - &lwsync_fixup_test; + extern u8 lwsync_fixup_test[]; + extern u8 end_lwsync_fixup_test[]; + extern u8 lwsync_fixup_test_expected_LWSYNC[]; + extern u8 lwsync_fixup_test_expected_SYNC[]; + unsigned long size = end_lwsync_fixup_test - + lwsync_fixup_test; /* The fixups have already been done for us during boot */ if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) { - check(memcmp(&lwsync_fixup_test, - &lwsync_fixup_test_expected_LWSYNC, size) == 0); + check(memcmp(lwsync_fixup_test, + lwsync_fixup_test_expected_LWSYNC, size) == 0); } else { - check(memcmp(&lwsync_fixup_test, - &lwsync_fixup_test_expected_SYNC, size) == 0); + check(memcmp(lwsync_fixup_test, + 
lwsync_fixup_test_expected_SYNC, size) == 0); } } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 0ee6be4f1ba4..5d78b193fec4 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -34,16 +34,9 @@ /* * Top of mmap area (just below the process stack). * - * Leave at least a ~128 MB hole on 32bit applications. - * - * On 64bit applications we randomise the stack by 1GB so we need to - * space our mmap start address by a further 1GB, otherwise there is a - * chance the mmap area will end up closer to the stack than our ulimit - * requires. + * Leave at least a ~128 MB hole. */ -#define MIN_GAP32 (128*1024*1024) -#define MIN_GAP64 ((128 + 1024)*1024*1024UL) -#define MIN_GAP ((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64) +#define MIN_GAP (128*1024*1024) #define MAX_GAP (TASK_SIZE/6*5) static inline int mmap_is_legacy(void) @@ -71,9 +64,26 @@ unsigned long arch_mmap_rnd(void) return rnd << PAGE_SHIFT; } +static inline unsigned long stack_maxrandom_size(void) +{ + if (!(current->flags & PF_RANDOMIZE)) + return 0; + + /* 8MB for 32bit, 1GB for 64bit */ + if (is_32bit_task()) + return (1<<23); + else + return (1<<30); +} + static inline unsigned long mmap_base(unsigned long rnd) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = stack_maxrandom_size() + stack_guard_gap; + + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; if (gap < MIN_GAP) gap = MIN_GAP; diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index ec024c08dabe..c92ed0170be2 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h @@ -193,14 +193,13 @@ struct arch_elf_state { #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. 64-bit - tasks are aligned to 4GB. */ -#define ELF_ET_DYN_BASE (is_compat_task() ? \ - (STACK_TOP / 3 * 2) : \ - (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1)) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. 
*/ diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 49a6bd45957b..3d0b14afa232 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -246,6 +246,7 @@ void arch_crash_save_vmcoreinfo(void) VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); + mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); } void machine_shutdown(void) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 3ae756c0db3d..3d1d808ea8a9 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -496,11 +496,6 @@ static void __init setup_memory_end(void) pr_notice("The maximum memory size is %luMB\n", memory_end >> 20); } -static void __init setup_vmcoreinfo(void) -{ - mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note()); -} - #ifdef CONFIG_CRASH_DUMP /* @@ -939,7 +934,6 @@ void __init setup_arch(char **cmdline_p) #endif setup_resources(); - setup_vmcoreinfo(); setup_lowcore(); smp_fill_possible_mask(); cpu_detect_mhz_feature(); diff --git a/arch/sh/include/asm/bug.h b/arch/sh/include/asm/bug.h index 1b77f068be2b..c9828f785ca0 100644 --- a/arch/sh/include/asm/bug.h +++ b/arch/sh/include/asm/bug.h @@ -48,6 +48,7 @@ do { \ "i" (__FILE__), \ "i" (__LINE__), "i" (0), \ "i" (sizeof(struct bug_entry))); \ + unreachable(); \ } while (0) #define __WARN_FLAGS(flags) \ diff --git a/arch/sh/include/asm/stackprotector.h b/arch/sh/include/asm/stackprotector.h index d9df3a76847c..141515a43b78 100644 --- a/arch/sh/include/asm/stackprotector.h +++ b/arch/sh/include/asm/stackprotector.h @@ -19,6 +19,7 @@ static __always_inline void boot_init_stack_canary(void) /* Try to get a semi random initial value. */ get_random_bytes(&canary, sizeof(canary)); canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; current->stack_canary = canary; __stack_chk_guard = current->stack_canary; diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c index d94dadedf74f..445b5e69b73c 100644 --- a/arch/sh/mm/cache-sh5.c +++ b/arch/sh/mm/cache-sh5.c @@ -234,7 +234,7 @@ static void sh64_icache_inv_current_user_range(unsigned long start, unsigned lon #define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4)) static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, }; -static void inline sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) +static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets) { /* Purge all ways in a particular block of sets, specified by the base set number and number of sets. Can handle wrap-around, if that's diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c index b90cdfad2c78..7a3b4d33d2e7 100644 --- a/arch/sh/mm/extable_64.c +++ b/arch/sh/mm/extable_64.c @@ -10,6 +10,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. 
*/ +#include <linux/bsearch.h> #include <linux/rwsem.h> #include <linux/extable.h> #include <linux/uaccess.h> @@ -40,10 +41,23 @@ static const struct exception_table_entry *check_exception_ranges(unsigned long return NULL; } +static int cmp_ex_search(const void *key, const void *elt) +{ + const struct exception_table_entry *_elt = elt; + unsigned long _key = *(unsigned long *)key; + + /* avoid overflow */ + if (_key > _elt->insn) + return 1; + if (_key < _elt->insn) + return -1; + return 0; +} + /* Simple binary search */ const struct exception_table_entry * -search_extable(const struct exception_table_entry *first, - const struct exception_table_entry *last, +search_extable(const struct exception_table_entry *base, + const size_t num, unsigned long value) { const struct exception_table_entry *mid; @@ -52,20 +66,8 @@ search_extable(const struct exception_table_entry *first, if (mid) return mid; - while (first <= last) { - long diff; - - mid = (last - first) / 2 + first; - diff = mid->insn - value; - if (diff == 0) - return mid; - else if (diff < 0) - first = mid+1; - else - last = mid-1; - } - - return NULL; + return bsearch(&value, base, num, + sizeof(struct exception_table_entry), cmp_ex_search); } int fixup_exception(struct pt_regs *regs) diff --git a/arch/sparc/include/asm/asm-prototypes.h b/arch/sparc/include/asm/asm-prototypes.h new file mode 100644 index 000000000000..d381e11c5dbb --- /dev/null +++ b/arch/sparc/include/asm/asm-prototypes.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved. + */ + +#include <asm/xor.h> +#include <asm/checksum.h> +#include <asm/trap_block.h> +#include <asm/uaccess.h> +#include <asm/atomic.h> +#include <asm/ftrace.h> +#include <asm/cacheflush.h> +#include <asm/oplib.h> +#include <linux/atomic.h> + +void *__memscan_zero(void *, size_t); +void *__memscan_generic(void *, int, size_t); +void *__bzero(void *, size_t); +void VISenter(void); /* Dummy prototype to supress warning */ +#undef memcpy +#undef memset +void *memcpy(void *dest, const void *src, size_t n); +void *memset(void *s, int c, size_t n); +typedef int TItype __attribute__((mode(TI))); +TItype __multi3(TItype a, TItype b); diff --git a/arch/sparc/include/asm/nmi.h b/arch/sparc/include/asm/nmi.h index 26ad2b2607c6..284eac3ffaf2 100644 --- a/arch/sparc/include/asm/nmi.h +++ b/arch/sparc/include/asm/nmi.h @@ -7,6 +7,7 @@ void nmi_adjust_hz(unsigned int new_hz); extern atomic_t nmi_active; +void arch_touch_nmi_watchdog(void); void start_nmi_watchdog(void *unused); void stop_nmi_watchdog(void *unused); diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index e4b4e790bf89..fa466ce45bc9 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -205,7 +205,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) handle_size = (sizeof(struct mdesc_handle) - sizeof(struct mdesc_hdr) + mdesc_size); - base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_REPEAT); + base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!base) return NULL; diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index 95e73c63c99d..048ad783ea3f 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -51,7 +51,7 @@ static DEFINE_PER_CPU(unsigned int, last_irq_sum); static DEFINE_PER_CPU(long, alert_counter); static DEFINE_PER_CPU(int, nmi_touch); -void touch_nmi_watchdog(void) +void arch_touch_nmi_watchdog(void) { if (atomic_read(&nmi_active)) { int cpu; @@ -61,10 +61,8 @@ void 
touch_nmi_watchdog(void) per_cpu(nmi_touch, cpu) = 1; } } - - touch_softlockup_watchdog(); } -EXPORT_SYMBOL(touch_nmi_watchdog); +EXPORT_SYMBOL(arch_touch_nmi_watchdog); static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) { diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S index 1c6a1bde5138..ce17c3094ba6 100644 --- a/arch/sparc/lib/atomic_64.S +++ b/arch/sparc/lib/atomic_64.S @@ -62,19 +62,23 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENDPROC(atomic_fetch_##op); \ EXPORT_SYMBOL(atomic_fetch_##op); -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) +ATOMIC_OP(add) +ATOMIC_OP_RETURN(add) +ATOMIC_FETCH_OP(add) -ATOMIC_OPS(add) -ATOMIC_OPS(sub) +ATOMIC_OP(sub) +ATOMIC_OP_RETURN(sub) +ATOMIC_FETCH_OP(sub) -#undef ATOMIC_OPS -#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) +ATOMIC_OP(and) +ATOMIC_FETCH_OP(and) -ATOMIC_OPS(and) -ATOMIC_OPS(or) -ATOMIC_OPS(xor) +ATOMIC_OP(or) +ATOMIC_FETCH_OP(or) + +ATOMIC_OP(xor) +ATOMIC_FETCH_OP(xor) -#undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP @@ -124,19 +128,23 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENDPROC(atomic64_fetch_##op); \ EXPORT_SYMBOL(atomic64_fetch_##op); -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) +ATOMIC64_OP(add) +ATOMIC64_OP_RETURN(add) +ATOMIC64_FETCH_OP(add) + +ATOMIC64_OP(sub) +ATOMIC64_OP_RETURN(sub) +ATOMIC64_FETCH_OP(sub) -ATOMIC64_OPS(add) -ATOMIC64_OPS(sub) +ATOMIC64_OP(and) +ATOMIC64_FETCH_OP(and) -#undef ATOMIC64_OPS -#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_FETCH_OP(op) +ATOMIC64_OP(or) +ATOMIC64_FETCH_OP(or) -ATOMIC64_OPS(and) -ATOMIC64_OPS(or) -ATOMIC64_OPS(xor) +ATOMIC64_OP(xor) +ATOMIC64_FETCH_OP(xor) -#undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP diff --git a/arch/sparc/lib/checksum_64.S b/arch/sparc/lib/checksum_64.S index f6732174fe6b..6cfa521f444d 100644 --- a/arch/sparc/lib/checksum_64.S +++ b/arch/sparc/lib/checksum_64.S @@ -38,6 +38,7 @@ csum_partial_fix_alignment: .align 32 .globl csum_partial + .type csum_partial,#function EXPORT_SYMBOL(csum_partial) csum_partial: /* %o0=buff, %o1=len, %o2=sum */ prefetch [%o0 + 0x000], #n_reads diff --git a/arch/sparc/lib/csum_copy.S b/arch/sparc/lib/csum_copy.S index 0ecbafc30fd0..b1051e77c49a 100644 --- a/arch/sparc/lib/csum_copy.S +++ b/arch/sparc/lib/csum_copy.S @@ -65,6 +65,7 @@ add %o5, %o4, %o4 .globl FUNC_NAME + .type FUNC_NAME,#function EXPORT_SYMBOL(FUNC_NAME) FUNC_NAME: /* %o0=src, %o1=dst, %o2=len, %o3=sum */ LOAD(prefetch, %o0 + 0x000, #n_reads) diff --git a/arch/sparc/lib/memscan_64.S b/arch/sparc/lib/memscan_64.S index daa96f4b03e6..5efee1f4be36 100644 --- a/arch/sparc/lib/memscan_64.S +++ b/arch/sparc/lib/memscan_64.S @@ -14,6 +14,8 @@ .text .align 32 .globl __memscan_zero, __memscan_generic + .type __memscan_zero,#function + .type __memscan_generic,#function .globl memscan EXPORT_SYMBOL(__memscan_zero) EXPORT_SYMBOL(__memscan_generic) diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S index bb539b42b088..e23338dbfc43 100644 --- a/arch/sparc/lib/memset.S +++ b/arch/sparc/lib/memset.S @@ -63,6 +63,7 @@ __bzero_begin: .globl __bzero + .type __bzero,#function .globl memset EXPORT_SYMBOL(__bzero) EXPORT_SYMBOL(memset) diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c index db214e9931d9..2422511dc8c5 100644 --- a/arch/sparc/mm/extable.c +++ b/arch/sparc/mm/extable.c @@ -13,11 +13,11 @@ 
void sort_extable(struct exception_table_entry *start, /* Caller knows they are in a range if ret->fixup == 0 */ const struct exception_table_entry * -search_extable(const struct exception_table_entry *start, - const struct exception_table_entry *last, +search_extable(const struct exception_table_entry *base, + const size_t num, unsigned long value) { - const struct exception_table_entry *walk; + int i; /* Single insn entries are encoded as: * word 1: insn address @@ -37,30 +37,30 @@ search_extable(const struct exception_table_entry *start, */ /* 1. Try to find an exact match. */ - for (walk = start; walk <= last; walk++) { - if (walk->fixup == 0) { + for (i = 0; i < num; i++) { + if (base[i].fixup == 0) { /* A range entry, skip both parts. */ - walk++; + i++; continue; } /* A deleted entry; see trim_init_extable */ - if (walk->fixup == -1) + if (base[i].fixup == -1) continue; - if (walk->insn == value) - return walk; + if (base[i].insn == value) + return &base[i]; } /* 2. Try to find a range match. */ - for (walk = start; walk <= (last - 1); walk++) { - if (walk->fixup) + for (i = 0; i < (num - 1); i++) { + if (base[i].fixup) continue; - if (walk[0].insn <= value && walk[1].insn > value) - return walk; + if (base[i].insn <= value && base[i + 1].insn > value) + return &base[i]; - walk++; + i++; } return NULL; diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index cd0e32bbcb1d..f80cfc64c55b 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c @@ -78,8 +78,8 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, return 0; refs = 0; - head = pmd_page(pmd); - page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT); + head = compound_head(page); do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 94a18681353d..781521b7cf9e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -50,6 +50,7 @@ config X86 select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_FAST_MULTIPLIER + select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 select ARCH_HAS_MMIO_FLUSH @@ -162,6 +163,7 @@ config X86 select HAVE_PCSPKR_PLATFORM select HAVE_PERF_EVENTS select HAVE_PERF_EVENTS_NMI + select HAVE_HARDLOCKUP_DETECTOR_PERF if HAVE_PERF_EVENTS_NMI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP select HAVE_REGS_AND_STACK_ACCESS_API diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 00241c815524..a0838ab929f2 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -411,3 +411,8 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, debug_putstr("done.\nBooting the kernel.\n"); return output; } + +void fortify_panic(const char *name) +{ + error("detected buffer overflow"); +} diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index d2ff779f347e..796ff6c1aa53 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -33,7 +33,7 @@ #ifdef CONFIG_X86_32 -extern unsigned long asmlinkage efi_call_phys(void *, ...); +extern asmlinkage unsigned long efi_call_phys(void *, ...); #define arch_efi_call_virt_setup() kernel_fpu_begin() #define arch_efi_call_virt_teardown() kernel_fpu_end() @@ -52,7 +52,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...); #define EFI_LOADER_SIGNATURE "EL64" -extern u64 asmlinkage efi_call(void *fp, ...); +extern asmlinkage u64 efi_call(void *fp, ...); 
#define efi_call_phys(f, args...) efi_call((f), args) diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h index e8ab9a46bc68..1c18d83d3f09 100644 --- a/arch/x86/include/asm/elf.h +++ b/arch/x86/include/asm/elf.h @@ -245,12 +245,13 @@ extern int force_personality32; #define CORE_DUMP_USE_REGSET #define ELF_EXEC_PAGESIZE 4096 -/* This is the location that an ET_DYN program is loaded if exec'ed. Typical - use of this is to invoke "./ld.so someprog" to test out a new version of - the loader. We need to make sure that it is out of the way of the program - that it will "exec", and that there is sufficient room for the brk. */ - -#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) +/* + * This is the base location for PIE (ET_DYN with INTERP) loads. On + * 64-bit, this is raised to 4GB to leave the entire 32-bit address + * space open for things that want to use the area for 32-bit pointers. + */ +#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \ + 0x100000000UL) /* This yields a mask that user programs can use to figure out what instruction set this CPU supports. This could be done in user space, diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index dcbd9bcce714..8abedf1d650e 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -74,6 +74,7 @@ static __always_inline void boot_init_stack_canary(void) get_random_bytes(&canary, sizeof(canary)); tsc = rdtsc(); canary += tsc + (tsc << 32UL); + canary &= CANARY_MASK; current->stack_canary = canary; #ifdef CONFIG_X86_64 diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h index 3d3e8353ee5c..e9ee84873de5 100644 --- a/arch/x86/include/asm/string_32.h +++ b/arch/x86/include/asm/string_32.h @@ -142,7 +142,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from, } #define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *, const void *, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #ifdef CONFIG_X86_USE_3DNOW #include <asm/mmx.h> @@ -195,11 +197,15 @@ static inline void *__memcpy3d(void *to, const void *from, size_t len) #endif #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMMOVE void *memmove(void *dest, const void *src, size_t n); +extern int memcmp(const void *, const void *, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #define memcmp __builtin_memcmp +#endif #define __HAVE_ARCH_MEMCHR extern void *memchr(const void *cs, int c, size_t count); @@ -321,6 +327,8 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, : __memset_generic((s), (c), (count))) #define __HAVE_ARCH_MEMSET +extern void *memset(void *, int, size_t); +#ifndef CONFIG_FORTIFY_SOURCE #if (__GNUC__ >= 4) #define memset(s, c, count) __builtin_memset(s, c, count) #else @@ -330,6 +338,7 @@ void *__constant_c_and_count_memset(void *s, unsigned long pattern, (count)) \ : __memset((s), (c), (count))) #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ /* * find the first occurrence of byte 'c', or 1 past the area if none diff --git a/arch/x86/include/asm/string_64.h b/arch/x86/include/asm/string_64.h index 1f22bc277c45..2a8c822de1fc 100644 --- a/arch/x86/include/asm/string_64.h +++ b/arch/x86/include/asm/string_64.h @@ -31,6 +31,7 @@ static __always_inline void *__inline_memcpy(void *to, const void *from, size_t extern void *memcpy(void *to, const void *from, size_t len); extern void *__memcpy(void *to, const void *from, size_t len); +#ifndef CONFIG_FORTIFY_SOURCE #ifndef CONFIG_KMEMCHECK #if (__GNUC__ == 4 && __GNUC_MINOR__ < 
3) || __GNUC__ < 4 #define memcpy(dst, src, len) \ @@ -51,6 +52,7 @@ extern void *__memcpy(void *to, const void *from, size_t len); */ #define memcpy(dst, src, len) __inline_memcpy((dst), (src), (len)) #endif +#endif /* !CONFIG_FORTIFY_SOURCE */ #define __HAVE_ARCH_MEMSET void *memset(void *s, int c, size_t n); @@ -77,6 +79,11 @@ int strcmp(const char *cs, const char *ct); #define memcpy(dst, src, len) __memcpy(dst, src, len) #define memmove(dst, src, len) __memmove(dst, src, len) #define memset(s, c, n) __memset(s, c, n) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + #endif #define __HAVE_ARCH_MEMCPY_MCSAFE 1 diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index c73c9fb281e1..d6f387780849 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -19,7 +19,7 @@ #include <linux/init.h> #include <linux/delay.h> -#ifdef CONFIG_HARDLOCKUP_DETECTOR +#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF u64 hw_nmi_get_sample_period(int watchdog_thresh) { return (u64)(cpu_khz) * 1000 * watchdog_thresh; diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 22217ece26c8..44404e2307bb 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -457,7 +457,7 @@ static int prepare_elf64_headers(struct crash_elf_data *ced, bufp += sizeof(Elf64_Phdr); phdr->p_type = PT_NOTE; phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note(); - phdr->p_filesz = phdr->p_memsz = sizeof(vmcoreinfo_note); + phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE; (ehdr->e_phnum)++; #ifdef CONFIG_X86_64 diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c index cad12634d6bd..2eab7d0bfedd 100644 --- a/arch/x86/lib/memcpy_32.c +++ b/arch/x86/lib/memcpy_32.c @@ -6,7 +6,7 @@ __visible void *memcpy(void *to, const void *from, size_t n) { -#ifdef CONFIG_X86_USE_3DNOW +#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE) return __memcpy3d(to, from, n); #else return __memcpy(to, from, n); diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 88215ac16b24..02c9d7553409 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -23,12 +23,7 @@ static int __init map_range(struct range *range) start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start)); end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end)); - /* - * end + 1 here is intentional. We check several shadow bytes in advance - * to slightly speed up fastpath. In some rare cases we could cross - * boundary of mapped shadow, so we just map some more here. - */ - return vmemmap_populate(start, end + 1, NUMA_NO_NODE); + return vmemmap_populate(start, end, NUMA_NO_NODE); } static void __init clear_pgds(unsigned long start, diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 797295e792b2..229d04a83f85 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -92,13 +92,18 @@ unsigned long arch_mmap_rnd(void) static unsigned long mmap_base(unsigned long rnd, unsigned long task_size) { unsigned long gap = rlimit(RLIMIT_STACK); + unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap; unsigned long gap_min, gap_max; + /* Values close to RLIM_INFINITY can overflow. */ + if (gap + pad > gap) + gap += pad; + /* * Top of mmap area (just below the process stack). * Leave an at least ~128 MB hole with possible stack randomization. 
*/ - gap_min = SIZE_128M + stack_maxrandom_size(task_size); + gap_min = SIZE_128M; gap_max = (task_size / 6) * 5; if (gap < gap_min) diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 1d7a7213a310..cab28cf2cffb 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -2693,8 +2693,8 @@ EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); phys_addr_t paddr_vmcoreinfo_note(void) { if (xen_pv_domain()) - return virt_to_machine(&vmcoreinfo_note).maddr; + return virt_to_machine(vmcoreinfo_note).maddr; else - return __pa_symbol(&vmcoreinfo_note); + return __pa(vmcoreinfo_note); } #endif /* CONFIG_KEXEC_CORE */
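A few sketches follow to make the recurring patterns in the hunks above concrete; all are illustrative userspace C under stated assumptions, not kernel code. First, the mmap_base() changes in the powerpc and x86 hunks both pad the stack gap with the maximum stack randomization plus stack_guard_gap, and both guard the addition because RLIMIT_STACK may be RLIM_INFINITY, where gap + pad would wrap past zero. A minimal sketch of the same guard, with MIN_GAP/MAX_GAP values chosen here only for illustration:

#include <limits.h>
#include <stdio.h>

/* Standalone illustration of the wrap-around guard used by mmap_base():
 * only apply the pad when the unsigned addition does not overflow. */
static unsigned long clamp_gap(unsigned long gap, unsigned long pad)
{
	const unsigned long MIN_GAP = 128UL * 1024 * 1024;
	const unsigned long MAX_GAP = ULONG_MAX / 6 * 5;	/* stand-in for TASK_SIZE/6*5 */

	if (gap + pad > gap)	/* false only when gap + pad wrapped */
		gap += pad;
	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return gap;
}

int main(void)
{
	printf("%#lx\n", clamp_gap(8UL * 1024 * 1024, 1UL << 30));	/* normal rlimit */
	printf("%#lx\n", clamp_gap(ULONG_MAX, 1UL << 30));		/* RLIM_INFINITY case */
	return 0;
}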
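The arm, arm64, s390, and x86 ELF_ET_DYN_BASE hunks replace the old (TASK_SIZE / 3 * 2) formula with a fixed low base: 0x400000 for 32-bit tasks and 4GB on 64-bit, keeping PIE binaries away from the middle of the address space. A quick way to observe the effect from userspace (hypothetical demo; assumes a Linux /proc and a PIE build, e.g. gcc -fPIE -pie):

#include <stdio.h>

int main(void)
{
	/* Dump our own mappings; the first line is the lowest mapping,
	 * normally the PIE executable itself. */
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}

Running it under setarch $(uname -m) -R disables randomization, so on a patched 64-bit kernel the lowest mapping should sit at the unrandomized ELF_ET_DYN_BASE, i.e. at or above 0x100000000.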
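The sh extable_64.c hunk drops the open-coded binary search in favor of the shared bsearch() helper, and its comparator deliberately returns -1/0/1 instead of the difference of two addresses, which can wrap when the operands are far apart (the old "diff = mid->insn - value" pattern). The same comparator shape works with the userspace bsearch(3):

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned long insn; unsigned long fixup; };

/* Same shape as the kernel's cmp_ex_search(): compare explicitly to
 * avoid returning a subtraction that may overflow a long. */
static int cmp_ex_search(const void *key, const void *elt)
{
	unsigned long k = *(const unsigned long *)key;
	const struct entry *e = elt;

	if (k > e->insn)
		return 1;
	if (k < e->insn)
		return -1;
	return 0;
}

int main(void)
{
	struct entry table[] = { { 0x1000, 1 }, { 0x2000, 2 }, { 0x3000, 3 } };
	unsigned long pc = 0x2000;
	struct entry *hit = bsearch(&pc, table,
				    sizeof(table) / sizeof(table[0]),
				    sizeof(table[0]), cmp_ex_search);

	printf("fixup %lu\n", hit ? hit->fixup : 0UL);
	return 0;
}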
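The sparc nmi.c hunk renames touch_nmi_watchdog() to arch_touch_nmi_watchdog() and stops calling touch_softlockup_watchdog() itself: with the new HAVE_NMI_WATCHDOG / HAVE_HARDLOCKUP_DETECTOR_* split, the generic layer owns the common part and the architecture only supplies the hook. Roughly, the generic wrapper this implies looks like the following (a paraphrase of the generic header, not the verbatim source):

void touch_softlockup_watchdog(void);

#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
void arch_touch_nmi_watchdog(void);	/* provided by the arch, e.g. sparc above */
#else
static inline void arch_touch_nmi_watchdog(void) { }
#endif

static inline void touch_nmi_watchdog(void)
{
	/* the hardlockup part is arch-specific ... */
	arch_touch_nmi_watchdog();
	/* ... the softlockup part is common to every configuration */
	touch_softlockup_watchdog();
}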
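Several hunks are FORTIFY_SOURCE plumbing: ARCH_HAS_FORTIFY_SOURCE gets selected, the x86 string_32.h/string_64.h macro shortcuts are hidden behind #ifndef CONFIG_FORTIFY_SOURCE (fortify needs the __builtin_* forms so the compiler can see destination sizes), and the boot decompressor grows a fortify_panic() that just reports "detected buffer overflow". A userspace analogue of the check itself, assuming GCC/Clang's __builtin_object_size and -O2 so the size information survives inlining (checked_memcpy is a hypothetical name, not the kernel's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Analogue of the kernel's handler; the real one does not return. */
static void fortify_panic(const char *name)
{
	fprintf(stderr, "detected buffer overflow in %s\n", name);
	abort();
}

static inline void *checked_memcpy(void *dst, const void *src, size_t n)
{
	size_t dst_sz = __builtin_object_size(dst, 0);

	/* (size_t)-1 means the compiler could not determine the size */
	if (dst_sz != (size_t)-1 && n > dst_sz)
		fortify_panic(__func__);
	return memcpy(dst, src, n);
}

int main(void)
{
	char small[8];

	checked_memcpy(small, "0123456", 7);		/* fits */
	checked_memcpy(small, "0123456789abcdef", 16);	/* caught, aborts */
	return 0;
}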
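The sh bug.h hunk appends unreachable() after the trapping instruction because the asm is opaque to the compiler: without the hint, GCC assumes execution continues past BUG() and may warn about, or miscompile, the code that follows. A standalone illustration, using an x86 trap instruction purely so the demo is self-contained:

#include <stdio.h>

/* __builtin_unreachable() is what unreachable() boils down to; "ud2"
 * stands in for the sh trap in this demo. */
#define MY_BUG() do {			\
	__asm__ volatile ("ud2");	\
	__builtin_unreachable();	\
} while (0)

static int classify(int x)
{
	if (x >= 0)
		return 1;
	MY_BUG();
	/* no return statement needed: the compiler now knows MY_BUG()
	 * never falls through, so there is no "control reaches end of
	 * non-void function" warning */
}

int main(void)
{
	printf("%d\n", classify(5));
	return 0;
}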
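Finally, the sh and x86 stackprotector hunks mask the freshly generated canary with CANARY_MASK. Assuming the mask used by this series (all ones on 32-bit, low byte zeroed on 64-bit), the zero byte "ASCII-armors" the canary: any string-based overflow or information leak stops at the embedded NUL. A little-endian demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed 64-bit mask: 0xffffffffffffff00. On a little-endian
	 * machine the forced zero byte is the *first* byte of the
	 * canary in memory, so string routines stop immediately. */
	unsigned long long canary = 0xdeadbeefcafef00dULL & 0xffffffffffffff00ULL;
	char buf[sizeof(canary) + 1];

	memcpy(buf, &canary, sizeof(canary));
	buf[sizeof(canary)] = '\0';
	printf("strlen over the canary bytes: %zu\n", strlen(buf));	/* 0 */
	return 0;
}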