Diffstat (limited to 'arch'): 82 files changed, 598 insertions, 442 deletions
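Several architectures in this diff wire up the new copy_file_range(2) system call (number 391 on ARM, 379 on powerpc, 375 on s390, as the unistd/syscall-table hunks below show). As a quick illustration of what is being enabled, here is a minimal userspace sketch that invokes the syscall directly through syscall(2); it assumes a kernel carrying these patches, and the hard-coded fallback number is the ARM EABI value from this diff, so adjust it for other architectures. The wrapper mirrors the six-argument signature visible in the s390 compat wrapper hunk (fd_in, off_in, fd_out, off_out, len, flags).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef __NR_copy_file_range
#define __NR_copy_file_range 391	/* ARM EABI value below; 379 on powerpc, 375 on s390 */
#endif

/* Thin wrapper: glibc had no copy_file_range() wrapper when this was merged. */
static ssize_t copy_file_range_raw(int fd_in, loff_t *off_in, int fd_out,
				   loff_t *off_out, size_t len, unsigned int flags)
{
	return syscall(__NR_copy_file_range, fd_in, off_in, fd_out, off_out,
		       len, flags);
}

int main(int argc, char **argv)
{
	loff_t off_in = 0, off_out = 0;
	struct stat st;
	ssize_t n;
	int fd_in, fd_out;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
		return EXIT_FAILURE;
	}
	fd_in = open(argv[1], O_RDONLY);
	fd_out = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd_in < 0 || fd_out < 0 || fstat(fd_in, &st) < 0) {
		perror(argv[0]);
		return EXIT_FAILURE;
	}
	/* The kernel may copy less than requested, so loop until done;
	 * both file offsets are advanced by the kernel through the pointers. */
	while (off_in < st.st_size) {
		n = copy_file_range_raw(fd_in, &off_in, fd_out, &off_out,
					st.st_size - off_in, 0);
		if (n < 0) {
			perror("copy_file_range");
			return EXIT_FAILURE;
		}
		if (n == 0)	/* unexpected EOF on input */
			break;
	}
	return EXIT_SUCCESS;
}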
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 4c23a68a0917..7a6a58ef8aaf 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -106,6 +106,15 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif +# -fstack-protector-strong triggers protection checks in this code, +# but it is being used too early to link to meaningful stack_chk logic. +nossp_flags := $(call cc-option, -fno-stack-protector) +CFLAGS_atags_to_fdt.o := $(nossp_flags) +CFLAGS_fdt.o := $(nossp_flags) +CFLAGS_fdt_ro.o := $(nossp_flags) +CFLAGS_fdt_rw.o := $(nossp_flags) +CFLAGS_fdt_wip.o := $(nossp_flags) + ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj) asflags-y := -DZIMAGE diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index ede692ffa32e..5dd2528e9e45 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -417,6 +417,7 @@ #define __NR_userfaultfd (__NR_SYSCALL_BASE+388) #define __NR_membarrier (__NR_SYSCALL_BASE+389) #define __NR_mlock2 (__NR_SYSCALL_BASE+390) +#define __NR_copy_file_range (__NR_SYSCALL_BASE+391) /* * The following SWIs are ARM private. diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index ac368bb068d1..dfc7cd6851ad 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -400,6 +400,7 @@ CALL(sys_userfaultfd) CALL(sys_membarrier) CALL(sys_mlock2) + CALL(sys_copy_file_range) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index cd822d8454c0..307237cfe728 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -27,6 +27,8 @@ $(warning LSE atomics not supported by binutils) endif KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables +KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads) KBUILD_AFLAGS += $(lseinstr) ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 18ca9fb9e65f..86581f793e39 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -16,7 +16,6 @@ CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_MEMCG=y CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_KMEM=y CONFIG_CGROUP_HUGETLB=y # CONFIG_UTS_NS is not set # CONFIG_IPC_NS is not set @@ -37,15 +36,13 @@ CONFIG_ARCH_EXYNOS7=y CONFIG_ARCH_LAYERSCAPE=y CONFIG_ARCH_HISI=y CONFIG_ARCH_MEDIATEK=y +CONFIG_ARCH_QCOM=y CONFIG_ARCH_ROCKCHIP=y CONFIG_ARCH_SEATTLE=y CONFIG_ARCH_RENESAS=y CONFIG_ARCH_R8A7795=y CONFIG_ARCH_STRATIX10=y CONFIG_ARCH_TEGRA=y -CONFIG_ARCH_TEGRA_132_SOC=y -CONFIG_ARCH_TEGRA_210_SOC=y -CONFIG_ARCH_QCOM=y CONFIG_ARCH_SPRD=y CONFIG_ARCH_THUNDER=y CONFIG_ARCH_UNIPHIER=y @@ -54,14 +51,19 @@ CONFIG_ARCH_XGENE=y CONFIG_ARCH_ZYNQMP=y CONFIG_PCI=y CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCI_HOST_GENERIC=y CONFIG_PCI_XGENE=y -CONFIG_SMP=y +CONFIG_PCI_LAYERSCAPE=y +CONFIG_PCI_HISI=y +CONFIG_PCIE_QCOM=y CONFIG_SCHED_MC=y CONFIG_PREEMPT=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CMA=y +CONFIG_XEN=y CONFIG_CMDLINE="console=ttyAMA0" # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_COMPAT=y @@ -100,7 +102,11 @@ CONFIG_PATA_OF_PLATFORM=y CONFIG_NETDEVICES=y CONFIG_TUN=y CONFIG_VIRTIO_NET=y +CONFIG_AMD_XGBE=y CONFIG_NET_XGENE=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=y CONFIG_SKY2=y CONFIG_RAVB=y CONFIG_SMC91X=y @@ 
-117,25 +123,23 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_8250_UNIPHIER=y +CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_SERIAL_SAMSUNG=y -CONFIG_SERIAL_SAMSUNG_UARTS_4=y -CONFIG_SERIAL_SAMSUNG_UARTS=4 CONFIG_SERIAL_SAMSUNG_CONSOLE=y +CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_SH_SCI=y CONFIG_SERIAL_SH_SCI_NR_UARTS=11 CONFIG_SERIAL_SH_SCI_CONSOLE=y -CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_XILINX_PS_UART=y CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y CONFIG_VIRTIO_CONSOLE=y # CONFIG_HW_RANDOM is not set -CONFIG_I2C=y CONFIG_I2C_QUP=y +CONFIG_I2C_UNIPHIER_F=y CONFIG_I2C_RCAR=y CONFIG_SPI=y CONFIG_SPI_PL022=y @@ -176,8 +180,6 @@ CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_TEGRA=y CONFIG_MMC_SPI=y CONFIG_MMC_DW=y -CONFIG_MMC_DW_IDMAC=y -CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y @@ -187,28 +189,33 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y CONFIG_LEDS_TRIGGER_CPU=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_XGENE=y CONFIG_DMADEVICES=y -CONFIG_RCAR_DMAC=y CONFIG_QCOM_BAM_DMA=y CONFIG_TEGRA20_APB_DMA=y +CONFIG_RCAR_DMAC=y +CONFIG_VFIO=y +CONFIG_VFIO_PCI=y CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=y CONFIG_VIRTIO_MMIO=y +CONFIG_XEN_GNTDEV=y +CONFIG_XEN_GRANT_DEV_ALLOC=y CONFIG_COMMON_CLK_CS2000_CP=y CONFIG_COMMON_CLK_QCOM=y CONFIG_MSM_GCC_8916=y CONFIG_HWSPINLOCK_QCOM=y -# CONFIG_IOMMU_SUPPORT is not set +CONFIG_ARM_SMMU=y CONFIG_QCOM_SMEM=y CONFIG_QCOM_SMD=y CONFIG_QCOM_SMD_RPM=y +CONFIG_ARCH_TEGRA_132_SOC=y +CONFIG_ARCH_TEGRA_210_SOC=y +CONFIG_HISILICON_IRQ_MBIGEN=y CONFIG_PHY_XGENE=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set -CONFIG_EXT4_FS=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y CONFIG_QUOTA=y @@ -239,6 +246,7 @@ CONFIG_LOCKUP_DETECTOR=y # CONFIG_FTRACE is not set CONFIG_MEMTEST=y CONFIG_SECURITY=y +CONFIG_CRYPTO_ECHAINIV=y CONFIG_CRYPTO_ANSI_CPRNG=y CONFIG_ARM64_CRYPTO=y CONFIG_CRYPTO_SHA1_ARM64_CE=y diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 2d545d7aa80b..bf464de33f52 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -67,11 +67,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) -#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) -#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) -#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) -#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT)) -#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) +#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE)) +#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE)) +#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC)) +#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT)) +#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | 
PTE_WRITE | PTE_ATTRINDX(MT_NORMAL)) #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) @@ -81,7 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); #define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY) -#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY) #define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE) #define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT) @@ -153,6 +153,7 @@ extern struct page *empty_zero_page; #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) #define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT)) +#define pte_user(pte) (!!(pte_val(pte) & PTE_USER)) #ifdef CONFIG_ARM64_HW_AFDBM #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY)) @@ -163,8 +164,6 @@ extern struct page *empty_zero_page; #define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte)) #define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID)) -#define pte_valid_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) #define pte_valid_not_user(pte) \ ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) #define pte_valid_young(pte) \ @@ -278,13 +277,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - if (pte_valid_user(pte)) { - if (!pte_special(pte) && pte_exec(pte)) - __sync_icache_dcache(pte, addr); + if (pte_valid(pte)) { if (pte_sw_dirty(pte) && pte_write(pte)) pte_val(pte) &= ~PTE_RDONLY; else pte_val(pte) |= PTE_RDONLY; + if (pte_user(pte) && pte_exec(pte) && !pte_special(pte)) + __sync_icache_dcache(pte, addr); } /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index ffe9c2b6431b..917d98108b3f 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -514,9 +514,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems #endif /* EL2 debug */ + mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer + sbfx x0, x0, #8, #4 + cmp x0, #1 + b.lt 4f // Skip if no PMU present mrs x0, pmcr_el0 // Disable debug access traps ubfx x0, x0, #11, #5 // to EL2 and allow access to msr mdcr_el2, x0 // all PMU counters from EL1 +4: /* Stage-2 translation */ msr vttbr_el2, xzr diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index bc2abb8b1599..999633bd7294 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -65,6 +65,16 @@ #ifdef CONFIG_EFI /* + * Prevent the symbol aliases below from being emitted into the kallsyms + * table, by forcing them to be absolute symbols (which are conveniently + * ignored by scripts/kallsyms) rather than section relative symbols. + * The distinction is only relevant for partial linking, and only for symbols + * that are defined within a section declaration (which is not the case for + * the definitions below) so the resulting values will be identical. 
+ */ +#define KALLSYMS_HIDE(sym) ABSOLUTE(sym) + +/* * The EFI stub has its own symbol namespace prefixed by __efistub_, to * isolate it from the kernel proper. The following symbols are legally * accessed by the stub, so provide some aliases to make them accessible. @@ -73,25 +83,25 @@ * linked at. The routines below are all implemented in assembler in a * position independent manner */ -__efistub_memcmp = __pi_memcmp; -__efistub_memchr = __pi_memchr; -__efistub_memcpy = __pi_memcpy; -__efistub_memmove = __pi_memmove; -__efistub_memset = __pi_memset; -__efistub_strlen = __pi_strlen; -__efistub_strcmp = __pi_strcmp; -__efistub_strncmp = __pi_strncmp; -__efistub___flush_dcache_area = __pi___flush_dcache_area; +__efistub_memcmp = KALLSYMS_HIDE(__pi_memcmp); +__efistub_memchr = KALLSYMS_HIDE(__pi_memchr); +__efistub_memcpy = KALLSYMS_HIDE(__pi_memcpy); +__efistub_memmove = KALLSYMS_HIDE(__pi_memmove); +__efistub_memset = KALLSYMS_HIDE(__pi_memset); +__efistub_strlen = KALLSYMS_HIDE(__pi_strlen); +__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp); +__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp); +__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area); #ifdef CONFIG_KASAN -__efistub___memcpy = __pi_memcpy; -__efistub___memmove = __pi_memmove; -__efistub___memset = __pi_memset; +__efistub___memcpy = KALLSYMS_HIDE(__pi_memcpy); +__efistub___memmove = KALLSYMS_HIDE(__pi_memmove); +__efistub___memset = KALLSYMS_HIDE(__pi_memset); #endif -__efistub__text = _text; -__efistub__end = _end; -__efistub__edata = _edata; +__efistub__text = KALLSYMS_HIDE(_text); +__efistub__end = KALLSYMS_HIDE(_end); +__efistub__edata = KALLSYMS_HIDE(_edata); #endif diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 5a22a119a74c..0adbebbc2803 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -46,7 +46,7 @@ enum address_markers_idx { PCI_START_NR, PCI_END_NR, MODULES_START_NR, - MODUELS_END_NR, + MODULES_END_NR, KERNEL_SPACE_NR, }; diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index cf038c7d9fa9..cab7a5be40aa 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -120,6 +120,7 @@ static void __init cpu_set_ttbr1(unsigned long ttbr1) void __init kasan_init(void) { struct memblock_region *reg; + int i; /* * We are going to perform proper setup of shadow memory. @@ -155,6 +156,14 @@ void __init kasan_init(void) pfn_to_nid(virt_to_pfn(start))); } + /* + * KAsan may reuse the contents of kasan_zero_pte directly, so we + * should make sure that it maps the zero page read-only. 
+ */ + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte(&kasan_zero_pte[i], + pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO)); + memset(kasan_zero_page, 0, PAGE_SIZE); cpu_set_ttbr1(__pa(swapper_pg_dir)); flush_tlb_all(); diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 3571c7309c5e..cf6240741134 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages, if (end < MODULES_VADDR || end >= MODULES_END) return -EINVAL; + if (!numpages) + return 0; + data.set_mask = set_mask; data.clear_mask = clear_mask; diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S index 146bd99a7532..e6a30e1268a8 100644 --- a/arch/arm64/mm/proc-macros.S +++ b/arch/arm64/mm/proc-macros.S @@ -84,3 +84,15 @@ b.lo 9998b dsb \domain .endm + +/* + * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present + */ + .macro reset_pmuserenr_el0, tmpreg + mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer + sbfx \tmpreg, \tmpreg, #8, #4 + cmp \tmpreg, #1 // Skip if no PMU present + b.lt 9000f + msr pmuserenr_el0, xzr // Disable PMU access from EL0 +9000: + .endm diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index a3d867e723b4..c164d2cb35c0 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -117,7 +117,7 @@ ENTRY(cpu_do_resume) */ ubfx x11, x11, #1, #1 msr oslar_el1, x11 - msr pmuserenr_el0, xzr // Disable PMU access from EL0 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 mov x0, x12 dsb nsh // Make sure local tlb invalidation completed isb @@ -154,7 +154,7 @@ ENTRY(__cpu_setup) msr cpacr_el1, x0 // Enable FP/ASIMD mov x0, #1 << 12 // Reset mdscr_el1 and disable msr mdscr_el1, x0 // access to the DCC from EL0 - msr pmuserenr_el0, xzr // Disable PMU access from EL0 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 /* * Memory region attributes for LPAE: * diff --git a/arch/mips/bcm63xx/nvram.c b/arch/mips/bcm63xx/nvram.c index 5f2bc1e10eae..05757aed016c 100644 --- a/arch/mips/bcm63xx/nvram.c +++ b/arch/mips/bcm63xx/nvram.c @@ -19,6 +19,8 @@ #include <bcm63xx_nvram.h> +#define BCM63XX_DEFAULT_PSI_SIZE 64 + static struct bcm963xx_nvram nvram; static int mac_addr_used; @@ -85,3 +87,12 @@ int bcm63xx_nvram_get_mac_address(u8 *mac) return 0; } EXPORT_SYMBOL(bcm63xx_nvram_get_mac_address); + +int bcm63xx_nvram_get_psi_size(void) +{ + if (nvram.psi_size > 0) + return nvram.psi_size; + + return BCM63XX_DEFAULT_PSI_SIZE; +} +EXPORT_SYMBOL(bcm63xx_nvram_get_psi_size); diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h index 4e0b6bc1165e..348df49dcc9f 100644 --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h @@ -30,4 +30,6 @@ u8 *bcm63xx_nvram_get_name(void); */ int bcm63xx_nvram_get_mac_address(u8 *mac); +int bcm63xx_nvram_get_psi_size(void); + #endif /* BCM63XX_NVRAM_H */ diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h index 06f17e778c27..8d1c8162f0c1 100644 --- a/arch/powerpc/include/asm/book3s/64/hash.h +++ b/arch/powerpc/include/asm/book3s/64/hash.h @@ -50,7 +50,9 @@ * set of bits not changed in pmd_modify. 
*/ #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ - _PAGE_ACCESSED | _PAGE_THP_HUGE) + _PAGE_ACCESSED | _PAGE_THP_HUGE | _PAGE_PTE | \ + _PAGE_SOFT_DIRTY) + #ifdef CONFIG_PPC_64K_PAGES #include <asm/book3s/64/hash-64k.h> diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 8204b0c393aa..8d1c41d28318 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -223,7 +223,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd) #define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd)) #define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_young(pmd) pte_young(pmd_pte(pmd)) -#define pmd_dirty(pmd) pte_dirty(pmd_pte(pmd)) #define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd))) #define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 271fefbbe521..9d08d8cbed1a 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -38,8 +38,7 @@ #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS -#define KVM_USER_MEM_SLOTS 32 -#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS +#define KVM_USER_MEM_SLOTS 512 #ifdef CONFIG_KVM_MMIO #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 5654ece02c0d..3fa9df70aa20 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -383,3 +383,4 @@ SYSCALL(ni_syscall) SYSCALL(ni_syscall) SYSCALL(ni_syscall) SYSCALL(mlock2) +SYSCALL(copy_file_range) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 6a5ace5fa0c8..1f2594d45605 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 379 +#define NR_syscalls 380 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 12a05652377a..940290d45b08 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -389,5 +389,6 @@ #define __NR_userfaultfd 364 #define __NR_membarrier 365 #define __NR_mlock2 378 +#define __NR_copy_file_range 379 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 8654cb166c19..ca9e5371930e 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe) const char *eeh_pe_loc_get(struct eeh_pe *pe) { struct pci_bus *bus = eeh_pe_bus_get(pe); - struct device_node *dn = pci_bus_to_OF_node(bus); + struct device_node *dn; const char *loc = NULL; - if (!dn) - goto out; + while (bus) { + dn = pci_bus_to_OF_node(bus); + if (!dn) { + bus = bus->parent; + continue; + } - /* PHB PE or root PE ? */ - if (pci_is_root_bus(bus)) { - loc = of_get_property(dn, "ibm,loc-code", NULL); - if (!loc) + if (pci_is_root_bus(bus)) loc = of_get_property(dn, "ibm,io-base-loc-code", NULL); + else + loc = of_get_property(dn, "ibm,slot-location-code", + NULL); + if (loc) - goto out; + return loc; - /* Check the root port */ - dn = dn->child; - if (!dn) - goto out; + bus = bus->parent; } - loc = of_get_property(dn, "ibm,loc-code", NULL); - if (!loc) - loc = of_get_property(dn, "ibm,slot-location-code", NULL); - -out: - return loc ? 
loc : "N/A"; + return "N/A"; } /** diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index db475d41b57a..f28754c497e5 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence) li r5,0 blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC */ - -#ifdef CONFIG_MODULES -#if defined(_CALL_ELF) && _CALL_ELF == 2 - -#ifdef CONFIG_MODVERSIONS -.weak __crc_TOC. -.section "___kcrctab+TOC.","a" -.globl __kcrctab_TOC. -__kcrctab_TOC.: - .llong __crc_TOC. -#endif - -/* - * Export a fake .TOC. since both modpost and depmod will complain otherwise. - * Both modpost and depmod strip the leading . so we do the same here. - */ -.section "__ksymtab_strings","a" -__kstrtab_TOC.: - .asciz "TOC." - -.section "___ksymtab+TOC.","a" -/* This symbol name is important: it's used by modpost to find exported syms */ -.globl __ksymtab_TOC. -__ksymtab_TOC.: - .llong 0 /* .value */ - .llong __kstrtab_TOC. -#endif /* ELFv2 */ -#endif /* MODULES */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 59663af9315f..ac64ffdb52c8 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers, } } -/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ +/* + * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC. + * seem to be defined (value set later). + */ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) { unsigned int i; @@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab) for (i = 1; i < numsyms; i++) { if (syms[i].st_shndx == SHN_UNDEF) { char *name = strtab + syms[i].st_name; - if (name[0] == '.') + if (name[0] == '.') { + if (strcmp(name+1, "TOC.") == 0) + syms[i].st_shndx = SHN_ABS; memmove(name, name+1, strlen(name)); + } } } } @@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs, numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym); for (i = 1; i < numsyms; i++) { - if (syms[i].st_shndx == SHN_UNDEF + if (syms[i].st_shndx == SHN_ABS && strcmp(strtab + syms[i].st_name, "TOC.") == 0) return &syms[i]; } diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 774a253ca4e1..9bf7031a67ff 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -377,15 +377,12 @@ no_seg_found: static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) { - struct kvmppc_vcpu_book3s *vcpu_book3s; u64 esid, esid_1t; int slb_nr; struct kvmppc_slb *slbe; dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); - vcpu_book3s = to_book3s(vcpu); - esid = GET_ESID(rb); esid_1t = GET_ESID_1T(rb); slb_nr = rb & 0xfff; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index cff207b72c46..baeddb06811d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -833,6 +833,24 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, vcpu->stat.sum_exits++; + /* + * This can happen if an interrupt occurs in the last stages + * of guest entry or the first stages of guest exit (i.e. after + * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV + * and before setting it to KVM_GUEST_MODE_HOST_HV). + * That can happen due to a bug, or due to a machine check + * occurring at just the wrong time. 
+ */ + if (vcpu->arch.shregs.msr & MSR_HV) { + printk(KERN_EMERG "KVM trap in HV mode!\n"); + printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", + vcpu->arch.trap, kvmppc_get_pc(vcpu), + vcpu->arch.shregs.msr); + kvmppc_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + run->hw.hardware_exit_reason = vcpu->arch.trap; + return RESUME_HOST; + } run->exit_reason = KVM_EXIT_UNKNOWN; run->ready_for_interrupt_injection = 1; switch (vcpu->arch.trap) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 3c6badcd53ef..6ee26de9a1de 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ 2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW - rlwimi r5, r4, 1, DAWRX_WT + rlwimi r5, r4, 2, DAWRX_WT clrrdi r4, r4, 3 std r4, VCPU_DAWR(r3) std r5, VCPU_DAWRX(r3) @@ -2404,6 +2404,8 @@ machine_check_realmode: * guest as machine check causing guest to crash. */ ld r11, VCPU_MSR(r9) + rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */ + bne mc_cont /* if so, exit to host */ andi. r10, r11, MSR_RI /* check for unrecoverable exception */ beq 1f /* Deliver a machine check to guest */ ld r10, VCPU_PC(r9) diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 6fd2405c7f4a..a3b182dcb823 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); break; case KVM_REG_PPC_VRSAVE: - if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { - r = -ENXIO; - break; - } - vcpu->arch.vrsave = set_reg_val(reg->id, val); + val = get_reg_val(reg->id, vcpu->arch.vrsave); break; #endif /* CONFIG_ALTIVEC */ default: @@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); break; case KVM_REG_PPC_VRSAVE: - val = get_reg_val(reg->id, vcpu->arch.vrsave); + if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { + r = -ENXIO; + break; + } + vcpu->arch.vrsave = set_reg_val(reg->id, val); break; #endif /* CONFIG_ALTIVEC */ default: diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 22d94c3e6fc4..d0f0a514b04e 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -560,12 +560,12 @@ subsys_initcall(add_system_ram_resources); */ int devmem_is_allowed(unsigned long pfn) { + if (page_is_rtas_user_buf(pfn)) + return 1; if (iomem_is_exclusive(PFN_PHYS(pfn))) return 0; if (!page_is_ram(pfn)) return 1; - if (page_is_rtas_user_buf(pfn)) - return 1; return 0; } #endif /* CONFIG_STRICT_DEVMEM */ diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c index 7d5e295255b7..9958ba8bf0d2 100644 --- a/arch/powerpc/perf/power8-pmu.c +++ 
b/arch/powerpc/perf/power8-pmu.c @@ -816,7 +816,7 @@ static struct power_pmu power8_pmu = { .get_constraint = power8_get_constraint, .get_alternatives = power8_get_alternatives, .disable_pmc = power8_disable_pmc, - .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S, + .flags = PPMU_HAS_SIER | PPMU_ARCH_207S, .n_generic = ARRAY_SIZE(power8_generic_events), .generic_events = power8_generic_events, .cache_events = &power8_cache_events, diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h index 16aa0c779e07..595a275c36f8 100644 --- a/arch/s390/include/asm/irqflags.h +++ b/arch/s390/include/asm/irqflags.h @@ -8,6 +8,8 @@ #include <linux/types.h> +#define ARCH_IRQ_ENABLED (3UL << (BITS_PER_LONG - 8)) + /* store then OR system mask. */ #define __arch_local_irq_stosm(__or) \ ({ \ @@ -54,14 +56,17 @@ static inline notrace void arch_local_irq_enable(void) __arch_local_irq_stosm(0x03); } +/* This only restores external and I/O interrupt state */ static inline notrace void arch_local_irq_restore(unsigned long flags) { - __arch_local_irq_ssm(flags); + /* only disabled->disabled and disabled->enabled is valid */ + if (flags & ARCH_IRQ_ENABLED) + arch_local_irq_enable(); } static inline notrace bool arch_irqs_disabled_flags(unsigned long flags) { - return !(flags & (3UL << (BITS_PER_LONG - 8))); + return !(flags & ARCH_IRQ_ENABLED); } static inline notrace bool arch_irqs_disabled(void) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 6742414dbd6f..8959ebb6d2c9 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -546,7 +546,6 @@ struct kvm_vcpu_arch { struct kvm_s390_sie_block *sie_block; unsigned int host_acrs[NUM_ACRS]; struct fpu host_fpregs; - struct fpu guest_fpregs; struct kvm_s390_local_interrupt local_int; struct hrtimer ckc_timer; struct kvm_s390_pgm_info pgm; diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 1a9a98de5bde..69aa18be61af 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -8,10 +8,13 @@ #include <asm/pci_insn.h> /* I/O Map */ -#define ZPCI_IOMAP_MAX_ENTRIES 0x7fff -#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000ULL -#define ZPCI_IOMAP_ADDR_IDX_MASK 0x7fff000000000000ULL -#define ZPCI_IOMAP_ADDR_OFF_MASK 0x0000ffffffffffffULL +#define ZPCI_IOMAP_SHIFT 48 +#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL +#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1) +#define ZPCI_IOMAP_MAX_ENTRIES \ + ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT)) +#define ZPCI_IOMAP_ADDR_IDX_MASK \ + (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE) struct zpci_iomap_entry { u32 fh; @@ -21,8 +24,9 @@ struct zpci_iomap_entry { extern struct zpci_iomap_entry *zpci_iomap_start; +#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT)) #define ZPCI_IDX(addr) \ - (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48) + (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT) #define ZPCI_OFFSET(addr) \ ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK) diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index f16debf6a612..1c4fe129486d 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h @@ -166,14 +166,14 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS]; */ #define start_thread(regs, new_psw, new_stackp) do { \ regs->psw.mask = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA; \ - regs->psw.addr = new_psw | 
PSW_ADDR_AMODE; \ + regs->psw.addr = new_psw; \ regs->gprs[15] = new_stackp; \ execve_tail(); \ } while (0) #define start_thread31(regs, new_psw, new_stackp) do { \ regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \ - regs->psw.addr = new_psw | PSW_ADDR_AMODE; \ + regs->psw.addr = new_psw; \ regs->gprs[15] = new_stackp; \ crst_table_downgrade(current->mm, 1UL << 31); \ execve_tail(); \ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index f00cd35c8ac4..99bc456cc26a 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -149,7 +149,7 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag) #define arch_has_block_step() (1) #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0) -#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN) +#define instruction_pointer(regs) ((regs)->psw.addr) #define user_stack_pointer(regs)((regs)->gprs[15]) #define profile_pc(regs) instruction_pointer(regs) @@ -161,7 +161,7 @@ static inline long regs_return_value(struct pt_regs *regs) static inline void instruction_pointer_set(struct pt_regs *regs, unsigned long val) { - regs->psw.addr = val | PSW_ADDR_AMODE; + regs->psw.addr = val; } int regs_query_register_offset(const char *name); @@ -171,7 +171,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) { - return regs->gprs[15] & PSW_ADDR_INSN; + return regs->gprs[15]; } #endif /* __ASSEMBLY__ */ diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 34ec202472c6..ab3aa6875a59 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -310,7 +310,8 @@ #define __NR_recvmsg 372 #define __NR_shutdown 373 #define __NR_mlock2 374 -#define NR_syscalls 375 +#define __NR_copy_file_range 375 +#define NR_syscalls 376 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index fac4eeddef91..ae2cda5eee5a 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len); COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); +COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index a92b39fd0e63..3986c9f62191 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -59,8 +59,6 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu) struct save_area *sa; sa = (void *) memblock_alloc(sizeof(*sa), 8); - if (!sa) - return NULL; if (is_boot_cpu) list_add(&sa->list, &dump_save_areas); else diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index 6fca0e46464e..c890a5589e59 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view, except_str = "*"; else except_str = "-"; - caller = ((unsigned long) entry->caller) & 
PSW_ADDR_INSN; + caller = (unsigned long) entry->caller; rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ", area, (long long)time_spec.tv_sec, time_spec.tv_nsec / 1000, level, except_str, diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index dc8e20473484..02bd02ff648b 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) unsigned long addr; while (1) { - sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - addr = sf->gprs[8] & PSW_ADDR_INSN; + addr = sf->gprs[8]; printk("([<%016lx>] %pSR)\n", addr, (void *)addr); /* Follow the backchain. */ while (1) { low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; + sp = sf->back_chain; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - addr = sf->gprs[8] & PSW_ADDR_INSN; + addr = sf->gprs[8]; printk(" [<%016lx>] %pSR\n", addr, (void *)addr); } /* Zero backchain detected, check for interrupt frame. @@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *) sp; - addr = regs->psw.addr & PSW_ADDR_INSN; + addr = regs->psw.addr; printk(" [<%016lx>] %pSR\n", addr, (void *)addr); low = sp; sp = regs->gprs[15]; diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 20a5caf6d981..c55576bbaa1f 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -252,14 +252,14 @@ static void early_pgm_check_handler(void) unsigned long addr; addr = S390_lowcore.program_old_psw.addr; - fixup = search_exception_tables(addr & PSW_ADDR_INSN); + fixup = search_exception_tables(addr); if (!fixup) disabled_wait(0); /* Disable low address protection before storing into lowcore. */ __ctl_store(cr0, 0, 0); cr0_new = cr0 & ~(1UL << 28); __ctl_load(cr0_new, 0, 0); - S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE; + S390_lowcore.program_old_psw.addr = extable_fixup(fixup); __ctl_load(cr0, 0, 0); } @@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void) psw_t psw; psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA; - psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler; + psw.addr = (unsigned long) s390_base_ext_handler; S390_lowcore.external_new_psw = psw; - psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + psw.addr = (unsigned long) s390_base_pgm_handler; S390_lowcore.program_new_psw = psw; s390_base_pgm_handler_fn = early_pgm_check_handler; } diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index e0eaf11134b4..0f7bfeba6da6 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c @@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) goto out; if (unlikely(atomic_read(&current->tracing_graph_pause))) goto out; - ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; + ip -= MCOUNT_INSN_SIZE; trace.func = ip; trace.depth = current->curr_ret_stack + 1; /* Only trace if the calling function expects to. 
*/ diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 0a5a6b661b93..f20abdb5630a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -2057,12 +2057,12 @@ void s390_reset_system(void) /* Set new machine check handler */ S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.mcck_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler; + (unsigned long) s390_base_mcck_handler; /* Set new program check handler */ S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT; S390_lowcore.program_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + (unsigned long) s390_base_pgm_handler; /* * Clear subchannel ID and number to signal new kernel that no CCW or diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 389db56a2208..250f5972536a 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c @@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb, __ctl_load(per_kprobe, 9, 11); regs->psw.mask |= PSW_MASK_PER; regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); - regs->psw.addr = ip | PSW_ADDR_AMODE; + regs->psw.addr = ip; } NOKPROBE_SYMBOL(enable_singlestep); @@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb, __ctl_load(kcb->kprobe_saved_ctl, 9, 11); regs->psw.mask &= ~PSW_MASK_PER; regs->psw.mask |= kcb->kprobe_saved_imask; - regs->psw.addr = ip | PSW_ADDR_AMODE; + regs->psw.addr = ip; } NOKPROBE_SYMBOL(disable_singlestep); @@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs) */ preempt_disable(); kcb = get_kprobe_ctlblk(); - p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2)); + p = get_kprobe((void *)(regs->psw.addr - 2)); if (p) { if (kprobe_running()) { @@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) break; } - regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE; + regs->psw.addr = orig_ret_address; pop_kprobe(get_kprobe_ctlblk()); kretprobe_hash_unlock(current, &flags); @@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler); static void resume_execution(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; + unsigned long ip = regs->psw.addr; int fixup = probe_get_fixup_type(p->ainsn.insn); /* Check if the kprobes location is an enabled ftrace caller */ @@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr) * In case the user-specified fault handler returned * zero, try to fix up. 
*/ - entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); + entry = search_exception_tables(regs->psw.addr); if (entry) { - regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(entry); return 1; } @@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs)); /* setup return addr to the jprobe handler routine */ - regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE; + regs->psw.addr = (unsigned long) jp->entry; regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); /* r15 is the stack pointer */ diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index 61595c1f0a0f..cfcba2dd9bb5 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c @@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs) static unsigned long instruction_pointer_guest(struct pt_regs *regs) { - return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN; + return sie_block(regs)->gpsw.addr; } unsigned long perf_instruction_pointer(struct pt_regs *regs) @@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry, struct pt_regs *regs; while (1) { - sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); + perf_callchain_store(entry, sf->gprs[8]); /* Follow the backchain. */ while (1) { low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; + sp = sf->back_chain; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; - perf_callchain_store(entry, - sf->gprs[8] & PSW_ADDR_INSN); + perf_callchain_store(entry, sf->gprs[8]); } /* Zero backchain detected, check for interrupt frame. 
*/ sp = (unsigned long) (sf + 1); if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *) sp; - perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN); + perf_callchain_store(entry, sf->gprs[8]); low = sp; sp = regs->gprs[15]; } diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 114ee8b96f17..2bba7df4ac51 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk) return 0; low = task_stack_page(tsk); high = (struct stack_frame *) task_pt_regs(tsk); - sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN); + sf = (struct stack_frame *) tsk->thread.ksp; if (sf <= low || sf > high) return 0; - sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN); + sf = (struct stack_frame *) sf->back_chain; if (sf <= low || sf > high) return 0; return sf->gprs[8]; @@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp, memset(&frame->childregs, 0, sizeof(struct pt_regs)); frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; - frame->childregs.psw.addr = PSW_ADDR_AMODE | + frame->childregs.psw.addr = (unsigned long) kernel_thread_starter; frame->childregs.gprs[9] = new_stackp; /* function */ frame->childregs.gprs[10] = arg; @@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p) return 0; low = task_stack_page(p); high = (struct stack_frame *) task_pt_regs(p); - sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN); + sf = (struct stack_frame *) p->thread.ksp; if (sf <= low || sf > high) return 0; for (count = 0; count < 16; count++) { - sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN); + sf = (struct stack_frame *) sf->back_chain; if (sf <= low || sf > high) return 0; - return_address = sf->gprs[8] & PSW_ADDR_INSN; + return_address = sf->gprs[8]; if (!in_sched_functions(return_address)) return return_address; } diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 01c37b36caf9..49b1c13bf6c9 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task) if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP)) new.control |= PER_EVENT_IFETCH; new.start = 0; - new.end = PSW_ADDR_INSN; + new.end = -1UL; } /* Take care of the PER enablement bit in the PSW. */ @@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child, else if (addr == (addr_t) &dummy->cr11) /* End address of the active per set. */ return test_thread_flag(TIF_SINGLE_STEP) ? - PSW_ADDR_INSN : child->thread.per_user.end; + -1UL : child->thread.per_user.end; else if (addr == (addr_t) &dummy->bits) /* Single-step bit. */ return test_thread_flag(TIF_SINGLE_STEP) ? @@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request, } return 0; default: - /* Removing high order bit from addr (only for 31 bit). 
*/ - addr &= PSW_ADDR_INSN; return ptrace_request(child, request, addr, data); } } diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index c6878fbbcf13..9220db5c996a 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -301,25 +301,21 @@ static void __init setup_lowcore(void) BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096); lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0); lc->restart_psw.mask = PSW_KERNEL_BITS; - lc->restart_psw.addr = - PSW_ADDR_AMODE | (unsigned long) restart_int_handler; + lc->restart_psw.addr = (unsigned long) restart_int_handler; lc->external_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; - lc->external_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) ext_int_handler; + lc->external_new_psw.addr = (unsigned long) ext_int_handler; lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK; - lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call; + lc->svc_new_psw.addr = (unsigned long) system_call; lc->program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; - lc->program_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) pgm_check_handler; + lc->program_new_psw.addr = (unsigned long) pgm_check_handler; lc->mcck_new_psw.mask = PSW_KERNEL_BITS; - lc->mcck_new_psw.addr = - PSW_ADDR_AMODE | (unsigned long) mcck_int_handler; + lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler; lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK; - lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler; + lc->io_new_psw.addr = (unsigned long) io_int_handler; lc->clock_comparator = -1ULL; lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 028cc46cb82a..d82562cf0a0e 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* Set up to return from userspace. If provided, use a stub already in userspace. */ if (ka->sa.sa_flags & SA_RESTORER) { - restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; + restorer = (unsigned long) ka->sa.sa_restorer; } else { /* Signal frame without vector registers are short ! */ __u16 __user *svc = (void __user *) frame + frame_size - 2; if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) return -EFAULT; - restorer = (unsigned long) svc | PSW_ADDR_AMODE; + restorer = (unsigned long) svc; } /* Set up registers for signal handler */ @@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | (PSW_USER_BITS & PSW_MASK_ASC) | (regs->psw.mask & ~PSW_MASK_ASC); - regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; + regs->psw.addr = (unsigned long) ka->sa.sa_handler; regs->gprs[2] = sig; regs->gprs[3] = (unsigned long) &frame->sc; @@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, /* Set up to return from userspace. If provided, use a stub already in userspace. 
*/ if (ksig->ka.sa.sa_flags & SA_RESTORER) { - restorer = (unsigned long) - ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE; + restorer = (unsigned long) ksig->ka.sa.sa_restorer; } else { __u16 __user *svc = &frame->svc_insn; if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc)) return -EFAULT; - restorer = (unsigned long) svc | PSW_ADDR_AMODE; + restorer = (unsigned long) svc; } /* Create siginfo on the signal stack */ @@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | (PSW_USER_BITS & PSW_MASK_ASC) | (regs->psw.mask & ~PSW_MASK_ASC); - regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE; + regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler; regs->gprs[2] = ksig->sig; regs->gprs[3] = (unsigned long) &frame->info; diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a13468b9a913..3c65a8eae34d 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -623,8 +623,6 @@ void __init smp_save_dump_cpus(void) return; /* Allocate a page as dumping area for the store status sigps */ page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31); - if (!page) - panic("could not allocate memory for save area\n"); /* Set multi-threading state to the previous system. */ pcpu_set_smt(sclp.mtid_prev); boot_cpu_addr = stap(); diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 1785cd82253c..5acba3cb7220 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c @@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace, unsigned long addr; while(1) { - sp &= PSW_ADDR_INSN; if (sp < low || sp > high) return sp; sf = (struct stack_frame *)sp; while(1) { - addr = sf->gprs[8] & PSW_ADDR_INSN; + addr = sf->gprs[8]; if (!trace->skip) trace->entries[trace->nr_entries++] = addr; else @@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace, if (trace->nr_entries >= trace->max_entries) return sp; low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; + sp = sf->back_chain; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) @@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace, if (sp <= low || sp > high - sizeof(*regs)) return sp; regs = (struct pt_regs *)sp; - addr = regs->psw.addr & PSW_ADDR_INSN; + addr = regs->psw.addr; if (savesched || !in_sched_functions(addr)) { if (!trace->skip) trace->entries[trace->nr_entries++] = addr; @@ -65,7 +64,7 @@ void save_stack_trace(struct stack_trace *trace) register unsigned long sp asm ("15"); unsigned long orig_sp, new_sp; - orig_sp = sp & PSW_ADDR_INSN; + orig_sp = sp; new_sp = save_context_stack(trace, orig_sp, S390_lowcore.panic_stack - PAGE_SIZE, S390_lowcore.panic_stack, 1); @@ -86,7 +85,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) { unsigned long sp, low, high; - sp = tsk->thread.ksp & PSW_ADDR_INSN; + sp = tsk->thread.ksp; low = (unsigned long) task_stack_page(tsk); high = (unsigned long) task_pt_regs(tsk); save_context_stack(trace, sp, low, high, 0); diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 5378c3ea1b98..293d8b98fd52 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom) SYSCALL(sys_recvmsg,compat_sys_recvmsg) SYSCALL(sys_shutdown,sys_shutdown) SYSCALL(sys_mlock2,compat_sys_mlock2) +SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ diff --git 
a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index d69d648759c9..017eb03daee2 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs) address = *(unsigned long *)(current->thread.trap_tdb + 24); else address = regs->psw.addr; - return (void __user *) - ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN); + return (void __user *) (address - (regs->int_code >> 16)); } static inline void report_user_fault(struct pt_regs *regs, int signr) @@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr) return; printk("User process fault: interruption code %04x ilc:%d ", regs->int_code & 0xffff, regs->int_code >> 17); - print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); + print_vma_addr("in ", regs->psw.addr); printk("\n"); show_regs(regs); } @@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str) report_user_fault(regs, si_signo); } else { const struct exception_table_entry *fixup; - fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); + fixup = search_exception_tables(regs->psw.addr); if (fixup) - regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(fixup); else { enum bug_trap_type btt; - btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); + btt = report_bug(regs->psw.addr, regs); if (btt == BUG_TRAP_TYPE_WARN) return; die(regs, str); diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig index 5fce52cf0e57..5ea5af3c7db7 100644 --- a/arch/s390/kvm/Kconfig +++ b/arch/s390/kvm/Kconfig @@ -29,6 +29,7 @@ config KVM select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING select SRCU + select KVM_VFIO ---help--- Support hosting paravirtualized guest machines using the SIE virtualization capability on the mainframe. This should work diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile index b3b553469650..d42fa38c2429 100644 --- a/arch/s390/kvm/Makefile +++ b/arch/s390/kvm/Makefile @@ -7,7 +7,7 @@ # as published by the Free Software Foundation. KVM := ../../../virt/kvm -common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o +common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o ccflags-y := -Ivirt/kvm -Iarch/s390/kvm diff --git a/arch/s390/kvm/guestdbg.c b/arch/s390/kvm/guestdbg.c index 47518a324d75..d697312ce9ee 100644 --- a/arch/s390/kvm/guestdbg.c +++ b/arch/s390/kvm/guestdbg.c @@ -116,7 +116,7 @@ static void enable_all_hw_wp(struct kvm_vcpu *vcpu) if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) { *cr9 &= ~PER_CONTROL_ALTERATION; *cr10 = 0; - *cr11 = PSW_ADDR_INSN; + *cr11 = -1UL; } else { *cr9 &= ~PER_CONTROL_ALTERATION; *cr9 |= PER_EVENT_STORE; @@ -159,7 +159,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu) vcpu->arch.sie_block->gcr[0] &= ~0x800ul; vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH; vcpu->arch.sie_block->gcr[10] = 0; - vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN; + vcpu->arch.sie_block->gcr[11] = -1UL; } if (guestdbg_hw_bp_enabled(vcpu)) { diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 835d60bedb54..4af21c771f9b 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1423,44 +1423,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) return 0; } -/* - * Backs up the current FP/VX register save area on a particular - * destination. Used to switch between different register save - * areas. 
- */ -static inline void save_fpu_to(struct fpu *dst) -{ - dst->fpc = current->thread.fpu.fpc; - dst->regs = current->thread.fpu.regs; -} - -/* - * Switches the FP/VX register save area from which to lazy - * restore register contents. - */ -static inline void load_fpu_from(struct fpu *from) -{ - current->thread.fpu.fpc = from->fpc; - current->thread.fpu.regs = from->regs; -} - void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) { /* Save host register state */ save_fpu_regs(); - save_fpu_to(&vcpu->arch.host_fpregs); - - if (test_kvm_facility(vcpu->kvm, 129)) { - current->thread.fpu.fpc = vcpu->run->s.regs.fpc; - /* - * Use the register save area in the SIE-control block - * for register restore and save in kvm_arch_vcpu_put() - */ - current->thread.fpu.vxrs = - (__vector128 *)&vcpu->run->s.regs.vrs; - } else - load_fpu_from(&vcpu->arch.guest_fpregs); + vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc; + vcpu->arch.host_fpregs.regs = current->thread.fpu.regs; + /* Depending on MACHINE_HAS_VX, data stored to vrs either + * has vector register or floating point register format. + */ + current->thread.fpu.regs = vcpu->run->s.regs.vrs; + current->thread.fpu.fpc = vcpu->run->s.regs.fpc; if (test_fp_ctl(current->thread.fpu.fpc)) /* User space provided an invalid FPC, let's clear it */ current->thread.fpu.fpc = 0; @@ -1476,19 +1450,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); gmap_disable(vcpu->arch.gmap); + /* Save guest register state */ save_fpu_regs(); + vcpu->run->s.regs.fpc = current->thread.fpu.fpc; - if (test_kvm_facility(vcpu->kvm, 129)) - /* - * kvm_arch_vcpu_load() set up the register save area to - * the &vcpu->run->s.regs.vrs and, thus, the vector registers - * are already saved. Only the floating-point control must be - * copied. - */ - vcpu->run->s.regs.fpc = current->thread.fpu.fpc; - else - save_fpu_to(&vcpu->arch.guest_fpregs); - load_fpu_from(&vcpu->arch.host_fpregs); + /* Restore host register state */ + current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc; + current->thread.fpu.regs = vcpu->arch.host_fpregs.regs; save_access_regs(vcpu->run->s.regs.acrs); restore_access_regs(vcpu->arch.host_acrs); @@ -1506,8 +1474,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64)); vcpu->arch.sie_block->gcr[0] = 0xE0UL; vcpu->arch.sie_block->gcr[14] = 0xC2000000UL; - vcpu->arch.guest_fpregs.fpc = 0; - asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc)); + /* make sure the new fpc will be lazily loaded */ + save_fpu_regs(); + current->thread.fpu.fpc = 0; vcpu->arch.sie_block->gbea = 1; vcpu->arch.sie_block->pp = 0; vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; @@ -1648,17 +1617,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, vcpu->arch.local_int.wq = &vcpu->wq; vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags; - /* - * Allocate a save area for floating-point registers. If the vector - * extension is available, register contents are saved in the SIE - * control block. The allocated save area is still required in - * particular places, for example, in kvm_s390_vcpu_store_status(). 
- */ - vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS, - GFP_KERNEL); - if (!vcpu->arch.guest_fpregs.fprs) - goto out_free_sie_block; - rc = kvm_vcpu_init(vcpu, kvm, id); if (rc) goto out_free_sie_block; @@ -1879,19 +1837,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { + /* make sure the new values will be lazily loaded */ + save_fpu_regs(); if (test_fp_ctl(fpu->fpc)) return -EINVAL; - memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs)); - vcpu->arch.guest_fpregs.fpc = fpu->fpc; - save_fpu_regs(); - load_fpu_from(&vcpu->arch.guest_fpregs); + current->thread.fpu.fpc = fpu->fpc; + if (MACHINE_HAS_VX) + convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs); + else + memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs)); return 0; } int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) { - memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs)); - fpu->fpc = vcpu->arch.guest_fpregs.fpc; + /* make sure we have the latest values */ + save_fpu_regs(); + if (MACHINE_HAS_VX) + convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs); + else + memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs)); + fpu->fpc = current->thread.fpu.fpc; return 0; } @@ -2396,6 +2362,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) { unsigned char archmode = 1; + freg_t fprs[NUM_FPRS]; unsigned int px; u64 clkcomp; int rc; @@ -2411,8 +2378,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) gpa = px; } else gpa -= __LC_FPREGS_SAVE_AREA; - rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, - vcpu->arch.guest_fpregs.fprs, 128); + + /* manually convert vector registers if necessary */ + if (MACHINE_HAS_VX) { + convert_vx_to_fp(fprs, current->thread.fpu.vxrs); + rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, + fprs, 128); + } else { + rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA, + vcpu->run->s.regs.vrs, 128); + } rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA, vcpu->run->s.regs.gprs, 128); rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA, @@ -2420,7 +2395,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa) rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA, &px, 4); rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA, - &vcpu->arch.guest_fpregs.fpc, 4); + &vcpu->run->s.regs.fpc, 4); rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA, &vcpu->arch.sie_block->todpr, 4); rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA, @@ -2443,19 +2418,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) * it into the save area */ save_fpu_regs(); - if (test_kvm_facility(vcpu->kvm, 129)) { - /* - * If the vector extension is available, the vector registers - * which overlaps with floating-point registers are saved in - * the SIE-control block. Hence, extract the floating-point - * registers and the FPC value and store them in the - * guest_fpregs structure. 
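The convert_fp_to_vx()/convert_vx_to_fp() calls in the ioctl hunks above rely on the architectural overlap between the 16 floating-point registers and the high doublewords of vector registers 0-15. A standalone sketch of the conversion, with a simplified 128-bit type (the high/low split is an assumption of this sketch):

struct vx128_sketch {
	unsigned long long high;	/* overlays the FP register */
	unsigned long long low;
};

static void vx_to_fp_sketch(unsigned long long fprs[16],
			    const struct vx128_sketch vxrs[16])
{
	int i;

	for (i = 0; i < 16; i++)
		fprs[i] = vxrs[i].high;	/* FP reg = high half of VX reg */
}

static void fp_to_vx_sketch(struct vx128_sketch vxrs[16],
			    const unsigned long long fprs[16])
{
	int i;

	for (i = 0; i < 16; i++)
		vxrs[i].high = fprs[i];	/* low halves stay untouched */
}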
- */ - vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc; - convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs, - current->thread.fpu.vxrs); - } else - save_fpu_to(&vcpu->arch.guest_fpregs); + vcpu->run->s.regs.fpc = current->thread.fpu.fpc; save_access_regs(vcpu->run->s.regs.acrs); return kvm_s390_store_status_unloaded(vcpu, addr); diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 1b903f6ad54a..791a4146052c 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr) return; printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", regs->int_code & 0xffff, regs->int_code >> 17); - print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); + print_vma_addr(KERN_CONT "in ", regs->psw.addr); printk(KERN_CONT "\n"); printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); @@ -256,9 +256,9 @@ static noinline void do_no_context(struct pt_regs *regs) const struct exception_table_entry *fixup; /* Are we prepared to handle this kernel fault? */ - fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); + fixup = search_exception_tables(regs->psw.addr); if (fixup) { - regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE; + regs->psw.addr = extable_fixup(fixup); return; } diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index c722400c7697..73e290337092 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -98,7 +98,7 @@ void __init paging_init(void) __ctl_load(S390_lowcore.kernel_asce, 1, 1); __ctl_load(S390_lowcore.kernel_asce, 7, 7); __ctl_load(S390_lowcore.kernel_asce, 13, 13); - arch_local_irq_restore(4UL << (BITS_PER_LONG - 8)); + __arch_local_irq_stosm(0x04); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index ea01477b4aa6..45c4daa49930 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) { - if (is_compat_task() || (TASK_SIZE >= (1UL << 53))) + if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE) return 0; if (!(flags & MAP_FIXED)) addr = 0; if ((addr + len) >= TASK_SIZE) - return crst_table_upgrade(current->mm, 1UL << 53); + return crst_table_upgrade(current->mm, TASK_MAX_SIZE); return 0; } @@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr, area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { /* Upgrade the page table to 4 levels and retry. */ - rc = crst_table_upgrade(mm, 1UL << 53); + rc = crst_table_upgrade(mm, TASK_MAX_SIZE); if (rc) return (unsigned long) rc; area = arch_get_unmapped_area(filp, addr, len, pgoff, flags); @@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr, area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); if (!(area & ~PAGE_MASK)) return area; - if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) { + if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) { /* Upgrade the page table to 4 levels and retry. 
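The TASK_MAX_SIZE changes in these mmap hunks give the old 1UL << 53 literal a name: the ceiling of a 4-level page table. The upgrade-and-retry idiom used by both s390_get_unmapped_area() variants, as a standalone sketch (the stub functions and both constants are assumptions of this sketch):

#include <errno.h>

#define SK_TASK_SIZE		(1UL << 42)	/* assumed 3-level limit */
#define SK_TASK_MAX_SIZE	(1UL << 53)	/* the named 4-level limit */

extern unsigned long sk_search_area(unsigned long len);
extern int sk_upgrade_crst_table(unsigned long limit);

static unsigned long sk_get_unmapped_area(unsigned long len)
{
	unsigned long area = sk_search_area(len);

	if (area == (unsigned long)-ENOMEM &&
	    SK_TASK_SIZE < SK_TASK_MAX_SIZE) {
		/* Out of 3-level address space: upgrade, then retry. */
		if (sk_upgrade_crst_table(SK_TASK_MAX_SIZE))
			return (unsigned long)-ENOMEM;
		area = sk_search_area(len);
	}
	return area;
}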
*/ - rc = crst_table_upgrade(mm, 1UL << 53); + rc = crst_table_upgrade(mm, TASK_MAX_SIZE); if (rc) return (unsigned long) rc; area = arch_get_unmapped_area_topdown(filp, addr, len, diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index a809fa8e6f8b..5109827883ac 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit) unsigned long entry; int flush; - BUG_ON(limit > (1UL << 53)); + BUG_ON(limit > TASK_MAX_SIZE); flush = 0; repeat: table = crst_table_alloc(mm); diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index 43f32ce60aa3..2794845061c6 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c @@ -57,9 +57,7 @@ static __init pg_data_t *alloc_node_data(void) { pg_data_t *res; - res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1); - if (!res) - panic("Could not allocate memory for node data!\n"); + res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8); memset(res, 0, sizeof(pg_data_t)); return res; } @@ -162,7 +160,7 @@ static int __init numa_init_late(void) register_one_node(nid); return 0; } -device_initcall(numa_init_late); +arch_initcall(numa_init_late); static int __init parse_debug(char *parm) { diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c index 8a6811b2cdb9..fe0bfe370c45 100644 --- a/arch/s390/oprofile/backtrace.c +++ b/arch/s390/oprofile/backtrace.c @@ -16,24 +16,23 @@ __show_trace(unsigned int *depth, unsigned long sp, struct pt_regs *regs; while (*depth) { - sp = sp & PSW_ADDR_INSN; if (sp < low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; (*depth)--; - oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + oprofile_add_trace(sf->gprs[8]); /* Follow the backchain. 
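The oprofile hunk here drops the address masking but keeps the classic s390 back-chain walk. For context, a self-contained sketch of that walk; the frame layout is simplified, but reading the saved return address out of gprs[8] follows the code in the hunk:

struct frame_sketch {
	unsigned long back_chain;	/* 0 terminates the chain */
	unsigned long empty[5];
	unsigned long gprs[10];		/* gprs[8] holds the return addr */
};

static void walk_backchain_sketch(unsigned long sp, unsigned long low,
				  unsigned long high,
				  void (*record)(unsigned long addr))
{
	const struct frame_sketch *sf;

	while (sp) {
		if (sp <= low || sp > high - sizeof(*sf))
			return;		/* left the valid stack range */
		sf = (const struct frame_sketch *)sp;
		record(sf->gprs[8]);	/* report the return address */
		low = sp;		/* the chain must move upwards */
		sp = sf->back_chain;
	}
}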
*/ while (*depth) { low = sp; - sp = sf->back_chain & PSW_ADDR_INSN; + sp = sf->back_chain; if (!sp) break; if (sp <= low || sp > high - sizeof(*sf)) return sp; sf = (struct stack_frame *) sp; (*depth)--; - oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + oprofile_add_trace(sf->gprs[8]); } @@ -46,7 +45,7 @@ __show_trace(unsigned int *depth, unsigned long sp, return sp; regs = (struct pt_regs *) sp; (*depth)--; - oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN); + oprofile_add_trace(sf->gprs[8]); low = sp; sp = regs->gprs[15]; } diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 11d4f277e9f6..8f19c8f9d660 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -68,9 +68,12 @@ static struct airq_struct zpci_airq = { .isc = PCI_ISC, }; -/* I/O Map */ +#define ZPCI_IOMAP_ENTRIES \ + min(((unsigned long) CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT), \ + ZPCI_IOMAP_MAX_ENTRIES) + static DEFINE_SPINLOCK(zpci_iomap_lock); -static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); +static unsigned long *zpci_iomap_bitmap; struct zpci_iomap_entry *zpci_iomap_start; EXPORT_SYMBOL_GPL(zpci_iomap_start); @@ -265,27 +268,20 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev, unsigned long max) { struct zpci_dev *zdev = to_zpci(pdev); - u64 addr; int idx; - if ((bar & 7) != bar) + if (!pci_resource_len(pdev, bar)) return NULL; idx = zdev->bars[bar].map_idx; spin_lock(&zpci_iomap_lock); - if (zpci_iomap_start[idx].count++) { - BUG_ON(zpci_iomap_start[idx].fh != zdev->fh || - zpci_iomap_start[idx].bar != bar); - } else { - zpci_iomap_start[idx].fh = zdev->fh; - zpci_iomap_start[idx].bar = bar; - } /* Detect overrun */ - BUG_ON(!zpci_iomap_start[idx].count); + WARN_ON(!++zpci_iomap_start[idx].count); + zpci_iomap_start[idx].fh = zdev->fh; + zpci_iomap_start[idx].bar = bar; spin_unlock(&zpci_iomap_lock); - addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); - return (void __iomem *) addr + offset; + return (void __iomem *) ZPCI_ADDR(idx) + offset; } EXPORT_SYMBOL(pci_iomap_range); @@ -297,12 +293,11 @@ EXPORT_SYMBOL(pci_iomap); void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) { - unsigned int idx; + unsigned int idx = ZPCI_IDX(addr); - idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; spin_lock(&zpci_iomap_lock); /* Detect underrun */ - BUG_ON(!zpci_iomap_start[idx].count); + WARN_ON(!zpci_iomap_start[idx].count); if (!--zpci_iomap_start[idx].count) { zpci_iomap_start[idx].fh = 0; zpci_iomap_start[idx].bar = 0; @@ -544,15 +539,15 @@ static void zpci_irq_exit(void) static int zpci_alloc_iomap(struct zpci_dev *zdev) { - int entry; + unsigned long entry; spin_lock(&zpci_iomap_lock); - entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); - if (entry == ZPCI_IOMAP_MAX_ENTRIES) { + entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES); + if (entry == ZPCI_IOMAP_ENTRIES) { spin_unlock(&zpci_iomap_lock); return -ENOSPC; } - set_bit(entry, zpci_iomap); + set_bit(entry, zpci_iomap_bitmap); spin_unlock(&zpci_iomap_lock); return entry; } @@ -561,7 +556,7 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) { spin_lock(&zpci_iomap_lock); memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry)); - clear_bit(entry, zpci_iomap); + clear_bit(entry, zpci_iomap_bitmap); spin_unlock(&zpci_iomap_lock); } @@ -611,8 +606,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev, if (zdev->bars[i].val & 4) flags |= IORESOURCE_MEM_64; - addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48); - + addr = ZPCI_ADDR(entry); size = 1UL << 
zdev->bars[i].size; res = __alloc_res(zdev, addr, size, flags); @@ -873,23 +867,30 @@ static int zpci_mem_init(void) zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), 16, 0, NULL); if (!zdev_fmb_cache) - goto error_zdev; + goto error_fmb; - /* TODO: use realloc */ - zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), - GFP_KERNEL); + zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES, + sizeof(*zpci_iomap_start), GFP_KERNEL); if (!zpci_iomap_start) goto error_iomap; - return 0; + zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES), + sizeof(*zpci_iomap_bitmap), GFP_KERNEL); + if (!zpci_iomap_bitmap) + goto error_iomap_bitmap; + + return 0; +error_iomap_bitmap: + kfree(zpci_iomap_start); error_iomap: kmem_cache_destroy(zdev_fmb_cache); -error_zdev: +error_fmb: return -ENOMEM; } static void zpci_mem_exit(void) { + kfree(zpci_iomap_bitmap); kfree(zpci_iomap_start); kmem_cache_destroy(zdev_fmb_cache); } diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 369a3e05d468..b0e04751c5d5 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -53,6 +53,11 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf) pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n", pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid); + + if (!pdev) + return; + + pdev->error_state = pci_channel_io_perm_failure; } void zpci_event_error(void *data) diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h index f887c6465a82..8a84e05adb2e 100644 --- a/arch/sh/include/asm/barrier.h +++ b/arch/sh/include/asm/barrier.h @@ -33,7 +33,6 @@ #endif #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) -#define smp_store_mb(var, value) __smp_store_mb(var, value) #include <asm-generic/barrier.h> diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 330e738ccfc1..9af2e6338400 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -509,11 +509,10 @@ config X86_INTEL_CE config X86_INTEL_MID bool "Intel MID platform support" - depends on X86_32 depends on X86_EXTENDED_PLATFORM depends on X86_PLATFORM_DEVICES depends on PCI - depends on PCI_GOANY + depends on X86_64 || (PCI_GOANY && X86_32) depends on X86_IO_APIC select SFI select I2C diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index 881b4768644a..e7de5c9a4fbd 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h @@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu); #define __ARCH_HAS_DO_SOFTIRQ +struct irq_desc; + #ifdef CONFIG_HOTPLUG_CPU #include <linux/cpumask.h> extern int check_irq_vectors_for_cpu_disable(void); extern void fixup_irqs(void); -extern void irq_force_complete_move(int); +extern void irq_force_complete_move(struct irq_desc *desc); #endif #ifdef CONFIG_HAVE_KVM @@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void)); extern void (*x86_platform_ipi_callback)(void); extern void native_init_IRQ(void); -struct irq_desc; extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs); extern __visible unsigned int do_IRQ(struct pt_regs *regs); diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 04c27a013165..4432ab7f407c 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) } static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot) { + pgprotval_t val = 
pgprot_val(pgprot); pgprot_t new; - unsigned long val; - val = pgprot_val(pgprot); pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); return new; } static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot) { + pgprotval_t val = pgprot_val(pgprot); pgprot_t new; - unsigned long val; - val = pgprot_val(pgprot); pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) | ((val & _PAGE_PAT_LARGE) >> (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT)); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index f25321894ad2..fdb0fbfb1197 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void) { int pin, ioapic, irq, irq_entry; const struct cpumask *mask; + struct irq_desc *desc; struct irq_data *idata; struct irq_chip *chip; @@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void) if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq)) continue; - idata = irq_get_irq_data(irq); + desc = irq_to_desc(irq); + raw_spin_lock_irq(&desc->lock); + idata = irq_desc_get_irq_data(desc); /* * Honour affinities which have been set in early boot @@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void) /* Might be lapic_chip for irq 0 */ if (chip->irq_set_affinity) chip->irq_set_affinity(idata, mask, false); + raw_spin_unlock_irq(&desc->lock); } } #endif diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 908cb37da171..3b670df4ba7b 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -31,7 +31,7 @@ struct apic_chip_data { struct irq_domain *x86_vector_domain; EXPORT_SYMBOL_GPL(x86_vector_domain); static DEFINE_RAW_SPINLOCK(vector_lock); -static cpumask_var_t vector_cpumask; +static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask; static struct irq_chip lapic_controller; #ifdef CONFIG_X86_IO_APIC static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; @@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, */ static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; static int current_offset = VECTOR_OFFSET_START % 16; - int cpu, err; + int cpu, vector; - if (d->move_in_progress) + /* + * If there is still a move in progress or the previous move has not + * been cleaned up completely, tell the caller to come back later. + */ + if (d->move_in_progress || + cpumask_intersects(d->old_domain, cpu_online_mask)) return -EBUSY; /* Only try and allocate irqs on cpus that are present */ - err = -ENOSPC; cpumask_clear(d->old_domain); + cpumask_clear(searched_cpumask); cpu = cpumask_first_and(mask, cpu_online_mask); while (cpu < nr_cpu_ids) { - int new_cpu, vector, offset; + int new_cpu, offset; + /* Get the possible target cpus for @mask/@cpu from the apic */ apic->vector_allocation_domain(cpu, vector_cpumask, mask); + /* + * Clear the offline cpus from @vector_cpumask for searching + * and verify whether the result overlaps with @mask. If true, + * then the call to apic->cpu_mask_to_apicid_and() will + * succeed as well. If not, no point in trying to find a + * vector in this mask. + */ + cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask); + if (!cpumask_intersects(vector_searchmask, mask)) + goto next_cpu; + if (cpumask_subset(vector_cpumask, d->domain)) { - err = 0; if (cpumask_equal(vector_cpumask, d->domain)) - break; + goto success; /* - * New cpumask using the vector is a proper subset of - * the current in use mask. 
So cleanup the vector - * allocation for the members that are not used anymore. + * Mark the cpus which are no longer in the mask for + * cleanup. */ - cpumask_andnot(d->old_domain, d->domain, - vector_cpumask); - d->move_in_progress = - cpumask_intersects(d->old_domain, cpu_online_mask); - cpumask_and(d->domain, d->domain, vector_cpumask); - break; + cpumask_andnot(d->old_domain, d->domain, vector_cpumask); + vector = d->cfg.vector; + goto update; } vector = current_vector; @@ -158,45 +170,60 @@ next: vector = FIRST_EXTERNAL_VECTOR + offset; } - if (unlikely(current_vector == vector)) { - cpumask_or(d->old_domain, d->old_domain, - vector_cpumask); - cpumask_andnot(vector_cpumask, mask, d->old_domain); - cpu = cpumask_first_and(vector_cpumask, - cpu_online_mask); - continue; - } + /* If the search wrapped around, try the next cpu */ + if (unlikely(current_vector == vector)) + goto next_cpu; if (test_bit(vector, used_vectors)) goto next; - for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { + for_each_cpu(new_cpu, vector_searchmask) { if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) goto next; } /* Found one! */ current_vector = vector; current_offset = offset; - if (d->cfg.vector) { + /* Schedule the old vector for cleanup on all cpus */ + if (d->cfg.vector) cpumask_copy(d->old_domain, d->domain); - d->move_in_progress = - cpumask_intersects(d->old_domain, cpu_online_mask); - } - for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) + for_each_cpu(new_cpu, vector_searchmask) per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); - d->cfg.vector = vector; - cpumask_copy(d->domain, vector_cpumask); - err = 0; - break; - } + goto update; - if (!err) { - /* cache destination APIC IDs into cfg->dest_apicid */ - err = apic->cpu_mask_to_apicid_and(mask, d->domain, - &d->cfg.dest_apicid); +next_cpu: + /* + * We exclude the current @vector_cpumask from the requested + * @mask and try again with the next online cpu in the + * result. We cannot modify @mask, so we use @vector_cpumask + * as a temporary buffer here as it will be reassigned when + * calling apic->vector_allocation_domain() above. + */ + cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask); + cpumask_andnot(vector_cpumask, mask, searched_cpumask); + cpu = cpumask_first_and(vector_cpumask, cpu_online_mask); + continue; } return -ENOSPC; - return err; +update: + /* + * Exclude offline cpus from the cleanup mask and set the + * move_in_progress flag when the result is not empty. + */ + cpumask_and(d->old_domain, d->old_domain, cpu_online_mask); + d->move_in_progress = !cpumask_empty(d->old_domain); + d->cfg.vector = vector; + cpumask_copy(d->domain, vector_cpumask); +success: + /* + * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail + * as we already established that mask & d->domain & cpu_online_mask + * is not empty.
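The new next_cpu path above is pure cpumask algebra: fold the just-probed domain into searched_cpumask, subtract it from the request, and take the first remaining online cpu. A userspace stand-in with one bit per cpu (plain unsigned long instead of struct cpumask; all names invented):

/* Returns a mask with only the next cpu's bit set, or 0 if exhausted. */
static unsigned long next_search_cpu(unsigned long req_mask,
				     unsigned long domain,
				     unsigned long online,
				     unsigned long *searched)
{
	unsigned long candidates;

	*searched |= domain;			/* cpumask_or()        */
	candidates = req_mask & ~*searched;	/* cpumask_andnot()    */
	candidates &= online;			/* restrict to online  */
	return candidates & -candidates;	/* cpumask_first_and() */
}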
+ */ + BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain, + &d->cfg.dest_apicid)); + return 0; } static int assign_irq_vector(int irq, struct apic_chip_data *data, @@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node, static void clear_irq_vector(int irq, struct apic_chip_data *data) { struct irq_desc *desc; - unsigned long flags; int cpu, vector; - raw_spin_lock_irqsave(&vector_lock, flags); BUG_ON(!data->cfg.vector); vector = data->cfg.vector; @@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) data->cfg.vector = 0; cpumask_clear(data->domain); - if (likely(!data->move_in_progress)) { - raw_spin_unlock_irqrestore(&vector_lock, flags); + /* + * If move is in progress or the old_domain mask is not empty, + * i.e. the cleanup IPI has not been processed yet, we need to remove + * the old references to desc from all cpus vector tables. + */ + if (!data->move_in_progress && cpumask_empty(data->old_domain)) return; - } desc = irq_to_desc(irq); for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) { @@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data) } } data->move_in_progress = 0; - raw_spin_unlock_irqrestore(&vector_lock, flags); } void init_irq_alloc_info(struct irq_alloc_info *info, @@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) static void x86_vector_free_irqs(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { + struct apic_chip_data *apic_data; struct irq_data *irq_data; + unsigned long flags; int i; for (i = 0; i < nr_irqs; i++) { irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i); if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); clear_irq_vector(virq + i, irq_data->chip_data); - free_apic_chip_data(irq_data->chip_data); + apic_data = irq_data->chip_data; + irq_domain_reset_irq_data(irq_data); + raw_spin_unlock_irqrestore(&vector_lock, flags); + free_apic_chip_data(apic_data); #ifdef CONFIG_X86_IO_APIC if (virq + i < nr_legacy_irqs()) legacy_irq_data[virq + i] = NULL; #endif - irq_domain_reset_irq_data(irq_data); } } } @@ -406,6 +438,8 @@ int __init arch_early_irq_init(void) arch_init_htirq_domain(x86_vector_domain); BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); + BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); + BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL)); return arch_early_ioapic_init(); } @@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data, return -EINVAL; err = assign_irq_vector(irq, data, dest); - if (err) { - if (assign_irq_vector(irq, data, - irq_data_get_affinity_mask(irq_data))) - pr_err("Failed to recover vector for irq %d\n", irq); - return err; - } - - return IRQ_SET_MASK_OK; + return err ? 
err : IRQ_SET_MASK_OK; } static struct irq_chip lapic_controller = { @@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = { #ifdef CONFIG_SMP static void __send_cleanup_vector(struct apic_chip_data *data) { - cpumask_var_t cleanup_mask; - - if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) { - unsigned int i; - - for_each_cpu_and(i, data->old_domain, cpu_online_mask) - apic->send_IPI_mask(cpumask_of(i), - IRQ_MOVE_CLEANUP_VECTOR); - } else { - cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask); - apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR); - free_cpumask_var(cleanup_mask); - } + raw_spin_lock(&vector_lock); + cpumask_and(data->old_domain, data->old_domain, cpu_online_mask); data->move_in_progress = 0; + if (!cpumask_empty(data->old_domain)) + apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR); + raw_spin_unlock(&vector_lock); } void send_cleanup_vector(struct irq_cfg *cfg) @@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) goto unlock; /* - * Check if the irq migration is in progress. If so, we - * haven't received the cleanup request yet for this irq. + * Nothing to cleanup if irq migration is in progress + * or this cpu is not set in the cleanup mask. */ - if (data->move_in_progress) + if (data->move_in_progress || + !cpumask_test_cpu(me, data->old_domain)) goto unlock; + /* + * We have two cases to handle here: + * 1) vector is unchanged but the target mask got reduced + * 2) vector and the target mask has changed + * + * #1 is obvious, but in #2 we have two vectors with the same + * irq descriptor: the old and the new vector. So we need to + * make sure that we only cleanup the old vector. The new + * vector has the current @vector number in the config and + * this cpu is part of the target mask. We better leave that + * one alone. + */ if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain)) goto unlock; @@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void) goto unlock; } __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); + cpumask_clear_cpu(me, data->old_domain); unlock: raw_spin_unlock(&desc->lock); } @@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg) __irq_complete_move(cfg, ~get_irq_regs()->orig_ax); } -void irq_force_complete_move(int irq) +/* + * Called with @desc->lock held and interrupts disabled. + */ +void irq_force_complete_move(struct irq_desc *desc) { - struct irq_cfg *cfg = irq_cfg(irq); + struct irq_data *irqdata = irq_desc_get_irq_data(desc); + struct apic_chip_data *data = apic_chip_data(irqdata); + struct irq_cfg *cfg = data ? &data->cfg : NULL; - if (cfg) - __irq_complete_move(cfg, cfg->vector); + if (!cfg) + return; + + __irq_complete_move(cfg, cfg->vector); + + /* + * This is tricky. If the cleanup of @data->old_domain has not been + * done yet, then the following setaffinity call will fail with + * -EBUSY. This can leave the interrupt in a stale state. + * + * The cleanup cannot make progress because we hold @desc->lock. So in + * case @data->old_domain is not yet cleaned up, we need to drop the + * lock and acquire it again. @desc cannot go away, because the + * hotplug code holds the sparse irq lock. + */ + raw_spin_lock(&vector_lock); + /* Clean out all offline cpus (including ourself) first. 
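The irq_force_complete_move() rewrite here has to let the pending cleanup IPI run, but that IPI needs desc->lock, which the caller holds; hence the drop-and-retake loop, with vector_lock always nested inside desc->lock. A sketch of that locking shape only; the two predicate helpers and lock variables are assumed scaffolding, and this only compiles in kernel context:

#include <linux/spinlock.h>

extern raw_spinlock_t desc_lock, vec_lock;
extern int cleanup_pending(void);	/* old_domain not yet empty?  */
extern int chip_data_alive(void);	/* chip data still installed? */

/* Called with desc_lock held; returns with desc_lock held. */
static void wait_for_vector_cleanup_sketch(void)
{
	raw_spin_lock(&vec_lock);
	while (cleanup_pending()) {
		raw_spin_unlock(&vec_lock);
		raw_spin_unlock(&desc_lock);
		cpu_relax();			/* let the IPI proceed */
		raw_spin_lock(&desc_lock);
		if (!chip_data_alive())		/* torn down meanwhile */
			return;
		raw_spin_lock(&vec_lock);
	}
	raw_spin_unlock(&vec_lock);
}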
*/ + cpumask_and(data->old_domain, data->old_domain, cpu_online_mask); + while (!cpumask_empty(data->old_domain)) { + raw_spin_unlock(&vector_lock); + raw_spin_unlock(&desc->lock); + cpu_relax(); + raw_spin_lock(&desc->lock); + /* + * Reevaluate apic_chip_data. It might have been cleared after + * we dropped @desc->lock. + */ + data = apic_chip_data(irqdata); + if (!data) + return; + raw_spin_lock(&vector_lock); + } + raw_spin_unlock(&vector_lock); } #endif diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index d760c6bb37b5..624db00583f4 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -889,7 +889,10 @@ void __init uv_system_init(void) return; } pr_info("UV: Found %s hub\n", hub); - map_low_mmrs(); + + /* We now only need to map the MMRs on UV1 */ + if (is_uv1_hub()) + map_low_mmrs(); m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR ); m_val = m_n_config.s.m_skt; diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index a667078a5180..fed2ab1f1065 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c @@ -1960,7 +1960,8 @@ intel_bts_constraints(struct perf_event *event) static int intel_alt_er(int idx, u64 config) { - int alt_idx; + int alt_idx = idx; + if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1)) return idx; @@ -2897,14 +2898,12 @@ static void intel_pmu_cpu_starting(int cpu) return; if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) { - void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED]; - for_each_cpu(i, topology_sibling_cpumask(cpu)) { struct intel_shared_regs *pc; pc = per_cpu(cpu_hw_events, i).shared_regs; if (pc && pc->core_id == core_id) { - *onln = cpuc->shared_regs; + cpuc->kfree_on_online[0] = cpuc->shared_regs; cpuc->shared_regs = pc; break; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index f97f8075bf04..3bf41d413775 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -995,6 +995,9 @@ static int __init uncore_pci_init(void) case 87: /* Knights Landing */ ret = knl_uncore_pci_init(); break; + case 94: /* SkyLake */ + ret = skl_uncore_pci_init(); + break; default: return 0; } diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 07aa2d6bd710..a7086b862156 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -336,6 +336,7 @@ int snb_uncore_pci_init(void); int ivb_uncore_pci_init(void); int hsw_uncore_pci_init(void); int bdw_uncore_pci_init(void); +int skl_uncore_pci_init(void); void snb_uncore_cpu_init(void); void nhm_uncore_cpu_init(void); int snb_pci2phy_map_init(int devid); diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c index 0b934820fafd..2bd030ddd0db 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c @@ -8,6 +8,7 @@ #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 +#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f /* SNB event control */ #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff @@ -524,6 +525,14 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = { { /* end: all zeroes */ }, }; +static const struct pci_device_id skl_uncore_pci_ids[] = { + { /* IMC */ + 
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, + { /* end: all zeroes */ }, +}; + static struct pci_driver snb_uncore_pci_driver = { .name = "snb_uncore", .id_table = snb_uncore_pci_ids, @@ -544,6 +553,11 @@ static struct pci_driver bdw_uncore_pci_driver = { .id_table = bdw_uncore_pci_ids, }; +static struct pci_driver skl_uncore_pci_driver = { + .name = "skl_uncore", + .id_table = skl_uncore_pci_ids, +}; + struct imc_uncore_pci_dev { __u32 pci_id; struct pci_driver *driver; @@ -558,6 +572,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ + IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ { /* end marker */ } }; @@ -610,6 +625,11 @@ int bdw_uncore_pci_init(void) return imc_uncore_pci_init(); } +int skl_uncore_pci_init(void) +{ + return imc_uncore_pci_init(); +} + /* end of Sandy Bridge uncore support */ /* Nehalem uncore support */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index f129a9af6357..2c0f3407bd1f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data) reserve_ebda_region(); + switch (boot_params.hdr.hardware_subarch) { + case X86_SUBARCH_INTEL_MID: + x86_intel_mid_early_setup(); + break; + default: + break; + } + start_kernel(); } diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index f8062aaf5df9..61521dc19c10 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -462,7 +462,7 @@ void fixup_irqs(void) * non intr-remapping case, we can't wait till this interrupt * arrives at this cpu before completing the irq move. */ - irq_force_complete_move(irq); + irq_force_complete_move(desc); if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { break_affinity = 1; @@ -470,6 +470,15 @@ void fixup_irqs(void) } chip = irq_data_get_irq_chip(data); + /* + * The interrupt descriptor might have been cleaned up + * already, but it is not yet removed from the radix tree + */ + if (!chip) { + raw_spin_unlock(&desc->lock); + continue; + } + if (!irqd_can_move_in_process_context(data) && chip->irq_mask) chip->irq_mask(data); diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index fc6a4c8f6e2a..2440814b0069 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -33,7 +33,7 @@ struct cpa_data { pgd_t *pgd; pgprot_t mask_set; pgprot_t mask_clr; - int numpages; + unsigned long numpages; int flags; unsigned long pfn; unsigned force_split : 1; @@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) * CPA operation. Either a large page has been * preserved or a single page update happened. 
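The cpa_data.numpages widening in the pageattr hunk matters because an int's worth of 4 KiB pages tops out at 8 TiB, so a larger region would truncate; the added !cpa->numpages check also turns a zero-progress iteration (numpages would then never decrease) into a loud BUG instead of a silent hang. A standalone illustration of the truncation, LP64 assumed:

#include <stdio.h>

int main(void)
{
	unsigned long bytes = 16UL << 40;	/* a 16 TiB range      */
	unsigned long pages = bytes >> 12;	/* 4 KiB pages: 2^32   */
	int as_int = (int)pages;		/* the old field type  */

	/* Typically prints "pages=4294967296 as int=0" on LP64. */
	printf("pages=%lu as int=%d\n", pages, as_int);
	return 0;
}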
*/ - BUG_ON(cpa->numpages > numpages); + BUG_ON(cpa->numpages > numpages || !cpa->numpages); numpages -= cpa->numpages; if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) cpa->curpage++; diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 1c7380da65ff..2d66db8f80f9 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -8,6 +8,7 @@ #include <linux/memblock.h> #include <linux/bootmem.h> #include <linux/acpi.h> +#include <linux/dmi.h> #include <asm/efi.h> #include <asm/uv/uv.h> @@ -248,6 +249,16 @@ out: return ret; } +static const struct dmi_system_id sgi_uv1_dmi[] = { + { NULL, "SGI UV1", + { DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), + DMI_MATCH(DMI_BIOS_VENDOR, "SGI.COM"), + } + }, + { } /* NULL entry stops DMI scanning */ +}; + void __init efi_apply_memmap_quirks(void) { /* @@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void) efi_unmap_memmap(); } - /* - * UV doesn't support the new EFI pagetable mapping yet. - */ - if (is_uv_system()) + /* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */ + if (dmi_check_system(sgi_uv1_dmi)) set_bit(EFI_OLD_MEMMAP, &efi.flags); } diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 1bbc21e2e4ae..90bb997ed0a2 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void) intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); else { intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); - pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); + pr_info("ARCH: Unknown SoC, assuming Penwell!\n"); } out: @@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg) else if (strcmp("lapic_and_apbt", arg) == 0) intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT; else { - pr_warn("X86 INTEL_MID timer option %s not recognised" " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n", - arg); + pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n", + arg); return -EINVAL; } return 0; } __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer); - diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index c1bdafaac3ca..c61b6c332e97 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused) if (imr_is_enabled(&imr)) { base = imr_to_phys(imr.addr_lo); end = imr_to_phys(imr.addr_hi) + IMR_MASK; + size = end - base + 1; } else { base = 0; end = 0; + size = 0; } - size = end - base; seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx " "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i, &base, &end, size, imr.rmask, imr.wmask, @@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev) { phys_addr_t base = virt_to_phys(&_text); size_t size = virt_to_phys(&__end_rodata) - base; + unsigned long start, end; int i; int ret; @@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev) for (i = 0; i < idev->max_imr; i++) imr_clear(i); + start = (unsigned long)_text; + end = (unsigned long)__end_rodata - 1; + /* * Setup a locked IMR around the physical extent of the kernel * from the beginning of the .text section to the end of the * .rodata section as one physically contiguous block.
+ * + * We don't round up @size since it is already PAGE_SIZE aligned. + * See vmlinux.lds.S for details. */ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); if (ret < 0) { - pr_err("unable to setup IMR for kernel: (%p - %p)\n", - &_text, &__end_rodata); + pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", + size / 1024, start, end); } else { - pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n", - size / 1024, &_text, &__end_rodata); + pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n", + size / 1024, start, end); } }
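One last detail worth spelling out: the imr_dbgfs_state_show() change above reflects that imr_to_phys(imr.addr_hi) + IMR_MASK is an inclusive end address, so an enabled region spans end - base + 1 bytes, and a disabled one now reports size 0 instead of a meaningless difference. A tiny standalone check of the off-by-one:

#include <stdio.h>

int main(void)
{
	unsigned long base = 0x1000, end = 0x1fff;	/* inclusive bounds */

	/* old: 0xfff (one byte short), fixed: 0x1000 */
	printf("old=0x%lx fixed=0x%lx\n", end - base, end - base + 1);
	return 0;
}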