Diffstat (limited to 'arch')
71 files changed, 469 insertions, 257 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 56a4df952fb0..22e58a99f38b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK config VGA_HOSE bool - depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI + depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI) default y help Support VGA on an arbitrary hose; needed for several platforms diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h index 1f7fba671ae6..d70408d36677 100644 --- a/arch/alpha/include/asm/rtc.h +++ b/arch/alpha/include/asm/rtc.h @@ -1,14 +1,10 @@ #ifndef _ALPHA_RTC_H #define _ALPHA_RTC_H -#if defined(CONFIG_ALPHA_GENERIC) +#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ + || defined(CONFIG_ALPHA_GENERIC) # define get_rtc_time alpha_mv.rtc_get_time # define set_rtc_time alpha_mv.rtc_set_time -#else -# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) -# define get_rtc_time marvel_get_rtc_time -# define set_rtc_time marvel_set_rtc_time -# endif #endif #include <asm-generic/rtc.h> diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index 5e7c28f92f19..61893d7bdda5 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ -11,6 +11,7 @@ #include <asm/core_tsunami.h> #undef __EXTERN_INLINE +#include <linux/module.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/sched.h> diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 14a4b6a7cf59..407accc80877 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -317,7 +317,7 @@ marvel_init_irq(void) } static int -marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 80abafb9bf33..9650c143afc1 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } -#ifdef __ARMEB__ -#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB -#else -#define AUDIT_ARCH_NR AUDIT_ARCH_ARM -#endif - asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; - /* - * Save IP. 
IP is used to denote syscall entry/exit: - * IP = 0 -> entry, = 1 -> exit - */ - ip = regs->ARM_ip; - regs->ARM_ip = why; - - if (!ip) + if (why) audit_syscall_exit(regs); else - audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0, + audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); if (!test_thread_flag(TIF_SYSCALL_TRACE)) @@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) current_thread_info()->syscall = scno; + /* + * IP is used to denote syscall entry/exit: + * IP = 0 -> entry, =1 -> exit + */ + ip = regs->ARM_ip; + regs->ARM_ip = why; + /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index f6a4d32b0421..8f4644659777 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it. @@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) enter_lazy_tlb(mm, current); local_flush_tlb_all(); + printk("CPU%u: Booted secondary processor\n", cpu); + cpu_init(); preempt_disable(); trace_hardirqs_off(); diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index d2b177905cdb..76cbb055dd05 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -115,7 +115,7 @@ int kernel_execve(const char *filename, "Ir" (THREAD_START_SP - sizeof(regs)), "r" (®s), "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); + : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); out: return ret; diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e81c35f936b5..b8df521fb68e 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -232,6 +232,9 @@ config MACH_ARMLEX4210 config MACH_UNIVERSAL_C210 bool "Mobile UNIVERSAL_C210 Board" select CPU_EXYNOS4210 + select S5P_HRT + select CLKSRC_MMIO + select HAVE_SCHED_CLOCK select S5P_GPIO_INT select S5P_DEV_FIMC0 select S5P_DEV_FIMC1 diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c index 5cd7a8b8868c..7ac6ff4c46bd 100644 --- a/arch/arm/mach-exynos/clock-exynos5.c +++ b/arch/arm/mach-exynos/clock-exynos5.c @@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = { .name = "dma", .devname = "dma-pl330.1", .enable = exynos5_clk_ip_fsys_ctrl, - .ctrlbit = (1 << 1), + .ctrlbit = (1 << 2), }; static struct clk exynos5_clk_mdma1 = { diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index cb2b027f09a6..a34036eb8ba2 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -40,6 +40,7 @@ #include <plat/pd.h> #include <plat/regs-fb-v4.h> #include <plat/fimc-core.h> +#include <plat/s5p-time.h> #include <plat/camport.h> #include <plat/mipi_csis.h> @@ -1063,6 +1064,7 @@ static void __init universal_map_io(void) exynos_init_io(NULL, 0); s3c24xx_init_clocks(24000000); s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); + s5p_set_timer_source(S5P_PWM2, S5P_PWM4); } static void s5p_tv_setup(void) @@ -1113,7 +1115,7 @@ MACHINE_START(UNIVERSAL_C210, 
"UNIVERSAL_C210") .map_io = universal_map_io, .handle_irq = gic_handle_irq, .init_machine = universal_machine_init, - .timer = &exynos4_timer, + .timer = &s5p_timer, .reserve = &universal_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 1c672d9e6656..f7fe1b9f3170 100644 --- a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -14,6 +14,7 @@ #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/kexec.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <mach/bridge-regs.h> diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index fcce7ff37630..cfd98b186fcc 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) struct irq_chip *irq_chip = NULL; int gpio, irq_num, fiq_count; - irq_desc = irq_to_desc(IH_GPIO_BASE); + irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK)); if (irq_desc) irq_chip = irq_desc->irq_data.chip; diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 930c0d380435..740cee9369ba 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = { static void __init igep_init(void) { - regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h index 1e2d3322f33e..c88420de1151 100644 --- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h +++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h @@ -941,10 +941,10 @@ #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) #define OMAP4_DSI1_LANEENABLE_SHIFT 24 #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) -#define OMAP4_DSI2_PIPD_SHIFT 19 -#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) -#define OMAP4_DSI1_PIPD_SHIFT 14 -#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) +#define OMAP4_DSI1_PIPD_SHIFT 19 +#define OMAP4_DSI1_PIPD_MASK (0x1f << 19) +#define OMAP4_DSI2_PIPD_SHIFT 14 +#define OMAP4_DSI2_PIPD_MASK (0x1f << 14) /* CONTROL_MCBSPLP */ #define OMAP4_ALBCTRLRX_FSX_SHIFT 31 diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h index eac68978a2c2..db70e79a1198 100644 --- a/arch/arm/mach-orion5x/mpp.h +++ b/arch/arm/mach-orion5x/mpp.h @@ -65,8 +65,8 @@ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1) +#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index cb224a344af0..0891ec6e27f5 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = { }; /* SDHI0 */ -static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = 
dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED, .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, + .cd_gpio = GPIO_PORT251, }; static struct resource sdhi0_resources[] = { @@ -557,7 +547,6 @@ static void __init ag5evm_init(void) lcd_backlight_reset(); /* enable SDHI0 on CN15 [SD I/F] */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -566,13 +555,6 @@ static void __init ag5evm_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "sdhi0 cd", &sdhi0_device.dev)) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_warn("Unable to setup SDHI0 GPIO IRQ\n"); - /* enable SDHI1 on CN4 [WLAN I/F] */ gpio_request(GPIO_FN_SDHICLK1, NULL); gpio_request(GPIO_FN_SDHICMD1_PU, NULL); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index f49e28abe0ab..8c6202bb6aeb 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev) } /* SDHI0 */ -static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_flags = TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .cd_gpio = GPIO_PORT172, }; static struct resource sdhi0_resources[] = { @@ -1384,7 +1375,6 @@ static void __init mackerel_init(void) { u32 srcr4; struct clk *clk; - int ret; /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1481,7 +1471,6 @@ static void __init mackerel_init(void) irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); /* enable SDHI0 */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -1490,13 +1479,6 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); - if (!ret) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); - #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable SDHI1 */ gpio_request(GPIO_FN_SDHICMD1, NULL); diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 6ac015c89206..b202c1272526 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -16,6 +16,59 @@ __CPUINIT +/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks! 
+ * + * The secondary kernel init calls v7_flush_dcache_all before it enables + * the L1; however, the L1 comes out of reset in an undefined state, so + * the clean + invalidate performed by v7_flush_dcache_all causes a bunch + * of cache lines with uninitialized data and uninitialized tags to get + * written out to memory, which does really unpleasant things to the main + * processor. We fix this by performing an invalidate, rather than a + * clean + invalidate, before jumping into the kernel. + * + * This funciton is cloned from arch/arm/mach-tegra/headsmp.S, and needs + * to be called for both secondary cores startup and primary core resume + * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S. + */ +ENTRY(v7_invalidate_l1) + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mcr p15, 2, r0, c0, c0, 0 + mrc p15, 1, r0, c0, c0, 0 + + ldr r1, =0x7fff + and r2, r1, r0, lsr #13 + + ldr r1, =0x3ff + + and r3, r1, r0, lsr #3 @ NumWays - 1 + add r2, r2, #1 @ NumSets + + and r0, r0, #0x7 + add r0, r0, #4 @ SetShift + + clz r1, r3 @ WayShift + add r4, r3, #1 @ NumWays +1: sub r2, r2, #1 @ NumSets-- + mov r3, r4 @ Temp = NumWays +2: subs r3, r3, #1 @ Temp-- + mov r5, r3, lsl r1 + mov r6, r2, lsl r0 + orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, r5, c7, c6, 2 + bgt 2b + cmp r2, #0 + bgt 1b + dsb + isb + mov pc, lr +ENDPROC(v7_invalidate_l1) + +ENTRY(shmobile_invalidate_start) + bl v7_invalidate_l1 + b secondary_startup +ENDPROC(shmobile_invalidate_start) + /* * Reset vector for secondary CPUs. * This will be mapped at address 0 by SBAR register. @@ -24,4 +77,5 @@ .align 12 ENTRY(shmobile_secondary_vector) ldr pc, 1f -1: .long secondary_startup - PAGE_OFFSET + PLAT_PHYS_OFFSET +1: .long shmobile_invalidate_start - PAGE_OFFSET + PLAT_PHYS_OFFSET +ENDPROC(shmobile_secondary_vector) diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h index 83ad3fe0a75f..c85e6ecda606 100644 --- a/arch/arm/mach-shmobile/include/mach/common.h +++ b/arch/arm/mach-shmobile/include/mach/common.h @@ -4,7 +4,6 @@ extern void shmobile_earlytimer_init(void); extern struct sys_timer shmobile_timer; struct twd_local_timer; -void shmobile_twd_init(struct twd_local_timer *twd_local_timer); extern void shmobile_setup_console(void); extern void shmobile_secondary_vector(void); extern int shmobile_platform_cpu_kill(unsigned int cpu); @@ -82,5 +81,6 @@ extern int r8a7779_platform_cpu_kill(unsigned int cpu); extern void r8a7779_secondary_init(unsigned int cpu); extern int r8a7779_boot_secondary(unsigned int cpu); extern void r8a7779_smp_prepare_cpus(void); +extern void r8a7779_register_twd(void); #endif /* __ARCH_MACH_COMMON_H */ diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c index 12c6f529ab89..e98e46f6cf55 100644 --- a/arch/arm/mach-shmobile/setup-r8a7779.c +++ b/arch/arm/mach-shmobile/setup-r8a7779.c @@ -262,10 +262,14 @@ void __init r8a7779_add_standard_devices(void) ARRAY_SIZE(r8a7779_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak r8a7779_register_twd(void) { } + static void __init r8a7779_earlytimer_init(void) { r8a7779_clock_init(); shmobile_earlytimer_init(); + r8a7779_register_twd(); } void __init r8a7779_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index 5bebffc10455..04a0dfe75493 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ 
b/arch/arm/mach-shmobile/setup-sh73a0.c @@ -688,10 +688,14 @@ void __init sh73a0_add_standard_devices(void) ARRAY_SIZE(sh73a0_late_devices)); } +/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */ +void __init __weak sh73a0_register_twd(void) { } + static void __init sh73a0_earlytimer_init(void) { sh73a0_clock_init(); shmobile_earlytimer_init(); + sh73a0_register_twd(); } void __init sh73a0_add_early_devices(void) diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c index b62e19d4c9af..6d1d0238cbf7 100644 --- a/arch/arm/mach-shmobile/smp-r8a7779.c +++ b/arch/arm/mach-shmobile/smp-r8a7779.c @@ -64,8 +64,15 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); +void __init r8a7779_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif + static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { void __iomem *scu_base = scu_base_addr(); @@ -84,7 +91,6 @@ unsigned int __init r8a7779_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c index 14ad8b052f1a..e36c41c4ab40 100644 --- a/arch/arm/mach-shmobile/smp-sh73a0.c +++ b/arch/arm/mach-shmobile/smp-sh73a0.c @@ -42,7 +42,13 @@ static void __iomem *scu_base_addr(void) static DEFINE_SPINLOCK(scu_lock); static unsigned long tmp; +#ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, 0xf0000600, 29); +void __init sh73a0_register_twd(void) +{ + twd_local_timer_register(&twd_local_timer); +} +#endif static void modify_scu_cpu_psr(unsigned long set, unsigned long clr) { @@ -62,7 +68,6 @@ unsigned int __init sh73a0_get_core_count(void) { void __iomem *scu_base = scu_base_addr(); - shmobile_twd_init(&twd_local_timer); return scu_get_core_count(scu_base); } diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index 2fba5f3d1c8a..8b79e7917a23 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c @@ -46,15 +46,6 @@ static void __init shmobile_timer_init(void) { } -void __init shmobile_twd_init(struct twd_local_timer *twd_local_timer) -{ -#ifdef CONFIG_HAVE_ARM_TWD - int err = twd_local_timer_register(twd_local_timer); - if (err) - pr_err("twd_local_timer_register failed %d\n", err); -#endif -} - struct sys_timer shmobile_timer = { .init = shmobile_timer_init, }; diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f5104b7c52cd..463fb3bbe11e 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1174,7 +1174,7 @@ out: bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { - return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c index 235947844f27..09df4b89e8be 100644 --- a/arch/m68k/platform/520x/config.c +++ b/arch/m68k/platform/520x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m520x_qspi_init(void) { @@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void) writew(par, MCF_GPIO_PAR_UART); } -#endif /* 
CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m520x_uarts_init(); m520x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m520x_qspi_init(); #endif } diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/523x/config.c index c8b405d5a961..d47dfd8f50a2 100644 --- a/arch/m68k/platform/523x/config.c +++ b/arch/m68k/platform/523x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m523x_qspi_init(void) { @@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void) writew(par, MCFGPIO_PAR_TIMER); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; m523x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m523x_qspi_init(); #endif } diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/5249/config.c index bbf05135bb98..300e729a58d0 100644 --- a/arch/m68k/platform/5249/config.c +++ b/arch/m68k/platform/5249/config.c @@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = { /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m5249_qspi_init(void) { @@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void) mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) #ifdef CONFIG_M5249C3 m5249_smc91x_init(); #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m5249_qspi_init(); #endif } diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c index f91a53294c35..b3cb378c5e94 100644 --- a/arch/m68k/platform/527x/config.c +++ b/arch/m68k/platform/527x/config.c @@ -23,7 +23,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m527x_qspi_init(void) { @@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void) #endif } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m527x_qspi_init(); #endif } diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/528x/config.c index d4492926614c..c5f11ba49be5 100644 --- a/arch/m68k/platform/528x/config.c +++ b/arch/m68k/platform/528x/config.c @@ -24,7 +24,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init 
m528x_qspi_init(void) { @@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void) __raw_writeb(0x07, MCFGPIO_PQSPAR); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m528x_uarts_init(); m528x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m528x_qspi_init(); #endif } diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/532x/config.c index 2bec3477b739..37082d02f2bd 100644 --- a/arch/m68k/platform/532x/config.c +++ b/arch/m68k/platform/532x/config.c @@ -30,7 +30,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m532x_qspi_init(void) { @@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void) writew(0x01f0, MCF_GPIO_PAR_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m532x_uarts_init(); m532x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m532x_qspi_init(); #endif diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c index 7af97362b95c..3aa77ddea89d 100644 --- a/arch/m68k/platform/coldfire/device.c +++ b/arch/m68k/platform/coldfire/device.c @@ -121,7 +121,7 @@ static struct platform_device mcf_fec1 = { #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* * The ColdFire QSPI module is an SPI protocol hardware block used * on a number of different ColdFire CPUs. 
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = { .resource = mcf_qspi_resources, .dev.platform_data = &mcf_qspi_data, }; -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, @@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_fec1, #endif #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif }; diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index 910dddf65e44..9cd69ad6aa02 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -24,6 +24,7 @@ #include <linux/sched.h> #include <linux/profile.h> #include <linux/smp.h> +#include <linux/cpu.h> #include <asm/tlbflush.h> #include <asm/bitops.h> #include <asm/processor.h> @@ -38,7 +39,6 @@ #include "internal.h" #ifdef CONFIG_HOTPLUG_CPU -#include <linux/cpu.h> #include <asm/cacheflush.h> static unsigned long sleep_mode[NR_CPUS]; @@ -874,10 +874,13 @@ static void __init smp_online(void) cpu = smp_processor_id(); - local_irq_enable(); + notify_cpu_starting(cpu); + ipi_call_lock(); set_cpu_online(cpu, true); - smp_wmb(); + ipi_call_unlock(); + + local_irq_enable(); } /** diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 4e9626836bab..d1d864b81bae 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -2,7 +2,6 @@ #define _PARISC_HARDWARE_H #include <linux/mod_devicetable.h> -#include <asm/pdc.h> #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID #define HVERSION_ANY_ID PA_HVERSION_ANY_ID @@ -95,12 +94,14 @@ struct bc_module { #define HPHW_MC 15 #define HPHW_FAULTY 31 +struct parisc_device_id; /* hardware.c: */ extern const char *parisc_hardware_description(struct parisc_device_id *id); extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); struct pci_dev; +struct hardware_path; /* drivers.c: */ extern struct parisc_device *alloc_pa_dev(unsigned long hpa, diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index a84cc1f925f6..4e0e7dbf0f3f 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -160,5 +160,11 @@ extern int npmem_ranges; #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> +#include <asm/pdc.h> + +#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) + +/* DEFINITION OF THE ZERO-PAGE (PAG0) */ +/* based on work by Jason Eckhardt (jason@equator.com) */ #endif /* _PARISC_PAGE_H */ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 4ca510b3c6f8..7f0f2d23059d 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -343,8 +343,6 @@ #ifdef __KERNEL__ -#include <asm/page.h> /* for __PAGE_OFFSET */ - extern int pdc_type; /* Values for pdc_type */ @@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) { #endif /* __KERNEL__ */ -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) - -/* DEFINITION OF THE ZERO-PAGE (PAG0) */ -/* based on work by Jason Eckhardt (jason@equator.com) */ - /* flags of the device_path */ #define PF_AUTOBOOT 0x80 #define PF_AUTOSEARCH 0x40 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 22dadeb58695..ee99f2339356 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -44,6 +44,8 @@ struct vm_area_struct; #endif /* !__ASSEMBLY__ */ +#include <asm/page.h> + #define pte_ERROR(e) \ printk("%s:%d: bad pte 
%08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 804aa28ab1d6..3516e0b27044 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -1,6 +1,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H +#include <asm/barrier.h> +#include <asm/ldcw.h> #include <asm/processor.h> #include <asm/spinlock_types.h> diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 4f004596a6e7..47341aa208f2 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -50,6 +50,7 @@ #include <linux/init.h> #include <linux/major.h> #include <linux/tty.h> +#include <asm/page.h> /* for PAGE0 */ #include <asm/pdc.h> /* for iodc_call() proto and friends */ static DEFINE_SPINLOCK(pdc_console_lock); @@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) { - if (!tty->count) { + if (tty->count == 1) { del_timer_sync(&pdc_console_timer); tty_port_tty_set(&tty_port, NULL); } diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 0bb1d63907f8..4dc7b7942b4c 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -31,6 +31,7 @@ #include <linux/delay.h> #include <linux/bitops.h> #include <linux/ftrace.h> +#include <linux/cpu.h> #include <linux/atomic.h> #include <asm/current.h> @@ -295,8 +296,13 @@ smp_cpu_init(int cpunum) printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum); machine_halt(); - } + } + + notify_cpu_starting(cpunum); + + ipi_call_lock(); set_cpu_online(cpunum, true); + ipi_call_unlock(); /* Initialise the idle task for this CPU */ atomic_inc(&init_mm.mm_count); diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 7c0774397b89..70e105d62423 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -29,6 +29,7 @@ #include <asm/uaccess.h> #include <asm/io.h> #include <asm/irq.h> +#include <asm/page.h> #include <asm/param.h> #include <asm/pdc.h> #include <asm/led.h> diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 548da3aa0a30..d58fc4e4149c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -288,13 +288,6 @@ label##_hv: \ /* Exception addition: Hard disable interrupts */ #define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) -/* Exception addition: Keep interrupt state */ -#define ENABLE_INTS \ - ld r11,PACAKMSR(r13); \ - ld r12,_MSR(r1); \ - rlwimi r11,r12,0,MSR_EE; \ - mtmsrd r11,1 - #define ADD_NVGPRS \ bl .save_nvgprs diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index aa795ccef294..fd07f43d6622 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s { u64 sdr1; u64 hior; u64 msr_mask; - u64 vsid_next; #ifdef CONFIG_PPC_BOOK3S_32 u32 vsid_pool[VSID_POOL_SIZE]; + u32 vsid_next; #else - u64 vsid_first; - u64 vsid_max; + u64 proto_vsid_first; + u64 proto_vsid_max; + u64 proto_vsid_next; #endif int context_id[SID_CONTEXTS]; diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f8a7a1a1a9f4..ef2074c3e906 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite) fast_exc_return_irq: restore: /* - * This is the main 
kernel exit path, we first check if we - * have to change our interrupt state. + * This is the main kernel exit path. First we check if we + * are about to re-enable interrupts */ ld r5,SOFTE(r1) lbz r6,PACASOFTIRQEN(r13) - cmpwi cr1,r5,0 - cmpw cr0,r5,r6 - beq cr0,4f + cmpwi cr0,r5,0 + beq restore_irq_off - /* We do, handle disable first, which is easy */ - bne cr1,3f; - li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS - b 4f + /* We are enabling, were we already enabled ? Yes, just return */ + cmpwi cr0,r6,1 + beq cr0,do_restore -3: /* + /* * We are about to soft-enable interrupts (we are hard disabled * at this point). We check if there's anything that needs to * be replayed first. @@ -626,7 +622,7 @@ restore_no_replay: /* * Final return path. BookE is handled in a different file */ -4: +do_restore: #ifdef CONFIG_PPC_BOOK3E b .exception_return_book3e #else @@ -700,6 +696,25 @@ fast_exception_return: #endif /* CONFIG_PPC_BOOK3E */ /* + * We are returning to a context with interrupts soft disabled. + * + * However, we may also about to hard enable, so we need to + * make sure that in this case, we also clear PACA_IRQ_HARD_DIS + * or that bit can get out of sync and bad things will happen + */ +restore_irq_off: + ld r3,_MSR(r1) + lbz r7,PACAIRQHAPPENED(r13) + andi. r0,r3,MSR_EE + beq 1f + rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS + stb r7,PACAIRQHAPPENED(r13) +1: li r0,0 + stb r0,PACASOFTIRQEN(r13); + TRACE_DISABLE_INTS + b do_restore + + /* * Something did happen, check if a re-emit is needed * (this also clears paca->irq_happened) */ @@ -748,6 +763,9 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ 1: b .ret_from_except /* What else to do here ? */ + + +3: do_work: #ifdef CONFIG_PREEMPT andi. r0,r3,MSR_PR /* Returning to user mode? */ @@ -767,16 +785,6 @@ do_work: SOFT_DISABLE_INTS(r3,r4) 1: bl .preempt_schedule_irq - /* Hard-disable interrupts again (and update PACA) */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 0 -#else - ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - li r0,PACA_IRQ_HARD_DIS - stb r0,PACAIRQHAPPENED(r13) - /* Re-test flags and eventually loop */ clrrdi r9,r1,THREAD_SHIFT ld r4,TI_FLAGS(r9) @@ -787,14 +795,6 @@ do_work: user_work: #endif /* CONFIG_PREEMPT */ - /* Enable interrupts */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 1 -#else - ori r10,r10,MSR_EE - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - andi. r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index cb705fdbb458..8f880bc77c56 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -768,8 +768,8 @@ alignment_common: std r3,_DAR(r1) std r4,_DSISR(r1) bl .save_nvgprs + DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - ENABLE_INTS bl .alignment_exception b .ret_from_except diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 43eb74fcedde..641da9e868ce 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en) */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); +#ifdef CONFIG_TRACE_IRQFLAG + else { + /* + * We should already be hard disabled here. We had bugs + * where that wasn't the case so let's dbl check it and + * warn if we are wrong. Only do that when IRQ tracing + * is enabled as mfmsr() can be costly. 
+ */ + if (WARN_ON(mfmsr() & MSR_EE)) + __hard_irq_disable(); + } +#endif /* CONFIG_TRACE_IRQFLAG */ + set_soft_enabled(0); /* @@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore); * if they are currently disabled. This is typically called before * schedule() or do_signal() when returning to userspace. We do it * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. */ void restore_interrupts(void) { - if (irqs_disabled()) + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_irq_enable(); + } else + __hard_irq_enable(); } #endif /* CONFIG_PPC64 */ diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6aa0c663e247..158972341a2d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) addr, regs->nip, regs->link, code); } - if (!arch_irq_disabled_regs(regs)) + if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); memset(&info, 0, sizeof(info)); @@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } - local_irq_enable(); + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, @@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs) { int sig, code, fixed = 0; + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 6f87f39a1ac2..10fc8ec9d2a8 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) backwards_map = !backwards_map; /* Uh-oh ... out of mappings. Let's flush! 
*/ - if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) { - vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; + if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) { + vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first; memset(vcpu_book3s->sid_map, 0, sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); kvmppc_mmu_pte_flush(vcpu, 0, 0); kvmppc_mmu_flush_segments(vcpu); } - map->host_vsid = vcpu_book3s->vsid_next++; + map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M); map->guest_vsid = gvsid; map->valid = true; @@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) return -1; vcpu3s->context_id[0] = err; - vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; - vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; - vcpu3s->vsid_next = vcpu3s->vsid_first; + vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) + << USER_ESID_BITS) - 1; + vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; + vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; kvmppc_mmu_hpte_init(vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index ddc485a529f2..c3beaeef3f60 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, !(memslot->userspace_addr & (s - 1))) { start &= ~(s - 1); pgsize = s; + get_page(hpage); + put_page(page); page = hpage; } } @@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, err = 0; out: - if (got) { - if (PageHuge(page)) - page = compound_head(page); + if (got) put_page(page); - } return err; up_err: @@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, SetPageDirty(page); out_put: - if (page) - put_page(page); + if (page) { + /* + * We drop pages[0] here, not page because page might + * have been set to the head page of a compound, but + * we have to drop the reference on the correct tail + * page to match the get inside gup() + */ + put_page(pages[0]); + } return ret; out_unlock: @@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, pa = *physp; } page = pfn_to_page(pa >> PAGE_SHIFT); + get_page(page); } else { hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, 1, pages); @@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, page = compound_head(page); psize <<= compound_order(page); } - if (!kvm->arch.using_mmu_notifiers) - get_page(page); offset = gpa & (psize - 1); if (nb_ret) *nb_ret = psize - offset; @@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) { struct page *page = virt_to_page(va); - page = compound_head(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 01294a5099dd..108d1f580177 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id) continue; pfn = physp[j] >> PAGE_SHIFT; page = pfn_to_page(pfn); - if (PageHuge(page)) - page = compound_head(page); SetPageDirty(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index def880aea63a..cec4daddbf31 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) /* insert R and C bits 
from PTE */ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); + hp[0] = 0; continue; } diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 0676ae249b9f..6e6e9cef34a8 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -197,7 +197,8 @@ kvmppc_interrupt: /* Save guest PC and MSR */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION - andi. r0,r12,0x2 + andi. r0, r12, 0x2 + cmpwi cr1, r0, 0 beq 1f mfspr r3,SPRN_HSRR0 mfspr r4,SPRN_HSRR1 @@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) beq ld_last_prev_inst cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT beq- ld_last_inst +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST + beq- ld_last_inst +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif b no_ld_last_inst @@ -316,23 +323,17 @@ no_dcbz32_off: * Having set up SRR0/1 with the address where we want * to continue with relocation on (potentially in module * space), we either just go straight there with rfi[d], - * or we jump to an interrupt handler with bctr if there - * is an interrupt to be handled first. In the latter - * case, the rfi[d] at the end of the interrupt handler - * will get us back to where we want to continue. + * or we jump to an interrupt handler if there is an + * interrupt to be handled first. In the latter case, + * the rfi[d] at the end of the interrupt handler will + * get us back to where we want to continue. */ - cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER - beq 1f - cmpwi r12, BOOK3S_INTERRUPT_PERFMON -1: mtctr r12 - /* Register usage at this point: * * R1 = host R1 * R2 = host R2 + * R10 = raw exit handler id * R12 = exit handler id * R13 = shadow vcpu (32-bit) or PACA (64-bit) * SVCPU.* = guest * @@ -342,12 +343,25 @@ no_dcbz32_off: PPC_LL r6, HSTATE_HOST_MSR(r13) PPC_LL r8, HSTATE_VMHANDLER(r13) - /* Restore host msr -> SRR1 */ +#ifdef CONFIG_PPC64 +BEGIN_FTR_SECTION + beq cr1, 1f + mtspr SPRN_HSRR1, r6 + mtspr SPRN_HSRR0, r8 +END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) +#endif +1: /* Restore host msr -> SRR1 */ mtsrr1 r6 /* Load highmem handler address */ mtsrr0 r8 /* RFI into the highmem handler, or jump to interrupt handler */ - beqctr + cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL + beqa BOOK3S_INTERRUPT_EXTERNAL + cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER + beqa BOOK3S_INTERRUPT_DECREMENTER + cmpwi r12, BOOK3S_INTERRUPT_PERFMON + beqa BOOK3S_INTERRUPT_PERFMON + RFI kvmppc_handler_trampoline_exit_end: diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 38d48a59879c..9708851a8b9f 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -269,4 +269,4 @@ static int __init sunfire_init(void) return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b57a5942ba64..874162a11ceb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -495,11 +495,11 @@ xcall_fetch_glob_regs: stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... 
*/ - rdpr %cwp, %g2 - sub %g2, 1, %g7 + rdpr %cwp, %g3 + sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 - wrpr %g2, %cwp + wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index bc4f562bd459..7594764d8a69 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -100,9 +100,14 @@ extern void cpu_idle_on_new_stack(struct thread_info *old_ti, #else /* __ASSEMBLY__ */ -/* how to get the thread information struct from ASM */ +/* + * How to get the thread information struct from assembly. + * Note that we use different macros since different architectures + * have different semantics in their "mm" instruction and we would + * like to guarantee that the macro expands to exactly one instruction. + */ #ifdef __tilegx__ -#define GET_THREAD_INFO(reg) move reg, sp; mm reg, zero, LOG2_THREAD_SIZE, 63 +#define EXTRACT_THREAD_INFO(reg) mm reg, zero, LOG2_THREAD_SIZE, 63 #else #define GET_THREAD_INFO(reg) mm reg, sp, zero, LOG2_THREAD_SIZE, 31 #endif diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c index 77763ccd5a7d..cdef6e5ec022 100644 --- a/arch/tile/kernel/compat_signal.c +++ b/arch/tile/kernel/compat_signal.c @@ -403,19 +403,17 @@ int compat_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Set up registers for signal handler. * Registers that we don't modify keep the value they had from * user-space at the time we took the signal. + * We always pass siginfo and mcontext, regardless of SA_SIGINFO, + * since some things rely on this (e.g. glibc's debug/segfault.c). */ regs->pc = ptr_to_compat_reg(ka->sa.sa_handler); regs->ex1 = PL_ICS_EX1(USER_PL, 1); /* set crit sec in handler */ regs->sp = ptr_to_compat_reg(frame); regs->lr = restorer; regs->regs[0] = (unsigned long) usig; - - if (ka->sa.sa_flags & SA_SIGINFO) { - /* Need extra arguments, so mark to restore caller-saves. */ - regs->regs[1] = ptr_to_compat_reg(&frame->info); - regs->regs[2] = ptr_to_compat_reg(&frame->uc); - regs->flags |= PT_FLAGS_CALLER_SAVES; - } + regs->regs[1] = ptr_to_compat_reg(&frame->info); + regs->regs[2] = ptr_to_compat_reg(&frame->uc); + regs->flags |= PT_FLAGS_CALLER_SAVES; /* * Notify any tracer that was single-stepping it. diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index 5d56a1ef5ba5..6943515100f8 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S @@ -839,6 +839,18 @@ STD_ENTRY(interrupt_return) FEEDBACK_REENTER(interrupt_return) /* + * Use r33 to hold whether we have already loaded the callee-saves + * into ptregs. We don't want to do it twice in this loop, since + * then we'd clobber whatever changes are made by ptrace, etc. + * Get base of stack in r32. + */ + { + GET_THREAD_INFO(r32) + movei r33, 0 + } + +.Lretry_work_pending: + /* * Disable interrupts so as to make sure we don't * miss an interrupt that sets any of the thread flags (like * need_resched or sigpending) between sampling and the iret. @@ -848,9 +860,6 @@ STD_ENTRY(interrupt_return) IRQ_DISABLE(r20, r21) TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ - /* Get base of stack in r32; note r30/31 are used as arguments here. */ - GET_THREAD_INFO(r32) - /* Check to see if there is any work to do before returning to user. */ { @@ -866,16 +875,18 @@ STD_ENTRY(interrupt_return) /* * Make sure we have all the registers saved for signal - * handling or single-step. 
Call out to C code to figure out - * exactly what we need to do for each flag bit, then if - * necessary, reload the flags and recheck. + * handling, notify-resume, or single-step. Call out to C + * code to figure out exactly what we need to do for each flag bit, + * then if necessary, reload the flags and recheck. */ - push_extra_callee_saves r0 { PTREGS_PTR(r0, PTREGS_OFFSET_BASE) - jal do_work_pending + bnz r33, 1f } - bnz r0, .Lresume_userspace + push_extra_callee_saves r0 + movei r33, 1 +1: jal do_work_pending + bnz r0, .Lretry_work_pending /* * In the NMI case we @@ -1180,10 +1191,12 @@ handle_syscall: add r20, r20, tp lw r21, r20 addi r21, r21, 1 - sw r20, r21 + { + sw r20, r21 + GET_THREAD_INFO(r31) + } /* Trace syscalls, if requested. */ - GET_THREAD_INFO(r31) addi r31, r31, THREAD_INFO_FLAGS_OFFSET lw r30, r31 andi r30, r30, _TIF_SYSCALL_TRACE @@ -1362,7 +1375,10 @@ handle_ill: 3: /* set PC and continue */ lw r26, r24 - sw r28, r26 + { + sw r28, r26 + GET_THREAD_INFO(r0) + } /* * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill. @@ -1370,7 +1386,6 @@ handle_ill: * need to clear it here and can't really impose on all other arches. * So what's another write between friends? */ - GET_THREAD_INFO(r0) addi r1, r0, THREAD_INFO_FLAGS_OFFSET { diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S index 49d9d6621682..30ae76e50c44 100644 --- a/arch/tile/kernel/intvec_64.S +++ b/arch/tile/kernel/intvec_64.S @@ -647,6 +647,20 @@ STD_ENTRY(interrupt_return) FEEDBACK_REENTER(interrupt_return) /* + * Use r33 to hold whether we have already loaded the callee-saves + * into ptregs. We don't want to do it twice in this loop, since + * then we'd clobber whatever changes are made by ptrace, etc. + */ + { + movei r33, 0 + move r32, sp + } + + /* Get base of stack in r32. */ + EXTRACT_THREAD_INFO(r32) + +.Lretry_work_pending: + /* * Disable interrupts so as to make sure we don't * miss an interrupt that sets any of the thread flags (like * need_resched or sigpending) between sampling and the iret. @@ -656,9 +670,6 @@ STD_ENTRY(interrupt_return) IRQ_DISABLE(r20, r21) TRACE_IRQS_OFF /* Note: clobbers registers r0-r29 */ - /* Get base of stack in r32; note r30/31 are used as arguments here. */ - GET_THREAD_INFO(r32) - /* Check to see if there is any work to do before returning to user. */ { @@ -674,16 +685,18 @@ STD_ENTRY(interrupt_return) /* * Make sure we have all the registers saved for signal - * handling or single-step. Call out to C code to figure out + * handling or notify-resume. Call out to C code to figure out * exactly what we need to do for each flag bit, then if * necessary, reload the flags and recheck. */ - push_extra_callee_saves r0 { PTREGS_PTR(r0, PTREGS_OFFSET_BASE) - jal do_work_pending + bnez r33, 1f } - bnez r0, .Lresume_userspace + push_extra_callee_saves r0 + movei r33, 1 +1: jal do_work_pending + bnez r0, .Lretry_work_pending /* * In the NMI case we @@ -968,11 +981,16 @@ handle_syscall: shl16insli r20, r20, hw0(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET) add r20, r20, tp ld4s r21, r20 - addi r21, r21, 1 - st4 r20, r21 + { + addi r21, r21, 1 + move r31, sp + } + { + st4 r20, r21 + EXTRACT_THREAD_INFO(r31) + } /* Trace syscalls, if requested. 
*/ - GET_THREAD_INFO(r31) addi r31, r31, THREAD_INFO_FLAGS_OFFSET ld r30, r31 andi r30, r30, _TIF_SYSCALL_TRACE diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c index 2d5ef617bb39..54e6c64b85cc 100644 --- a/arch/tile/kernel/process.c +++ b/arch/tile/kernel/process.c @@ -567,6 +567,10 @@ struct task_struct *__sched _switch_to(struct task_struct *prev, */ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) { + /* If we enter in kernel mode, do nothing and exit the caller loop. */ + if (!user_mode(regs)) + return 0; + if (thread_info_flags & _TIF_NEED_RESCHED) { schedule(); return 1; @@ -589,8 +593,7 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags) return 1; } if (thread_info_flags & _TIF_SINGLESTEP) { - if ((regs->ex1 & SPR_EX_CONTEXT_1_1__PL_MASK) == 0) - single_step_once(regs); + single_step_once(regs); return 0; } panic("work_pending: bad flags %#x\n", thread_info_flags); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1d14cc6b79ad..c9866b0b77d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -81,7 +81,7 @@ config X86 select CLKEVT_I8253 select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_IOMAP - select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC + select DCACHE_WORD_ACCESS config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index d3c0b0277666..fb7117a4ade1 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -403,13 +403,11 @@ static void print_absolute_symbols(void) for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; - Elf32_Sym *sh_symtab; int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; } - sh_symtab = sec->symtab; sym_strtab = sec->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { Elf32_Sym *sym; diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 4824fb45560f..07b3a68d2d29 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -294,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* OK, This is the point of no return */ set_personality(PER_LINUX); - set_thread_flag(TIF_IA32); - current->mm->context.ia32_compat = 1; + set_personality_ia32(false); setup_new_exec(bprm); diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h index 6fe6767b7124..e58f03b206c3 100644 --- a/arch/x86/include/asm/word-at-a-time.h +++ b/arch/x86/include/asm/word-at-a-time.h @@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a) return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); } +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, dummy; + + asm( + "1:\tmov %2,%0\n" + "2:\n" + ".section .fixup,\"ax\"\n" + "3:\t" + "lea %2,%1\n\t" + "and %3,%1\n\t" + "mov (%1),%0\n\t" + "leal %2,%%ecx\n\t" + "andl %4,%%ecx\n\t" + "shll $3,%%ecx\n\t" + "shr %%cl,%0\n\t" + "jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) + :"=&r" (ret),"=&c" (dummy) + :"m" (*(unsigned long *)addr), + "i" (-sizeof(unsigned long)), + "i" (sizeof(unsigned long)-1)); + return ret; +} + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1c67ca100e4c..146bb6218eec 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -580,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* re-enable TopologyExtensions if switched off by BIOS */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && + !cpu_has(c, X86_FEATURE_TOPOEXT)) { + u64 val; + + if (!rdmsrl_amd_safe(0xc0011005, &val)) { + val |= 1ULL << 54; + wrmsrl_amd_safe(0xc0011005, val); + rdmsrl(0xc0011005, val); + if (val & (1ULL << 54)) { + set_cpu_cap(c, X86_FEATURE_TOPOEXT); + printk(KERN_INFO FW_INFO "CPU: Re-enabling " + "disabled Topology Extensions Support\n"); + } + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b8ba6e4a27e4..e554e5ad2fe8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -79,7 +79,6 @@ struct kvm_task_sleep_node { u32 token; int cpu; bool halted; - struct mm_struct *mm; }; static struct kvm_task_sleep_head { @@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token) n.token = token; n.cpu = smp_processor_id(); - n.mm = current->active_mm; n.halted = idle || preempt_count() > 1; - atomic_inc(&n.mm->mm_count); init_waitqueue_head(&n.wq); hlist_add_head(&n.link, &b->list); spin_unlock(&b->lock); @@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); - if (!n->mm) - return; - mmdrop(n->mm); if (n->halted) smp_send_reschedule(n->cpu); else if (waitqueue_active(&n->wq)) @@ -207,7 +201,7 @@ again: * async PF was not yet handled. * Add dummy entry for the token. */ - n = kmalloc(sizeof(*n), GFP_ATOMIC); + n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) { /* * Allocation failed! Busy wait while other cpu @@ -219,7 +213,6 @@ again: } n->token = token; n->cpu = smp_processor_id(); - n->mm = NULL; init_waitqueue_head(&n->wq); hlist_add_head(&n->link, &b->list); } else diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 733ca39f367e..43d8b48b23e6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -423,6 +423,7 @@ void set_personality_ia32(bool x32) current_thread_info()->status |= TS_COMPAT; } } +EXPORT_SYMBOL_GPL(set_personality_ia32); unsigned long get_wchan(struct task_struct *p) { diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 71f4727da373..5a98aa272184 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void) #endif rc = -EINVAL; if (pcpu_chosen_fc != PCPU_FC_PAGE) { - const size_t atom_size = cpu_has_pse ? 
PMD_SIZE : PAGE_SIZE; const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; + size_t atom_size; + /* + * On 64bit, use PMD_SIZE for atom_size so that embedded + * percpu areas are aligned to PMD. This, in the future, + * can also allow using PMD mappings in vmalloc area. Use + * PAGE_SIZE on 32bit as vmalloc space is highly contended + * and large vmalloc area allocs can easily fail. + */ +#ifdef CONFIG_X86_64 + atom_size = PMD_SIZE; +#else + atom_size = PAGE_SIZE; +#endif rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size, atom_size, pcpu_cpu_distance, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 91a5e989abcf..185a2b823a2d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6581,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c index 66d377e334f7..646e3b5b4bb6 100644 --- a/arch/x86/platform/geode/net5501.c +++ b/arch/x86/platform/geode/net5501.c @@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = { .name = "net5501:1", .gpio = 6, .default_trigger = "default-on", - .active_low = 1, + .active_low = 0, }, }; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index a8f8844b8d32..95dccce8e979 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -63,6 +63,7 @@ #include <asm/stackprotector.h> #include <asm/hypervisor.h> #include <asm/mwait.h> +#include <asm/pci_x86.h> #ifdef CONFIG_ACPI #include <linux/acpi.h> @@ -809,9 +810,40 @@ static void xen_io_delay(void) } #ifdef CONFIG_X86_LOCAL_APIC +static unsigned long xen_set_apic_id(unsigned int x) +{ + WARN_ON(1); + return x; +} +static unsigned int xen_get_apic_id(unsigned long x) +{ + return ((x)>>24) & 0xFFu; +} static u32 xen_apic_read(u32 reg) { - return 0; + struct xen_platform_op op = { + .cmd = XENPF_get_cpuinfo, + .interface_version = XENPF_INTERFACE_VERSION, + .u.pcpu_info.xen_cpuid = 0, + }; + int ret = 0; + + /* Shouldn't need this as APIC is turned off for PV, and we only + * get called on the bootup processor. But just in case. */ + if (!xen_initial_domain() || smp_processor_id()) + return 0; + + if (reg == APIC_LVR) + return 0x10; + + if (reg != APIC_ID) + return 0; + + ret = HYPERVISOR_dom0_op(&op); + if (ret) + return 0; + + return op.u.pcpu_info.apic_id << 24; } static void xen_apic_write(u32 reg, u32 val) @@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void) apic->icr_write = xen_apic_icr_write; apic->wait_icr_idle = xen_apic_wait_icr_idle; apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; + apic->set_apic_id = xen_set_apic_id; + apic->get_apic_id = xen_get_apic_id; } #endif @@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void) /* Make sure ACS will be enabled */ pci_request_acs(); } - - +#ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. 
*/ pci_probe &= ~PCI_PROBE_BIOS; +#endif xen_raw_console_write("about to get started...\n"); xen_setup_runstate_info(0); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b8e279479a6b..69f5857660ac 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + unsigned long pfn = mfn_to_pfn(mfn); + pteval_t flags = val & PTE_FLAGS_MASK; - val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; + if (unlikely(pfn == ~0)) + val = flags & ~_PAGE_PRESENT; + else + val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val;