Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 24
-rw-r--r--  arch/arm/Kconfig.debug | 2
-rw-r--r--  arch/arm/Makefile | 3
-rw-r--r--  arch/arm/boot/dts/am335x-boneblack.dts | 4
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 4
-rw-r--r--  arch/arm/boot/dts/atlas7.dtsi | 1042
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts | 25
-rw-r--r--  arch/arm/common/mcpm_platsmp.c | 12
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 1
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 6
-rw-r--r--  arch/arm/include/asm/Kbuild | 1
-rw-r--r--  arch/arm/include/asm/assembler.h | 47
-rw-r--r--  arch/arm/include/asm/barrier.h | 13
-rw-r--r--  arch/arm/include/asm/bitops.h | 24
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 21
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/arm/include/asm/domain.h | 53
-rw-r--r--  arch/arm/include/asm/fixmap.h | 15
-rw-r--r--  arch/arm/include/asm/futex.h | 19
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 2
-rw-r--r--  arch/arm/include/asm/io.h | 75
-rw-r--r--  arch/arm/include/asm/memory.h | 4
-rw-r--r--  arch/arm/include/asm/outercache.h | 17
-rw-r--r--  arch/arm/include/asm/pgtable-2level-hwdef.h | 1
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h | 31
-rw-r--r--  arch/arm/include/asm/smp.h | 2
-rw-r--r--  arch/arm/include/asm/smp_plat.h | 9
-rw-r--r--  arch/arm/include/asm/switch_to.h | 5
-rw-r--r--  arch/arm/include/asm/thread_info.h | 3
-rw-r--r--  arch/arm/include/asm/uaccess.h | 85
-rw-r--r--  arch/arm/kernel/armksyms.c | 12
-rw-r--r--  arch/arm/kernel/entry-armv.S | 34
-rw-r--r--  arch/arm/kernel/entry-common.S | 3
-rw-r--r--  arch/arm/kernel/entry-header.S | 112
-rw-r--r--  arch/arm/kernel/head.S | 8
-rw-r--r--  arch/arm/kernel/irq.c | 1
-rw-r--r--  arch/arm/kernel/perf_event.c | 7
-rw-r--r--  arch/arm/kernel/process.c | 56
-rw-r--r--  arch/arm/kernel/reboot.c | 2
-rw-r--r--  arch/arm/kernel/setup.c | 6
-rw-r--r--  arch/arm/kernel/smp.c | 21
-rw-r--r--  arch/arm/kernel/swp_emulate.c | 3
-rw-r--r--  arch/arm/kernel/traps.c | 1
-rw-r--r--  arch/arm/kernel/vdso.c | 7
-rw-r--r--  arch/arm/lib/clear_user.S | 6
-rw-r--r--  arch/arm/lib/copy_from_user.S | 6
-rw-r--r--  arch/arm/lib/copy_to_user.S | 6
-rw-r--r--  arch/arm/lib/csumpartialcopyuser.S | 14
-rw-r--r--  arch/arm/lib/memcpy.S | 2
-rw-r--r--  arch/arm/lib/memset.S | 2
-rw-r--r--  arch/arm/lib/uaccess_with_memcpy.c | 6
-rw-r--r--  arch/arm/mach-mmp/pm-pxa910.c | 1
-rw-r--r--  arch/arm/mach-omap2/Kconfig | 7
-rw-r--r--  arch/arm/mach-omap2/common.c | 1
-rw-r--r--  arch/arm/mach-omap2/common.h | 9
-rw-r--r--  arch/arm/mach-omap2/dma.c | 1
-rw-r--r--  arch/arm/mach-omap2/include/mach/barriers.h | 33
-rw-r--r--  arch/arm/mach-omap2/io.c | 2
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c | 121
-rw-r--r--  arch/arm/mach-omap2/sleep44xx.S | 8
-rw-r--r--  arch/arm/mach-prima2/Kconfig | 1
-rw-r--r--  arch/arm/mach-prima2/pm.c | 1
-rw-r--r--  arch/arm/mach-prima2/rtciobrg.c | 48
-rw-r--r--  arch/arm/mach-shmobile/common.h | 2
-rw-r--r--  arch/arm/mach-shmobile/platsmp.c | 4
-rw-r--r--  arch/arm/mach-shmobile/smp-r8a7790.c | 2
-rw-r--r--  arch/arm/mach-shmobile/smp-r8a7791.c | 2
-rw-r--r--  arch/arm/mach-shmobile/smp-sh73a0.c | 2
-rw-r--r--  arch/arm/mach-sunxi/Kconfig | 2
-rw-r--r--  arch/arm/mach-sunxi/sunxi.c | 5
-rw-r--r--  arch/arm/mach-ux500/cache-l2x0.c | 1
-rw-r--r--  arch/arm/mm/Kconfig | 4
-rw-r--r--  arch/arm/mm/abort-ev4.S | 1
-rw-r--r--  arch/arm/mm/abort-ev5t.S | 4
-rw-r--r--  arch/arm/mm/abort-ev5tj.S | 4
-rw-r--r--  arch/arm/mm/abort-ev6.S | 8
-rw-r--r--  arch/arm/mm/abort-ev7.S | 1
-rw-r--r--  arch/arm/mm/abort-lv4t.S | 2
-rw-r--r--  arch/arm/mm/abort-macro.S | 14
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c | 6
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 5
-rw-r--r--  arch/arm/mm/dma-mapping.c | 24
-rw-r--r--  arch/arm/mm/dma.h | 32
-rw-r--r--  arch/arm/mm/flush.c | 15
-rw-r--r--  arch/arm/mm/highmem.c | 6
-rw-r--r--  arch/arm/mm/ioremap.c | 33
-rw-r--r--  arch/arm/mm/mmu.c | 99
-rw-r--r--  arch/arm/mm/nommu.c | 39
-rw-r--r--  arch/arm/mm/pgd.c | 10
-rw-r--r--  arch/arm/mm/proc-v7.S | 14
-rw-r--r--  arch/arm/vdso/Makefile | 2
-rw-r--r--  arch/arm/vdso/vdsomunge.c | 56
-rw-r--r--  arch/arm64/Kconfig | 2
-rw-r--r--  arch/arm64/boot/dts/apm/apm-mustang.dts | 10
-rw-r--r--  arch/arm64/boot/dts/arm/Makefile | 1
-rw-r--r--  arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts | 191
-rw-r--r--  arch/arm64/boot/dts/cavium/thunder-88xx.dtsi | 9
-rw-r--r--  arch/arm64/configs/defconfig | 1
-rw-r--r--  arch/arm64/include/asm/acpi.h | 8
-rw-r--r--  arch/arm64/kernel/entry.S | 4
-rw-r--r--  arch/arm64/kernel/entry32.S | 2
-rw-r--r--  arch/arm64/kernel/smp.c | 2
-rw-r--r--  arch/arm64/mm/Makefile | 2
-rw-r--r--  arch/cris/arch-v32/drivers/sync_serial.c | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/include/asm/mach-loongson64/mmzone.h | 2
-rw-r--r--  arch/mips/include/asm/smp.h | 1
-rw-r--r--  arch/mips/kernel/branch.c | 4
-rw-r--r--  arch/mips/kernel/cps-vec.S | 96
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 37
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 35
-rw-r--r--  arch/mips/kernel/setup.c | 13
-rw-r--r--  arch/mips/kernel/smp-cps.c | 6
-rw-r--r--  arch/mips/kernel/smp.c | 44
-rw-r--r--  arch/mips/kernel/traps.c | 8
-rw-r--r--  arch/mips/loongson64/common/bonito-irq.c | 2
-rw-r--r--  arch/mips/loongson64/common/cmdline.c | 2
-rw-r--r--  arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c | 2
-rw-r--r--  arch/mips/loongson64/common/env.c | 2
-rw-r--r--  arch/mips/loongson64/common/irq.c | 2
-rw-r--r--  arch/mips/loongson64/common/setup.c | 2
-rw-r--r--  arch/mips/loongson64/fuloong-2e/irq.c | 2
-rw-r--r--  arch/mips/loongson64/lemote-2f/clock.c | 4
-rw-r--r--  arch/mips/loongson64/loongson-3/numa.c | 2
-rw-r--r--  arch/mips/math-emu/cp1emu.c | 6
-rw-r--r--  arch/mips/mm/c-r4k.c | 18
-rw-r--r--  arch/mips/mti-malta/malta-time.c | 20
-rw-r--r--  arch/mips/pistachio/init.c | 8
-rw-r--r--  arch/mips/pistachio/time.c | 5
-rw-r--r--  arch/parisc/include/asm/pgtable.h | 55
-rw-r--r--  arch/parisc/include/asm/tlbflush.h | 53
-rw-r--r--  arch/parisc/kernel/cache.c | 105
-rw-r--r--  arch/parisc/kernel/entry.S | 163
-rw-r--r--  arch/parisc/kernel/traps.c | 4
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 31
-rw-r--r--  arch/powerpc/kernel/traps.c | 2
-rw-r--r--  arch/powerpc/mm/fault.c | 4
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 2
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 16
-rw-r--r--  arch/powerpc/platforms/powernv/opal-prd.c | 9
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 1
-rw-r--r--  arch/tile/lib/memcpy_user_64.c | 4
-rw-r--r--  arch/x86/Kconfig | 7
-rw-r--r--  arch/x86/include/asm/espfix.h | 2
-rw-r--r--  arch/x86/include/asm/kasan.h | 8
-rw-r--r--  arch/x86/kernel/apic/vector.c | 10
-rw-r--r--  arch/x86/kernel/early_printk.c | 4
-rw-r--r--  arch/x86/kernel/espfix_64.c | 28
-rw-r--r--  arch/x86/kernel/head64.c | 10
-rw-r--r--  arch/x86/kernel/head_64.S | 29
-rw-r--r--  arch/x86/kernel/irq.c | 20
-rw-r--r--  arch/x86/kernel/smpboot.c | 27
-rw-r--r--  arch/x86/kernel/tsc.c | 11
-rw-r--r--  arch/x86/lib/usercopy.c | 2
-rw-r--r--  arch/x86/mm/kasan_init_64.c | 47
156 files changed, 2953 insertions, 766 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a750c1425c3a..345882856287 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
config ARCH_HAS_BANDGAP
bool
+config FIX_EARLYCON_MEM
+ def_bool y if MMU
+
config GENERIC_HWEIGHT
bool
default y
@@ -1693,6 +1696,27 @@ config HIGHMEM
config HIGHPTE
bool "Allocate 2nd-level pagetables from highmem"
depends on HIGHMEM
+ help
+ The VM uses one page of physical memory for each page table.
+ For systems with a lot of processes, this can use a lot of
+ precious low memory, eventually leading to low memory being
+ consumed by page tables. Setting this option will allow
+ user-space 2nd level page tables to reside in high memory.
+
+config CPU_SW_DOMAIN_PAN
+ bool "Enable use of CPU domains to implement privileged no-access"
+ depends on MMU && !ARM_LPAE
+ default y
+ help
+ Increase kernel security by ensuring that normal kernel accesses
+ are unable to access userspace addresses. This can help prevent
+ use-after-free bugs becoming an exploitable privilege escalation
+ by ensuring that magic values (such as LIST_POISON) will always
+ fault when dereferenced.
+
+ CPUs with low-vector mappings use a best-efforts implementation.
+ Their lower 1MB needs to remain accessible for the vectors, but
+ the remainder of userspace will become appropriately inaccessible.
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f1b157971366..a2e16f940394 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1635,7 +1635,7 @@ config PID_IN_CONTEXTIDR
config DEBUG_SET_MODULE_RONX
bool "Set loadable kernel module data as NX and text as RO"
- depends on MODULES
+ depends on MODULES && MMU
---help---
This option helps catch unintended modifications to loadable
kernel module's text and read-only data. It also prevents execution
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 07ab3d203916..7451b447cc2d 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -312,6 +312,9 @@ INSTALL_TARGETS = zinstall uinstall install
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
+bootpImage uImage: zImage
+zImage: Image
+
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
diff --git a/arch/arm/boot/dts/am335x-boneblack.dts b/arch/arm/boot/dts/am335x-boneblack.dts
index 901739fcb85a..5c42d259fa68 100644
--- a/arch/arm/boot/dts/am335x-boneblack.dts
+++ b/arch/arm/boot/dts/am335x-boneblack.dts
@@ -80,3 +80,7 @@
status = "okay";
};
};
+
+&rtc {
+ system-power-controller;
+};
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index c80a3e233792..ade28c790f4b 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -132,6 +132,12 @@
};
};
+ emif: emif@4c000000 {
+ compatible = "ti,emif-am4372";
+ reg = <0x4c000000 0x1000000>;
+ ti,hwmods = "emif";
+ };
+
edma: edma@49000000 {
compatible = "ti,edma3";
ti,hwmods = "tpcc", "tptc0", "tptc1", "tptc2";
@@ -941,6 +947,7 @@
ti,hwmods = "dss_rfbi";
clocks = <&disp_clk>;
clock-names = "fck";
+ status = "disabled";
};
};
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index a42cc377a862..a63bf78191ea 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -605,6 +605,10 @@
phy-supply = <&ldousb_reg>;
};
+&usb2_phy2 {
+ phy-supply = <&ldousb_reg>;
+};
+
&usb1 {
dr_mode = "host";
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi
index 5dfd3a44bf82..3e21311f9514 100644
--- a/arch/arm/boot/dts/atlas7.dtsi
+++ b/arch/arm/boot/dts/atlas7.dtsi
@@ -135,6 +135,1025 @@
compatible = "sirf,atlas7-ioc";
reg = <0x18880000 0x1000>,
<0x10E40000 0x1000>;
+
+ audio_ac97_pmx: audio_ac97@0 {
+ audio_ac97 {
+ groups = "audio_ac97_grp";
+ function = "audio_ac97";
+ };
+ };
+
+ audio_func_dbg_pmx: audio_func_dbg@0 {
+ audio_func_dbg {
+ groups = "audio_func_dbg_grp";
+ function = "audio_func_dbg";
+ };
+ };
+
+ audio_i2s_pmx: audio_i2s@0 {
+ audio_i2s {
+ groups = "audio_i2s_grp";
+ function = "audio_i2s";
+ };
+ };
+
+ audio_i2s_2ch_pmx: audio_i2s_2ch@0 {
+ audio_i2s_2ch {
+ groups = "audio_i2s_2ch_grp";
+ function = "audio_i2s_2ch";
+ };
+ };
+
+ audio_i2s_extclk_pmx: audio_i2s_extclk@0 {
+ audio_i2s_extclk {
+ groups = "audio_i2s_extclk_grp";
+ function = "audio_i2s_extclk";
+ };
+ };
+
+ audio_uart0_pmx: audio_uart0@0 {
+ audio_uart0 {
+ groups = "audio_uart0_grp";
+ function = "audio_uart0";
+ };
+ };
+
+ audio_uart1_pmx: audio_uart1@0 {
+ audio_uart1 {
+ groups = "audio_uart1_grp";
+ function = "audio_uart1";
+ };
+ };
+
+ audio_uart2_pmx0: audio_uart2@0 {
+ audio_uart2_0 {
+ groups = "audio_uart2_grp0";
+ function = "audio_uart2_m0";
+ };
+ };
+
+ audio_uart2_pmx1: audio_uart2@1 {
+ audio_uart2_1 {
+ groups = "audio_uart2_grp1";
+ function = "audio_uart2_m1";
+ };
+ };
+
+ c_can_trnsvr_pmx: c_can_trnsvr@0 {
+ c_can_trnsvr {
+ groups = "c_can_trnsvr_grp";
+ function = "c_can_trnsvr";
+ };
+ };
+
+ c0_can_pmx0: c0_can@0 {
+ c0_can_0 {
+ groups = "c0_can_grp0";
+ function = "c0_can_m0";
+ };
+ };
+
+ c0_can_pmx1: c0_can@1 {
+ c0_can_1 {
+ groups = "c0_can_grp1";
+ function = "c0_can_m1";
+ };
+ };
+
+ c1_can_pmx0: c1_can@0 {
+ c1_can_0 {
+ groups = "c1_can_grp0";
+ function = "c1_can_m0";
+ };
+ };
+
+ c1_can_pmx1: c1_can@1 {
+ c1_can_1 {
+ groups = "c1_can_grp1";
+ function = "c1_can_m1";
+ };
+ };
+
+ c1_can_pmx2: c1_can@2 {
+ c1_can_2 {
+ groups = "c1_can_grp2";
+ function = "c1_can_m2";
+ };
+ };
+
+ ca_audio_lpc_pmx: ca_audio_lpc@0 {
+ ca_audio_lpc {
+ groups = "ca_audio_lpc_grp";
+ function = "ca_audio_lpc";
+ };
+ };
+
+ ca_bt_lpc_pmx: ca_bt_lpc@0 {
+ ca_bt_lpc {
+ groups = "ca_bt_lpc_grp";
+ function = "ca_bt_lpc";
+ };
+ };
+
+ ca_coex_pmx: ca_coex@0 {
+ ca_coex {
+ groups = "ca_coex_grp";
+ function = "ca_coex";
+ };
+ };
+
+ ca_curator_lpc_pmx: ca_curator_lpc@0 {
+ ca_curator_lpc {
+ groups = "ca_curator_lpc_grp";
+ function = "ca_curator_lpc";
+ };
+ };
+
+ ca_pcm_debug_pmx: ca_pcm_debug@0 {
+ ca_pcm_debug {
+ groups = "ca_pcm_debug_grp";
+ function = "ca_pcm_debug";
+ };
+ };
+
+ ca_pio_pmx: ca_pio@0 {
+ ca_pio {
+ groups = "ca_pio_grp";
+ function = "ca_pio";
+ };
+ };
+
+ ca_sdio_debug_pmx: ca_sdio_debug@0 {
+ ca_sdio_debug {
+ groups = "ca_sdio_debug_grp";
+ function = "ca_sdio_debug";
+ };
+ };
+
+ ca_spi_pmx: ca_spi@0 {
+ ca_spi {
+ groups = "ca_spi_grp";
+ function = "ca_spi";
+ };
+ };
+
+ ca_trb_pmx: ca_trb@0 {
+ ca_trb {
+ groups = "ca_trb_grp";
+ function = "ca_trb";
+ };
+ };
+
+ ca_uart_debug_pmx: ca_uart_debug@0 {
+ ca_uart_debug {
+ groups = "ca_uart_debug_grp";
+ function = "ca_uart_debug";
+ };
+ };
+
+ clkc_pmx0: clkc@0 {
+ clkc_0 {
+ groups = "clkc_grp0";
+ function = "clkc_m0";
+ };
+ };
+
+ clkc_pmx1: clkc@1 {
+ clkc_1 {
+ groups = "clkc_grp1";
+ function = "clkc_m1";
+ };
+ };
+
+ gn_gnss_i2c_pmx: gn_gnss_i2c@0 {
+ gn_gnss_i2c {
+ groups = "gn_gnss_i2c_grp";
+ function = "gn_gnss_i2c";
+ };
+ };
+
+ gn_gnss_uart_nopause_pmx: gn_gnss_uart_nopause@0 {
+ gn_gnss_uart_nopause {
+ groups = "gn_gnss_uart_nopause_grp";
+ function = "gn_gnss_uart_nopause";
+ };
+ };
+
+ gn_gnss_uart_pmx: gn_gnss_uart@0 {
+ gn_gnss_uart {
+ groups = "gn_gnss_uart_grp";
+ function = "gn_gnss_uart";
+ };
+ };
+
+ gn_trg_spi_pmx0: gn_trg_spi@0 {
+ gn_trg_spi_0 {
+ groups = "gn_trg_spi_grp0";
+ function = "gn_trg_spi_m0";
+ };
+ };
+
+ gn_trg_spi_pmx1: gn_trg_spi@1 {
+ gn_trg_spi_1 {
+ groups = "gn_trg_spi_grp1";
+ function = "gn_trg_spi_m1";
+ };
+ };
+
+ cvbs_dbg_pmx: cvbs_dbg@0 {
+ cvbs_dbg {
+ groups = "cvbs_dbg_grp";
+ function = "cvbs_dbg";
+ };
+ };
+
+ cvbs_dbg_test_pmx0: cvbs_dbg_test@0 {
+ cvbs_dbg_test_0 {
+ groups = "cvbs_dbg_test_grp0";
+ function = "cvbs_dbg_test_m0";
+ };
+ };
+
+ cvbs_dbg_test_pmx1: cvbs_dbg_test@1 {
+ cvbs_dbg_test_1 {
+ groups = "cvbs_dbg_test_grp1";
+ function = "cvbs_dbg_test_m1";
+ };
+ };
+
+ cvbs_dbg_test_pmx2: cvbs_dbg_test@2 {
+ cvbs_dbg_test_2 {
+ groups = "cvbs_dbg_test_grp2";
+ function = "cvbs_dbg_test_m2";
+ };
+ };
+
+ cvbs_dbg_test_pmx3: cvbs_dbg_test@3 {
+ cvbs_dbg_test_3 {
+ groups = "cvbs_dbg_test_grp3";
+ function = "cvbs_dbg_test_m3";
+ };
+ };
+
+ cvbs_dbg_test_pmx4: cvbs_dbg_test@4 {
+ cvbs_dbg_test_4 {
+ groups = "cvbs_dbg_test_grp4";
+ function = "cvbs_dbg_test_m4";
+ };
+ };
+
+ cvbs_dbg_test_pmx5: cvbs_dbg_test@5 {
+ cvbs_dbg_test_5 {
+ groups = "cvbs_dbg_test_grp5";
+ function = "cvbs_dbg_test_m5";
+ };
+ };
+
+ cvbs_dbg_test_pmx6: cvbs_dbg_test@6 {
+ cvbs_dbg_test_6 {
+ groups = "cvbs_dbg_test_grp6";
+ function = "cvbs_dbg_test_m6";
+ };
+ };
+
+ cvbs_dbg_test_pmx7: cvbs_dbg_test@7 {
+ cvbs_dbg_test_7 {
+ groups = "cvbs_dbg_test_grp7";
+ function = "cvbs_dbg_test_m7";
+ };
+ };
+
+ cvbs_dbg_test_pmx8: cvbs_dbg_test@8 {
+ cvbs_dbg_test_8 {
+ groups = "cvbs_dbg_test_grp8";
+ function = "cvbs_dbg_test_m8";
+ };
+ };
+
+ cvbs_dbg_test_pmx9: cvbs_dbg_test@9 {
+ cvbs_dbg_test_9 {
+ groups = "cvbs_dbg_test_grp9";
+ function = "cvbs_dbg_test_m9";
+ };
+ };
+
+ cvbs_dbg_test_pmx10: cvbs_dbg_test@10 {
+ cvbs_dbg_test_10 {
+ groups = "cvbs_dbg_test_grp10";
+ function = "cvbs_dbg_test_m10";
+ };
+ };
+
+ cvbs_dbg_test_pmx11: cvbs_dbg_test@11 {
+ cvbs_dbg_test_11 {
+ groups = "cvbs_dbg_test_grp11";
+ function = "cvbs_dbg_test_m11";
+ };
+ };
+
+ cvbs_dbg_test_pmx12: cvbs_dbg_test@12 {
+ cvbs_dbg_test_12 {
+ groups = "cvbs_dbg_test_grp12";
+ function = "cvbs_dbg_test_m12";
+ };
+ };
+
+ cvbs_dbg_test_pmx13: cvbs_dbg_test@13 {
+ cvbs_dbg_test_13 {
+ groups = "cvbs_dbg_test_grp13";
+ function = "cvbs_dbg_test_m13";
+ };
+ };
+
+ cvbs_dbg_test_pmx14: cvbs_dbg_test@14 {
+ cvbs_dbg_test_14 {
+ groups = "cvbs_dbg_test_grp14";
+ function = "cvbs_dbg_test_m14";
+ };
+ };
+
+ cvbs_dbg_test_pmx15: cvbs_dbg_test@15 {
+ cvbs_dbg_test_15 {
+ groups = "cvbs_dbg_test_grp15";
+ function = "cvbs_dbg_test_m15";
+ };
+ };
+
+ gn_gnss_power_pmx: gn_gnss_power@0 {
+ gn_gnss_power {
+ groups = "gn_gnss_power_grp";
+ function = "gn_gnss_power";
+ };
+ };
+
+ gn_gnss_sw_status_pmx: gn_gnss_sw_status@0 {
+ gn_gnss_sw_status {
+ groups = "gn_gnss_sw_status_grp";
+ function = "gn_gnss_sw_status";
+ };
+ };
+
+ gn_gnss_eclk_pmx: gn_gnss_eclk@0 {
+ gn_gnss_eclk {
+ groups = "gn_gnss_eclk_grp";
+ function = "gn_gnss_eclk";
+ };
+ };
+
+ gn_gnss_irq1_pmx0: gn_gnss_irq1@0 {
+ gn_gnss_irq1_0 {
+ groups = "gn_gnss_irq1_grp0";
+ function = "gn_gnss_irq1_m0";
+ };
+ };
+
+ gn_gnss_irq2_pmx0: gn_gnss_irq2@0 {
+ gn_gnss_irq2_0 {
+ groups = "gn_gnss_irq2_grp0";
+ function = "gn_gnss_irq2_m0";
+ };
+ };
+
+ gn_gnss_tm_pmx: gn_gnss_tm@0 {
+ gn_gnss_tm {
+ groups = "gn_gnss_tm_grp";
+ function = "gn_gnss_tm";
+ };
+ };
+
+ gn_gnss_tsync_pmx: gn_gnss_tsync@0 {
+ gn_gnss_tsync {
+ groups = "gn_gnss_tsync_grp";
+ function = "gn_gnss_tsync";
+ };
+ };
+
+ gn_io_gnsssys_sw_cfg_pmx: gn_io_gnsssys_sw_cfg@0 {
+ gn_io_gnsssys_sw_cfg {
+ groups = "gn_io_gnsssys_sw_cfg_grp";
+ function = "gn_io_gnsssys_sw_cfg";
+ };
+ };
+
+ gn_trg_pmx0: gn_trg@0 {
+ gn_trg_0 {
+ groups = "gn_trg_grp0";
+ function = "gn_trg_m0";
+ };
+ };
+
+ gn_trg_pmx1: gn_trg@1 {
+ gn_trg_1 {
+ groups = "gn_trg_grp1";
+ function = "gn_trg_m1";
+ };
+ };
+
+ gn_trg_shutdown_pmx0: gn_trg_shutdown@0 {
+ gn_trg_shutdown_0 {
+ groups = "gn_trg_shutdown_grp0";
+ function = "gn_trg_shutdown_m0";
+ };
+ };
+
+ gn_trg_shutdown_pmx1: gn_trg_shutdown@1 {
+ gn_trg_shutdown_1 {
+ groups = "gn_trg_shutdown_grp1";
+ function = "gn_trg_shutdown_m1";
+ };
+ };
+
+ gn_trg_shutdown_pmx2: gn_trg_shutdown@2 {
+ gn_trg_shutdown_2 {
+ groups = "gn_trg_shutdown_grp2";
+ function = "gn_trg_shutdown_m2";
+ };
+ };
+
+ gn_trg_shutdown_pmx3: gn_trg_shutdown@3 {
+ gn_trg_shutdown_3 {
+ groups = "gn_trg_shutdown_grp3";
+ function = "gn_trg_shutdown_m3";
+ };
+ };
+
+ i2c0_pmx: i2c0@0 {
+ i2c0 {
+ groups = "i2c0_grp";
+ function = "i2c0";
+ };
+ };
+
+ i2c1_pmx: i2c1@0 {
+ i2c1 {
+ groups = "i2c1_grp";
+ function = "i2c1";
+ };
+ };
+
+ jtag_pmx0: jtag@0 {
+ jtag_0 {
+ groups = "jtag_grp0";
+ function = "jtag_m0";
+ };
+ };
+
+ ks_kas_spi_pmx0: ks_kas_spi@0 {
+ ks_kas_spi_0 {
+ groups = "ks_kas_spi_grp0";
+ function = "ks_kas_spi_m0";
+ };
+ };
+
+ ld_ldd_pmx: ld_ldd@0 {
+ ld_ldd {
+ groups = "ld_ldd_grp";
+ function = "ld_ldd";
+ };
+ };
+
+ ld_ldd_16bit_pmx: ld_ldd_16bit@0 {
+ ld_ldd_16bit {
+ groups = "ld_ldd_16bit_grp";
+ function = "ld_ldd_16bit";
+ };
+ };
+
+ ld_ldd_fck_pmx: ld_ldd_fck@0 {
+ ld_ldd_fck {
+ groups = "ld_ldd_fck_grp";
+ function = "ld_ldd_fck";
+ };
+ };
+
+ ld_ldd_lck_pmx: ld_ldd_lck@0 {
+ ld_ldd_lck {
+ groups = "ld_ldd_lck_grp";
+ function = "ld_ldd_lck";
+ };
+ };
+
+ lr_lcdrom_pmx: lr_lcdrom@0 {
+ lr_lcdrom {
+ groups = "lr_lcdrom_grp";
+ function = "lr_lcdrom";
+ };
+ };
+
+ lvds_analog_pmx: lvds_analog@0 {
+ lvds_analog {
+ groups = "lvds_analog_grp";
+ function = "lvds_analog";
+ };
+ };
+
+ nd_df_pmx: nd_df@0 {
+ nd_df {
+ groups = "nd_df_grp";
+ function = "nd_df";
+ };
+ };
+
+ nd_df_nowp_pmx: nd_df_nowp@0 {
+ nd_df_nowp {
+ groups = "nd_df_nowp_grp";
+ function = "nd_df_nowp";
+ };
+ };
+
+ ps_pmx: ps@0 {
+ ps {
+ groups = "ps_grp";
+ function = "ps";
+ };
+ };
+
+ pwc_core_on_pmx: pwc_core_on@0 {
+ pwc_core_on {
+ groups = "pwc_core_on_grp";
+ function = "pwc_core_on";
+ };
+ };
+
+ pwc_ext_on_pmx: pwc_ext_on@0 {
+ pwc_ext_on {
+ groups = "pwc_ext_on_grp";
+ function = "pwc_ext_on";
+ };
+ };
+
+ pwc_gpio3_clk_pmx: pwc_gpio3_clk@0 {
+ pwc_gpio3_clk {
+ groups = "pwc_gpio3_clk_grp";
+ function = "pwc_gpio3_clk";
+ };
+ };
+
+ pwc_io_on_pmx: pwc_io_on@0 {
+ pwc_io_on {
+ groups = "pwc_io_on_grp";
+ function = "pwc_io_on";
+ };
+ };
+
+ pwc_lowbatt_b_pmx0: pwc_lowbatt_b@0 {
+ pwc_lowbatt_b_0 {
+ groups = "pwc_lowbatt_b_grp0";
+ function = "pwc_lowbatt_b_m0";
+ };
+ };
+
+ pwc_mem_on_pmx: pwc_mem_on@0 {
+ pwc_mem_on {
+ groups = "pwc_mem_on_grp";
+ function = "pwc_mem_on";
+ };
+ };
+
+ pwc_on_key_b_pmx0: pwc_on_key_b@0 {
+ pwc_on_key_b_0 {
+ groups = "pwc_on_key_b_grp0";
+ function = "pwc_on_key_b_m0";
+ };
+ };
+
+ pwc_wakeup_src0_pmx: pwc_wakeup_src0@0 {
+ pwc_wakeup_src0 {
+ groups = "pwc_wakeup_src0_grp";
+ function = "pwc_wakeup_src0";
+ };
+ };
+
+ pwc_wakeup_src1_pmx: pwc_wakeup_src1@0 {
+ pwc_wakeup_src1 {
+ groups = "pwc_wakeup_src1_grp";
+ function = "pwc_wakeup_src1";
+ };
+ };
+
+ pwc_wakeup_src2_pmx: pwc_wakeup_src2@0 {
+ pwc_wakeup_src2 {
+ groups = "pwc_wakeup_src2_grp";
+ function = "pwc_wakeup_src2";
+ };
+ };
+
+ pwc_wakeup_src3_pmx: pwc_wakeup_src3@0 {
+ pwc_wakeup_src3 {
+ groups = "pwc_wakeup_src3_grp";
+ function = "pwc_wakeup_src3";
+ };
+ };
+
+ pw_cko0_pmx0: pw_cko0@0 {
+ pw_cko0_0 {
+ groups = "pw_cko0_grp0";
+ function = "pw_cko0_m0";
+ };
+ };
+
+ pw_cko0_pmx1: pw_cko0@1 {
+ pw_cko0_1 {
+ groups = "pw_cko0_grp1";
+ function = "pw_cko0_m1";
+ };
+ };
+
+ pw_cko0_pmx2: pw_cko0@2 {
+ pw_cko0_2 {
+ groups = "pw_cko0_grp2";
+ function = "pw_cko0_m2";
+ };
+ };
+
+ pw_cko1_pmx0: pw_cko1@0 {
+ pw_cko1_0 {
+ groups = "pw_cko1_grp0";
+ function = "pw_cko1_m0";
+ };
+ };
+
+ pw_cko1_pmx1: pw_cko1@1 {
+ pw_cko1_1 {
+ groups = "pw_cko1_grp1";
+ function = "pw_cko1_m1";
+ };
+ };
+
+ pw_i2s01_clk_pmx0: pw_i2s01_clk@0 {
+ pw_i2s01_clk_0 {
+ groups = "pw_i2s01_clk_grp0";
+ function = "pw_i2s01_clk_m0";
+ };
+ };
+
+ pw_i2s01_clk_pmx1: pw_i2s01_clk@1 {
+ pw_i2s01_clk_1 {
+ groups = "pw_i2s01_clk_grp1";
+ function = "pw_i2s01_clk_m1";
+ };
+ };
+
+ pw_pwm0_pmx: pw_pwm0@0 {
+ pw_pwm0 {
+ groups = "pw_pwm0_grp";
+ function = "pw_pwm0";
+ };
+ };
+
+ pw_pwm1_pmx: pw_pwm1@0 {
+ pw_pwm1 {
+ groups = "pw_pwm1_grp";
+ function = "pw_pwm1";
+ };
+ };
+
+ pw_pwm2_pmx0: pw_pwm2@0 {
+ pw_pwm2_0 {
+ groups = "pw_pwm2_grp0";
+ function = "pw_pwm2_m0";
+ };
+ };
+
+ pw_pwm2_pmx1: pw_pwm2@1 {
+ pw_pwm2_1 {
+ groups = "pw_pwm2_grp1";
+ function = "pw_pwm2_m1";
+ };
+ };
+
+ pw_pwm3_pmx0: pw_pwm3@0 {
+ pw_pwm3_0 {
+ groups = "pw_pwm3_grp0";
+ function = "pw_pwm3_m0";
+ };
+ };
+
+ pw_pwm3_pmx1: pw_pwm3@1 {
+ pw_pwm3_1 {
+ groups = "pw_pwm3_grp1";
+ function = "pw_pwm3_m1";
+ };
+ };
+
+ pw_pwm_cpu_vol_pmx0: pw_pwm_cpu_vol@0 {
+ pw_pwm_cpu_vol_0 {
+ groups = "pw_pwm_cpu_vol_grp0";
+ function = "pw_pwm_cpu_vol_m0";
+ };
+ };
+
+ pw_pwm_cpu_vol_pmx1: pw_pwm_cpu_vol@1 {
+ pw_pwm_cpu_vol_1 {
+ groups = "pw_pwm_cpu_vol_grp1";
+ function = "pw_pwm_cpu_vol_m1";
+ };
+ };
+
+ pw_backlight_pmx0: pw_backlight@0 {
+ pw_backlight_0 {
+ groups = "pw_backlight_grp0";
+ function = "pw_backlight_m0";
+ };
+ };
+
+ pw_backlight_pmx1: pw_backlight@1 {
+ pw_backlight_1 {
+ groups = "pw_backlight_grp1";
+ function = "pw_backlight_m1";
+ };
+ };
+
+ rg_eth_mac_pmx: rg_eth_mac@0 {
+ rg_eth_mac {
+ groups = "rg_eth_mac_grp";
+ function = "rg_eth_mac";
+ };
+ };
+
+ rg_gmac_phy_intr_n_pmx: rg_gmac_phy_intr_n@0 {
+ rg_gmac_phy_intr_n {
+ groups = "rg_gmac_phy_intr_n_grp";
+ function = "rg_gmac_phy_intr_n";
+ };
+ };
+
+ rg_rgmii_mac_pmx: rg_rgmii_mac@0 {
+ rg_rgmii_mac {
+ groups = "rg_rgmii_mac_grp";
+ function = "rg_rgmii_mac";
+ };
+ };
+
+ rg_rgmii_phy_ref_clk_pmx0: rg_rgmii_phy_ref_clk@0 {
+ rg_rgmii_phy_ref_clk_0 {
+ groups =
+ "rg_rgmii_phy_ref_clk_grp0";
+ function =
+ "rg_rgmii_phy_ref_clk_m0";
+ };
+ };
+
+ rg_rgmii_phy_ref_clk_pmx1: rg_rgmii_phy_ref_clk@1 {
+ rg_rgmii_phy_ref_clk_1 {
+ groups =
+ "rg_rgmii_phy_ref_clk_grp1";
+ function =
+ "rg_rgmii_phy_ref_clk_m1";
+ };
+ };
+
+ sd0_pmx: sd0@0 {
+ sd0 {
+ groups = "sd0_grp";
+ function = "sd0";
+ };
+ };
+
+ sd0_4bit_pmx: sd0_4bit@0 {
+ sd0_4bit {
+ groups = "sd0_4bit_grp";
+ function = "sd0_4bit";
+ };
+ };
+
+ sd1_pmx: sd1@0 {
+ sd1 {
+ groups = "sd1_grp";
+ function = "sd1";
+ };
+ };
+
+ sd1_4bit_pmx0: sd1_4bit@0 {
+ sd1_4bit_0 {
+ groups = "sd1_4bit_grp0";
+ function = "sd1_4bit_m0";
+ };
+ };
+
+ sd1_4bit_pmx1: sd1_4bit@1 {
+ sd1_4bit_1 {
+ groups = "sd1_4bit_grp1";
+ function = "sd1_4bit_m1";
+ };
+ };
+
+ sd2_pmx0: sd2@0 {
+ sd2_0 {
+ groups = "sd2_grp0";
+ function = "sd2_m0";
+ };
+ };
+
+ sd2_no_cdb_pmx0: sd2_no_cdb@0 {
+ sd2_no_cdb_0 {
+ groups = "sd2_no_cdb_grp0";
+ function = "sd2_no_cdb_m0";
+ };
+ };
+
+ sd3_pmx: sd3@0 {
+ sd3 {
+ groups = "sd3_grp";
+ function = "sd3";
+ };
+ };
+
+ sd5_pmx: sd5@0 {
+ sd5 {
+ groups = "sd5_grp";
+ function = "sd5";
+ };
+ };
+
+ sd6_pmx0: sd6@0 {
+ sd6_0 {
+ groups = "sd6_grp0";
+ function = "sd6_m0";
+ };
+ };
+
+ sd6_pmx1: sd6@1 {
+ sd6_1 {
+ groups = "sd6_grp1";
+ function = "sd6_m1";
+ };
+ };
+
+ sp0_ext_ldo_on_pmx: sp0_ext_ldo_on@0 {
+ sp0_ext_ldo_on {
+ groups = "sp0_ext_ldo_on_grp";
+ function = "sp0_ext_ldo_on";
+ };
+ };
+
+ sp0_qspi_pmx: sp0_qspi@0 {
+ sp0_qspi {
+ groups = "sp0_qspi_grp";
+ function = "sp0_qspi";
+ };
+ };
+
+ sp1_spi_pmx: sp1_spi@0 {
+ sp1_spi {
+ groups = "sp1_spi_grp";
+ function = "sp1_spi";
+ };
+ };
+
+ tpiu_trace_pmx: tpiu_trace@0 {
+ tpiu_trace {
+ groups = "tpiu_trace_grp";
+ function = "tpiu_trace";
+ };
+ };
+
+ uart0_pmx: uart0@0 {
+ uart0 {
+ groups = "uart0_grp";
+ function = "uart0";
+ };
+ };
+
+ uart0_nopause_pmx: uart0_nopause@0 {
+ uart0_nopause {
+ groups = "uart0_nopause_grp";
+ function = "uart0_nopause";
+ };
+ };
+
+ uart1_pmx: uart1@0 {
+ uart1 {
+ groups = "uart1_grp";
+ function = "uart1";
+ };
+ };
+
+ uart2_pmx: uart2@0 {
+ uart2 {
+ groups = "uart2_grp";
+ function = "uart2";
+ };
+ };
+
+ uart3_pmx0: uart3@0 {
+ uart3_0 {
+ groups = "uart3_grp0";
+ function = "uart3_m0";
+ };
+ };
+
+ uart3_pmx1: uart3@1 {
+ uart3_1 {
+ groups = "uart3_grp1";
+ function = "uart3_m1";
+ };
+ };
+
+ uart3_pmx2: uart3@2 {
+ uart3_2 {
+ groups = "uart3_grp2";
+ function = "uart3_m2";
+ };
+ };
+
+ uart3_pmx3: uart3@3 {
+ uart3_3 {
+ groups = "uart3_grp3";
+ function = "uart3_m3";
+ };
+ };
+
+ uart3_nopause_pmx0: uart3_nopause@0 {
+ uart3_nopause_0 {
+ groups = "uart3_nopause_grp0";
+ function = "uart3_nopause_m0";
+ };
+ };
+
+ uart3_nopause_pmx1: uart3_nopause@1 {
+ uart3_nopause_1 {
+ groups = "uart3_nopause_grp1";
+ function = "uart3_nopause_m1";
+ };
+ };
+
+ uart4_pmx0: uart4@0 {
+ uart4_0 {
+ groups = "uart4_grp0";
+ function = "uart4_m0";
+ };
+ };
+
+ uart4_pmx1: uart4@1 {
+ uart4_1 {
+ groups = "uart4_grp1";
+ function = "uart4_m1";
+ };
+ };
+
+ uart4_pmx2: uart4@2 {
+ uart4_2 {
+ groups = "uart4_grp2";
+ function = "uart4_m2";
+ };
+ };
+
+ uart4_nopause_pmx: uart4_nopause@0 {
+ uart4_nopause {
+ groups = "uart4_nopause_grp";
+ function = "uart4_nopause";
+ };
+ };
+
+ usb0_drvvbus_pmx: usb0_drvvbus@0 {
+ usb0_drvvbus {
+ groups = "usb0_drvvbus_grp";
+ function = "usb0_drvvbus";
+ };
+ };
+
+ usb1_drvvbus_pmx: usb1_drvvbus@0 {
+ usb1_drvvbus {
+ groups = "usb1_drvvbus_grp";
+ function = "usb1_drvvbus";
+ };
+ };
+
+ visbus_dout_pmx: visbus_dout@0 {
+ visbus_dout {
+ groups = "visbus_dout_grp";
+ function = "visbus_dout";
+ };
+ };
+
+ vi_vip1_pmx: vi_vip1@0 {
+ vi_vip1 {
+ groups = "vi_vip1_grp";
+ function = "vi_vip1";
+ };
+ };
+
+ vi_vip1_ext_pmx: vi_vip1_ext@0 {
+ vi_vip1_ext {
+ groups = "vi_vip1_ext_grp";
+ function = "vi_vip1_ext";
+ };
+ };
+
+ vi_vip1_low8bit_pmx: vi_vip1_low8bit@0 {
+ vi_vip1_low8bit {
+ groups = "vi_vip1_low8bit_grp";
+ function = "vi_vip1_low8bit";
+ };
+ };
+
+ vi_vip1_high8bit_pmx: vi_vip1_high8bit@0 {
+ vi_vip1_high8bit {
+ groups = "vi_vip1_high8bit_grp";
+ function = "vi_vip1_high8bit";
+ };
+ };
};
pmipc {
@@ -356,6 +1375,12 @@
clock-names = "gpio0_io";
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <2>;
+ gpio-ranges = <&pinctrl 0 0 0>,
+ <&pinctrl 32 0 0>;
+ gpio-ranges-group-names = "lvds_gpio_grp",
+ "uart_nand_gpio_grp";
};
nand@17050000 {
@@ -461,11 +1486,22 @@
#interrupt-cells = <2>;
compatible = "sirf,atlas7-gpio";
reg = <0x13300000 0x1000>;
- interrupts = <0 43 0>, <0 44 0>, <0 45 0>;
+ interrupts = <0 43 0>, <0 44 0>,
+ <0 45 0>, <0 46 0>;
clocks = <&car 84>;
clock-names = "gpio1_io";
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <4>;
+ gpio-ranges = <&pinctrl 0 0 0>,
+ <&pinctrl 32 0 0>,
+ <&pinctrl 64 0 0>,
+ <&pinctrl 96 0 0>;
+ gpio-ranges-group-names = "gnss_gpio_grp",
+ "lcd_vip_gpio_grp",
+ "sdio_i2s_gpio_grp",
+ "sp_rgmii_gpio_grp";
};
sd2: sdhci@14200000 {
@@ -744,6 +1780,10 @@
interrupts = <0 47 0>;
gpio-controller;
interrupt-controller;
+
+ gpio-banks = <1>;
+ gpio-ranges = <&pinctrl 0 0 0>;
+ gpio-ranges-group-names = "rtc_gpio_grp";
};
rtc-iobg@18840000 {
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
index 107395c32d82..17f63f7dfd9e 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
@@ -150,6 +150,16 @@
interface-type = "ace";
reg = <0x5000 0x1000>;
};
+
+ pmu@9000 {
+ compatible = "arm,cci-400-pmu,r0";
+ reg = <0x9000 0x5000>;
+ interrupts = <0 105 4>,
+ <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>;
+ };
};
memory-controller@7ffd0000 {
@@ -187,11 +197,22 @@
<1 10 0xf08>;
};
- pmu {
+ pmu_a15 {
compatible = "arm,cortex-a15-pmu";
interrupts = <0 68 4>,
<0 69 4>;
- interrupt-affinity = <&cpu0>, <&cpu1>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>;
+ };
+
+ pmu_a7 {
+ compatible = "arm,cortex-a7-pmu";
+ interrupts = <0 128 4>,
+ <0 129 4>,
+ <0 130 4>;
+ interrupt-affinity = <&cpu2>,
+ <&cpu3>,
+ <&cpu4>;
};
oscclk6a: oscclk6a {
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 92e54d7c6f46..2b25b6038f66 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -65,14 +65,10 @@ static int mcpm_cpu_kill(unsigned int cpu)
return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
}
-static int mcpm_cpu_disable(unsigned int cpu)
+static bool mcpm_cpu_can_disable(unsigned int cpu)
{
- /*
- * We assume all CPUs may be shut down.
- * This would be the hook to use for eventual Secure
- * OS migration requests as described in the PSCI spec.
- */
- return 0;
+ /* We assume all CPUs may be shut down. */
+ return true;
}
static void mcpm_cpu_die(unsigned int cpu)
@@ -92,7 +88,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
.smp_secondary_init = mcpm_secondary_init,
#ifdef CONFIG_HOTPLUG_CPU
.cpu_kill = mcpm_cpu_kill,
- .cpu_disable = mcpm_cpu_disable,
+ .cpu_can_disable = mcpm_cpu_can_disable,
.cpu_die = mcpm_cpu_die,
#endif
};
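The mcpm change above is part of a wider conversion from the int-returning cpu_disable hook to a bool cpu_can_disable predicate (see the new struct member in asm/smp.h and platform_can_hotplug_cpu() in asm/smp_plat.h later in this diff). A simplified sketch of how the generic side can honour both hooks during the transition; this is illustrative, not the literal arch/arm/kernel/smp.c hunk:

        int platform_can_hotplug_cpu(unsigned int cpu)
        {
                /* Prefer the new predicate; fall back to the legacy hook. */
                if (smp_ops.cpu_can_disable)
                        return smp_ops.cpu_can_disable(cpu);
                if (smp_ops.cpu_disable)
                        return smp_ops.cpu_disable(cpu) == 0;   /* 0 meant "ok to disable" */

                /* Assumption: by default only CPU0 is pinned online. */
                return cpu != 0;
        }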
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 6d83a1bf0c74..5fd8df6f50ea 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -353,7 +353,6 @@ CONFIG_POWER_RESET_AS3722=y
CONFIG_POWER_RESET_GPIO=y
CONFIG_POWER_RESET_GPIO_RESTART=y
CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_SUN6I=y
CONFIG_POWER_RESET_RMOBILE=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 8ecba00dcd83..7ebc346bf9fa 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -2,6 +2,7 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_PERF_EVENTS=y
+CONFIG_MODULES=y
CONFIG_ARCH_SUNXI=y
CONFIG_SMP=y
CONFIG_NR_CPUS=8
@@ -77,7 +78,6 @@ CONFIG_SPI_SUN6I=y
CONFIG_GPIO_SYSFS=y
CONFIG_POWER_SUPPLY=y
CONFIG_POWER_RESET=y
-CONFIG_POWER_RESET_SUN6I=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
CONFIG_WATCHDOG=y
@@ -87,6 +87,10 @@ CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_GPIO=y
+CONFIG_FB=y
+CONFIG_FB_SIMPLE=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 83c50193626c..517ef6dd22b9 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -12,7 +12,6 @@ generic-y += irq_regs.h
generic-y += kdebug.h
generic-y += local.h
generic-y += local64.h
-generic-y += mcs_spinlock.h
generic-y += msgbuf.h
generic-y += param.h
generic-y += parport.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5a5504f90d5f..7bbf325a4f31 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -449,6 +449,53 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_disable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_DISABLE
+ mcr p15, 0, \tmp, c3, c0, 0 @ Set domain register
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_enable, tmp, isb=1
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Whenever we re-enter userspace, the domains should always be
+ * set appropriately.
+ */
+ mov \tmp, #DACR_UACCESS_ENABLE
+ mcr p15, 0, \tmp, c3, c0, 0
+ .if \isb
+ instr_sync
+ .endif
+#endif
+ .endm
+
+ .macro uaccess_save, tmp
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ mrc p15, 0, \tmp, c3, c0, 0
+ str \tmp, [sp, #S_FRAME_SIZE]
+#endif
+ .endm
+
+ .macro uaccess_restore
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ ldr r0, [sp, #S_FRAME_SIZE]
+ mcr p15, 0, r0, c3, c0, 0
+#endif
+ .endm
+
+ .macro uaccess_save_and_disable, tmp
+ uaccess_save \tmp
+ uaccess_disable \tmp
+ .endm
+
.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 6c2327e1c732..3d8f1d3ad9a7 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -2,7 +2,6 @@
#define __ASM_BARRIER_H
#ifndef __ASSEMBLY__
-#include <asm/outercache.h>
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
@@ -37,12 +36,20 @@
#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
+#ifdef CONFIG_ARM_HEAVY_MB
+extern void (*soc_mb)(void);
+extern void arm_heavy_mb(void);
+#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0)
+#else
+#define __arm_heavy_mb(x...) dsb(x)
+#endif
+
#ifdef CONFIG_ARCH_HAS_BARRIERS
#include <mach/barriers.h>
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
-#define mb() do { dsb(); outer_sync(); } while (0)
+#define mb() __arm_heavy_mb()
#define rmb() dsb()
-#define wmb() do { dsb(st); outer_sync(); } while (0)
+#define wmb() __arm_heavy_mb(st)
#define dma_rmb() dmb(osh)
#define dma_wmb() dmb(oshst)
#else
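The outer_sync()-based mb()/wmb() is replaced by arm_heavy_mb(), which performs the dsb and then calls an optional SoC-provided soc_mb() hook (the OMAP4 and prima2 changes elsewhere in this diff install such hooks). A hypothetical example of an SoC registering one; the register, mapping and function names are invented:

        #include <linux/init.h>
        #include <linux/io.h>

        static void __iomem *intercon_drain;    /* assumed to be mapped at SoC init */

        static void myplat_mb(void)
        {
                /* e.g. a posted write that drains the interconnect write buffer */
                writel_relaxed(1, intercon_drain);
        }

        static void __init myplat_init_barriers(void)
        {
                soc_mb = myplat_mb;     /* consumed by arm_heavy_mb() on every mb()/wmb() */
        }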
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index 56380995f4c3..e943e6cee254 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -35,9 +35,9 @@
static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p |= mask;
@@ -47,9 +47,9 @@ static inline void ____atomic_set_bit(unsigned int bit, volatile unsigned long *
static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p &= ~mask;
@@ -59,9 +59,9 @@ static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long
static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
*p ^= mask;
@@ -73,9 +73,9 @@ ____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -90,9 +90,9 @@ ____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
@@ -107,9 +107,9 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p)
{
unsigned long flags;
unsigned int res;
- unsigned long mask = 1UL << (bit & 31);
+ unsigned long mask = BIT_MASK(bit);
- p += bit >> 5;
+ p += BIT_WORD(bit);
raw_local_irq_save(flags);
res = *p;
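The bitops conversion above is purely cosmetic on 32-bit ARM: with BITS_PER_LONG == 32 the generic macros expand to exactly the open-coded expressions they replace. Paraphrasing the definitions from include/linux/bitops.h:

        #define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))   /* == 1UL << (bit & 31) */
        #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)            /* == bit >> 5 */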
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 4812cda8fd17..d5525bfc7e3e 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -140,8 +140,6 @@ extern struct cpu_cache_fns cpu_cache;
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-#define dmac_map_area cpu_cache.dma_map_area
-#define dmac_unmap_area cpu_cache.dma_unmap_area
#define dmac_flush_range cpu_cache.dma_flush_range
#else
@@ -161,8 +159,6 @@ extern void __cpuc_flush_dcache_area(void *, size_t);
* is visible to DMA, or data written by DMA to system memory is
* visible to the CPU.
*/
-extern void dmac_map_area(const void *, size_t, int);
-extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);
#endif
@@ -506,4 +502,21 @@ static inline void set_kernel_text_ro(void) { }
void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
void *kaddr, unsigned long len);
+/**
+ * secure_flush_area - ensure coherency across the secure boundary
+ * @addr: virtual address
+ * @size: size of region
+ *
+ * Ensure that the specified area of memory is coherent across the secure
+ * boundary from the non-secure side. This is used when calling secure
+ * firmware where the secure firmware does not ensure coherency.
+ */
+static inline void secure_flush_area(const void *addr, size_t size)
+{
+ phys_addr_t phys = __pa(addr);
+
+ __cpuc_flush_dcache_area((void *)addr, size);
+ outer_flush_range(phys, phys + size);
+}
+
#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index b52101d37ec7..a68b9d8a71fe 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -14,7 +14,7 @@
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
-#define DMA_ERROR_CODE (~0)
+#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;
diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe446425e..e878129f2fee 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -34,15 +34,14 @@
*/
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
-#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2
-#define DOMAIN_TABLE 2
#define DOMAIN_USER 1
#define DOMAIN_IO 0
#endif
+#define DOMAIN_VECTORS 3
/*
* Domain types
@@ -55,11 +54,46 @@
#define DOMAIN_MANAGER 1
#endif
-#define domain_val(dom,type) ((type) << (2*(dom)))
+#define domain_mask(dom) ((3) << (2 * (dom)))
+#define domain_val(dom,type) ((type) << (2 * (dom)))
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_NOACCESS) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#else
+#define DACR_INIT \
+ (domain_val(DOMAIN_USER, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT))
+#endif
+
+#define __DACR_DEFAULT \
+ domain_val(DOMAIN_KERNEL, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_IO, DOMAIN_CLIENT) | \
+ domain_val(DOMAIN_VECTORS, DOMAIN_CLIENT)
+
+#define DACR_UACCESS_DISABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+#define DACR_UACCESS_ENABLE \
+ (__DACR_DEFAULT | domain_val(DOMAIN_USER, DOMAIN_CLIENT))
#ifndef __ASSEMBLY__
-#ifdef CONFIG_CPU_USE_DOMAINS
+static inline unsigned int get_domain(void)
+{
+ unsigned int domain;
+
+ asm(
+ "mrc p15, 0, %0, c3, c0 @ get domain"
+ : "=r" (domain));
+
+ return domain;
+}
+
static inline void set_domain(unsigned val)
{
asm volatile(
@@ -68,17 +102,16 @@ static inline void set_domain(unsigned val)
isb();
}
+#ifdef CONFIG_CPU_USE_DOMAINS
#define modify_domain(dom,type) \
do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
+ unsigned int domain = get_domain(); \
+ domain &= ~domain_mask(dom); \
+ domain = domain | domain_val(dom, type); \
+ set_domain(domain); \
} while (0)
#else
-static inline void set_domain(unsigned val) { }
static inline void modify_domain(unsigned dom, unsigned type) { }
#endif
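With the non-CONFIG_IO_36 numbering above (KERNEL=0, USER=1, IO=2, VECTORS=3) and DOMAIN_CLIENT=1 / DOMAIN_NOACCESS=0, each domain occupies two bits of the DACR (domain_val() shifts by 2*dom), so the new constants work out to small immediates, which is why the uaccess_enable/uaccess_disable assembler macros can load them with a single mov:

        __DACR_DEFAULT       = (1 << 0) | (1 << 4) | (1 << 6) = 0x51
        DACR_UACCESS_DISABLE = 0x51 | (0 << 2)                 = 0x51
        DACR_UACCESS_ENABLE  = 0x51 | (1 << 2)                 = 0x55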
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 0415eae1df27..58cfe9f1a687 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
enum fixed_addresses {
- FIX_KMAP_BEGIN,
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
+#define FIXMAP_PAGE_COMMON (L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL (FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO (FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE FIXMAP_PAGE_IO
+
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
#include <asm-generic/fixmap.h>
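FIX_EARLYCON_MEM_BASE and FIXMAP_PAGE_IO exist so that the generic earlycon code can map a UART before ioremap() is usable, which is what the new FIX_EARLYCON_MEM Kconfig symbol at the top of this diff enables. Roughly what drivers/tty/serial/earlycon.c does with them (paraphrased, so treat the details as approximate):

        static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
        {
                void __iomem *base;

                /* set_fixmap_io() comes from asm-generic/fixmap.h and uses FIXMAP_PAGE_IO */
                set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
                base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
                base += paddr & ~PAGE_MASK;
                return base;
        }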
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 5eed82809d82..6795368ad023 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -22,8 +22,11 @@
#ifdef CONFIG_SMP
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags; \
smp_mb(); \
prefetchw(uaddr); \
+ __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: ldrex %1, [%3]\n" \
" " insn "\n" \
@@ -34,12 +37,15 @@
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret;
u32 val;
@@ -49,6 +55,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrex %1, [%4]\n"
" teq %1, %2\n"
@@ -61,6 +68,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
smp_mb();
*uval = val;
@@ -73,6 +81,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
+({ \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
__asm__ __volatile__( \
"1: " TUSER(ldr) " %1, [%3]\n" \
" " insn "\n" \
@@ -81,12 +91,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
__futex_atomic_ex_table("%5") \
: "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \
: "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "cc", "memory"); \
+ uaccess_restore(__ua_flags); \
+})
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
+ unsigned int __ua_flags;
int ret = 0;
u32 val;
@@ -94,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
preempt_disable();
+ __ua_flags = uaccess_save_and_enable();
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: " TUSER(ldr) " %1, [%4]\n"
" teq %1, %2\n"
@@ -103,6 +117,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "+r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ uaccess_restore(__ua_flags);
*uval = val;
preempt_enable();
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index a3c24cd5b7c8..cab07f69382d 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -158,8 +158,6 @@ static inline void nop_dma_unmap_area(const void *s, size_t l, int f) { }
#define __cpuc_coherent_user_range __glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area __glue(_CACHE,_flush_kern_dcache_area)
-#define dmac_map_area __glue(_CACHE,_dma_map_area)
-#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range __glue(_CACHE,_dma_flush_range)
#endif
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 1c3938f26beb..485982084fe9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -140,16 +140,11 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
* The _caller variety takes a __builtin_return_address(0) value for
* /proc/vmalloc to use - and should only be used in non-inline functions.
*/
-extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
- size_t, unsigned int, void *);
extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
void *);
-
extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
extern void __iounmap(volatile void __iomem *addr);
-extern void __arm_iounmap(volatile void __iomem *addr);
extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *);
@@ -321,21 +316,24 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
static inline void memset_io(volatile void __iomem *dst, unsigned c,
size_t count)
{
- memset((void __force *)dst, c, count);
+ extern void mmioset(void *, unsigned int, size_t);
+ mmioset((void __force *)dst, c, count);
}
#define memset_io(dst,c,count) memset_io(dst,c,count)
static inline void memcpy_fromio(void *to, const volatile void __iomem *from,
size_t count)
{
- memcpy(to, (const void __force *)from, count);
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy(to, (const void __force *)from, count);
}
#define memcpy_fromio(to,from,count) memcpy_fromio(to,from,count)
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
size_t count)
{
- memcpy((void __force *)to, from, count);
+ extern void mmiocpy(void *, const void *, size_t);
+ mmiocpy((void __force *)to, from, count);
}
#define memcpy_toio(to,from,count) memcpy_toio(to,from,count)
@@ -348,18 +346,61 @@ static inline void memcpy_toio(volatile void __iomem *to, const void *from,
#endif /* readl */
/*
- * ioremap and friends.
+ * ioremap() and friends.
+ *
+ * ioremap() takes a resource address, and size. Due to the ARM memory
+ * types, it is important to use the correct ioremap() function as each
+ * mapping has specific properties.
+ *
+ * Function Memory type Cacheability Cache hint
+ * ioremap() Device n/a n/a
+ * ioremap_nocache() Device n/a n/a
+ * ioremap_cache() Normal Writeback Read allocate
+ * ioremap_wc() Normal Non-cacheable n/a
+ * ioremap_wt() Normal Non-cacheable n/a
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
+ * - writes may be delayed before they hit the endpoint device
*
- * ioremap takes a PCI memory address, as specified in
- * Documentation/io-mapping.txt.
+ * ioremap_nocache() is the same as ioremap() as there are too many device
+ * drivers using this for device registers, and documentation which tells
+ * people to use it for such for this to be any different. This is not a
+ * safe fallback for memory-like mappings, or memory regions where the
+ * compiler may generate unaligned accesses - eg, via inlining its own
+ * memcpy.
*
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ * - ordering is not guaranteed without explicit dependencies or barrier
+ * instructions
+ * - writes may be delayed before they hit the endpoint memory
+ *
+ * The cache hint is only a performance hint: CPUs may alias these hints.
+ * Eg, a CPU not implementing read allocate but implementing write allocate
+ * will provide a write allocate mapping instead.
*/
-#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
-#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
-#define ioremap_wt(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define iounmap __arm_iounmap
+void __iomem *ioremap(resource_size_t res_cookie, size_t size);
+#define ioremap ioremap
+#define ioremap_nocache ioremap
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size);
+#define ioremap_cache ioremap_cache
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size);
+#define ioremap_wc ioremap_wc
+#define ioremap_wt ioremap_wc
+
+void iounmap(volatile void __iomem *iomem_cookie);
+#define iounmap iounmap
/*
* io{read,write}{16,32}be() macros
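A short usage sketch for the mapping table above; the register offset, the enable value and the idea of a write-combined frame buffer are invented for illustration:

        #include <linux/errno.h>
        #include <linux/io.h>
        #include <linux/ioport.h>

        static int probe_example(struct resource *regs_res, phys_addr_t fb_phys,
                                 size_t fb_size)
        {
                void __iomem *regs, *fb;

                regs = ioremap(regs_res->start, resource_size(regs_res)); /* Device type */
                fb = ioremap_wc(fb_phys, fb_size);      /* Normal, non-cacheable, merges writes */
                if (!regs || !fb) {
                        if (regs)
                                iounmap(regs);
                        if (fb)
                                iounmap(fb);
                        return -ENOMEM;
                }

                writel(0x1, regs + 0x10);               /* hypothetical enable register */
                memset_io(fb, 0, fb_size);              /* uses mmioset(), per the hunk above */

                iounmap(fb);
                iounmap(regs);
                return 0;
        }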
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 3a72d69b3255..b7f6fb462ea0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -275,7 +275,7 @@ static inline void *phys_to_virt(phys_addr_t x)
*/
#define __pa(x) __virt_to_phys((unsigned long)(x))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
-#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
+#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
@@ -286,7 +286,7 @@ extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
*/
static inline phys_addr_t __virt_to_idmap(unsigned long x)
{
- if (arch_virt_to_idmap)
+ if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
return arch_virt_to_idmap(x);
else
return __virt_to_phys(x);
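The pfn_to_kaddr() cast matters on LPAE systems with RAM above 4GB: without it the shift is done in 32-bit unsigned long and the physical address is truncated before __va() ever sees it. A worked example, assuming a platform whose RAM starts at physical 0x8_8000_0000:

        pfn = 0x880000                       /* page at physical 0x8_8000_0000       */
        pfn << PAGE_SHIFT                    /* 32-bit shift: truncates to 0x8000_0000 */
        (phys_addr_t)pfn << PAGE_SHIFT       /* 64-bit shift: 0x8_8000_0000, as intended */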
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 563b92fc2f41..c2bf24f40177 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -129,21 +129,4 @@ static inline void outer_resume(void) { }
#endif
-#ifdef CONFIG_OUTER_CACHE_SYNC
-/**
- * outer_sync - perform a sync point for outer cache
- *
- * Ensure that all outer cache operations are complete and any store
- * buffers are drained.
- */
-static inline void outer_sync(void)
-{
- if (outer_cache.sync)
- outer_cache.sync();
-}
-#else
-static inline void outer_sync(void)
-{ }
-#endif
-
#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5e68278e953e..d0131ee6f6af 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -23,6 +23,7 @@
#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
+#define PMD_DOMAIN_MASK PMD_DOMAIN(0x0f)
#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
/*
* - section
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index bfd662e49a25..aeddd28b3595 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -129,7 +129,36 @@
/*
* These are the memory types, defined to be compatible with
- * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
+ * pre-ARMv6 CPUs cacheable and bufferable bits: n/a,n/a,C,B
+ * ARMv6+ without TEX remapping, they are a table index.
+ * ARMv6+ with TEX remapping, they correspond to n/a,TEX(0),C,B
+ *
+ * MT type Pre-ARMv6 ARMv6+ type / cacheable status
+ * UNCACHED Uncached Strongly ordered
+ * BUFFERABLE Bufferable Normal memory / non-cacheable
+ * WRITETHROUGH Writethrough Normal memory / write through
+ * WRITEBACK Writeback Normal memory / write back, read alloc
+ * MINICACHE Minicache N/A
+ * WRITEALLOC Writeback Normal memory / write back, write alloc
+ * DEV_SHARED Uncached Device memory (shared)
+ * DEV_NONSHARED Uncached Device memory (non-shared)
+ * DEV_WC Bufferable Normal memory / non-cacheable
+ * DEV_CACHED Writeback Normal memory / write back, read alloc
+ * VECTORS Variable Normal memory / variable
+ *
+ * All normal memory mappings have the following properties:
+ * - reads can be repeated with no side effects
+ * - repeated reads return the last value written
+ * - reads can fetch additional locations without side effects
+ * - writes can be repeated (in certain cases) with no side effects
+ * - writes can be merged before accessing the target
+ * - unaligned accesses can be supported
+ *
+ * All device mappings have the following properties:
+ * - no access speculation
+ * - no repetition (eg, on return from an exception)
+ * - number, order and size of accesses are maintained
+ * - unaligned accesses are "unpredictable"
*/
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0x00) << 2) /* 0000 */
#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 0x01) << 2) /* 0001 */
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 2f3ac1ba6fb4..ef356659b4f4 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -74,7 +74,6 @@ extern void secondary_startup_arm(void);
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
-extern void cpu_die(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -105,6 +104,7 @@ struct smp_operations {
#ifdef CONFIG_HOTPLUG_CPU
int (*cpu_kill)(unsigned int cpu);
void (*cpu_die)(unsigned int cpu);
+ bool (*cpu_can_disable)(unsigned int cpu);
int (*cpu_disable)(unsigned int cpu);
#endif
#endif
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 993e5224d8f7..f9080717fc88 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -107,4 +107,13 @@ static inline u32 mpidr_hash_size(void)
extern int platform_can_secondary_boot(void);
extern int platform_can_cpu_hotplug(void);
+#ifdef CONFIG_HOTPLUG_CPU
+extern int platform_can_hotplug_cpu(unsigned int cpu);
+#else
+static inline int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ return 0;
+}
+#endif
+
#endif
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index c99e259469f7..12ebfcc1d539 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -10,7 +10,9 @@
* CPU.
*/
#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
-#define finish_arch_switch(prev) dsb(ish)
+#define __complete_pending_tlbi() dsb(ish)
+#else
+#define __complete_pending_tlbi()
#endif
/*
@@ -22,6 +24,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
#define switch_to(prev,next,last) \
do { \
+ __complete_pending_tlbi(); \
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 71e0ffcedf8e..d0a1119dcaf3 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -74,9 +74,6 @@ struct thread_info {
.flags = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
}
#define init_thread_info (init_thread_union.thread_info)
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 4cf54ebe408a..8cc85a4ebec2 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -50,6 +50,35 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two functions allow hooking accesses to userspace to increase
+ * system integrity by ensuring that the kernel cannot inadvertently
+ * perform such accesses (eg, via list poison values) which could then
+ * be exploited for privilege escalation.
+ */
+static inline unsigned int uaccess_save_and_enable(void)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ unsigned int old_domain = get_domain();
+
+ /* Set the current domain access to permit user accesses */
+ set_domain((old_domain & ~domain_mask(DOMAIN_USER)) |
+ domain_val(DOMAIN_USER, DOMAIN_CLIENT));
+
+ return old_domain;
+#else
+ return 0;
+#endif
+}
+
+static inline void uaccess_restore(unsigned int flags)
+{
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /* Restore the user access mask */
+ set_domain(flags);
+#endif
+}
+
+/*
* These two are intentionally not defined anywhere - if the kernel
* code generates any references to them, that's a bug.
*/
@@ -165,6 +194,7 @@ extern int __get_user_64t_4(void *);
register typeof(x) __r2 asm("r2"); \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
if (sizeof((x)) >= 8) \
@@ -192,6 +222,7 @@ extern int __get_user_64t_4(void *);
break; \
default: __e = __get_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
x = (typeof(*(p))) __r2; \
__e; \
})
@@ -224,6 +255,7 @@ extern int __put_user_8(void *, unsigned long long);
register const typeof(*(p)) __user *__p asm("r0") = __tmp_p; \
register unsigned long __l asm("r1") = __limit; \
register int __e asm("r0"); \
+ unsigned int __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(__p))) { \
case 1: \
__put_user_x(__r2, __p, __e, __l, 1); \
@@ -239,6 +271,7 @@ extern int __put_user_8(void *, unsigned long long);
break; \
default: __e = __put_user_bad(); break; \
} \
+ uaccess_restore(__ua_flags); \
__e; \
})
@@ -300,14 +333,17 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_addr = (unsigned long)(ptr); \
unsigned long __gu_val; \
+ unsigned int __ua_flags; \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \
case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \
case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \
default: (__gu_val) = __get_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
(x) = (__typeof__(*(ptr)))__gu_val; \
} while (0)
@@ -369,9 +405,11 @@ do { \
#define __put_user_err(x, ptr, err) \
do { \
unsigned long __pu_addr = (unsigned long)(ptr); \
+ unsigned int __ua_flags; \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
might_fault(); \
+ __ua_flags = uaccess_save_and_enable(); \
switch (sizeof(*(ptr))) { \
case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \
case 2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \
@@ -379,6 +417,7 @@ do { \
case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \
default: __put_user_bad(); \
} \
+ uaccess_restore(__ua_flags); \
} while (0)
#define __put_user_asm(x, __pu_addr, err, instr) \
@@ -451,11 +490,46 @@ do { \
#ifdef CONFIG_MMU
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+arm_copy_from_user(void *to, const void __user *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_from_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check
+__copy_to_user_std(void __user *to, const void *from, unsigned long n);
+
+static inline unsigned long __must_check
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_copy_to_user(to, from, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
+extern unsigned long __must_check
+arm_clear_user(void __user *addr, unsigned long n);
+extern unsigned long __must_check
+__clear_user_std(void __user *addr, unsigned long n);
+
+static inline unsigned long __must_check
+__clear_user(void __user *addr, unsigned long n)
+{
+ unsigned int __ua_flags = uaccess_save_and_enable();
+ n = arm_clear_user(addr, n);
+ uaccess_restore(__ua_flags);
+ return n;
+}
+
#else
#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
@@ -488,6 +562,7 @@ static inline unsigned long __must_check clear_user(void __user *to, unsigned lo
return n;
}
+/* These are from lib/ code, and use __get_user() and friends */
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
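A minimal sketch of the bracketing pattern these helpers establish: the raw accessors (arm_copy_from_user() and friends) now run only inside a uaccess_save_and_enable()/uaccess_restore() pair, so DOMAIN_USER is opened just for the duration of the access, and restoring the saved DACR value keeps nesting safe. The wrapper name below is hypothetical; the real wrappers are the __copy_*_user()/__clear_user() inlines added above.

static inline unsigned long hypothetical_zero_user(void __user *p, unsigned long n)
{
	unsigned int ua_flags = uaccess_save_and_enable();	/* open user access, save old DACR */

	n = arm_clear_user(p, n);	/* raw accessor may now touch userspace */
	uaccess_restore(ua_flags);	/* back to "no access" (or whatever the caller had) */
	return n;			/* bytes NOT cleared, as before */
}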
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index a88671cfe1ff..f89811fb9a55 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -50,6 +50,9 @@ extern void __aeabi_ulcmp(void);
extern void fpundefinstr(void);
+void mmioset(void *, unsigned int, size_t);
+void mmiocpy(void *, const void *, size_t);
+
/* platform dependent support */
EXPORT_SYMBOL(arm_delay_ops);
@@ -88,12 +91,15 @@ EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
+EXPORT_SYMBOL(mmioset);
+EXPORT_SYMBOL(mmiocpy);
+
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
-EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(arm_copy_from_user);
+EXPORT_SYMBOL(arm_copy_to_user);
+EXPORT_SYMBOL(arm_clear_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7dac3086e361..3e1c26eb32b4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -149,10 +149,10 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif
- .macro svc_entry, stack_hole=0, trace=1
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.save {r0 - pc} )
- sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ sub sp, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
SPFIX( str r0, [sp] ) @ temporarily saved
SPFIX( mov r0, sp )
@@ -167,7 +167,7 @@ ENDPROC(__und_invalid)
ldmia r0, {r3 - r5}
add r7, sp, #S_SP - 4 @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+ add r2, sp, #(S_FRAME_SIZE + 8 + \stack_hole - 4)
SPFIX( addeq r2, r2, #4 )
str r3, [sp, #-4]! @ save the "real" r0 copied
@ from the exception stack
@@ -185,6 +185,11 @@ ENDPROC(__und_invalid)
@
stmia r7, {r2 - r6}
+ uaccess_save r0
+ .if \uaccess
+ uaccess_disable r0
+ .endif
+
.if \trace
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
@@ -194,7 +199,7 @@ ENDPROC(__und_invalid)
.align 5
__dabt_svc:
- svc_entry
+ svc_entry uaccess=0
mov r2, sp
dabt_helper
THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
@@ -368,7 +373,7 @@ ENDPROC(__fiq_abt)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
- .macro usr_entry, trace=1
+ .macro usr_entry, trace=1, uaccess=1
UNWIND(.fnstart )
UNWIND(.cantunwind ) @ don't unwind the user space
sub sp, sp, #S_FRAME_SIZE
@@ -400,6 +405,10 @@ ENDPROC(__fiq_abt)
ARM( stmdb r0, {sp, lr}^ )
THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
+ .if \uaccess
+ uaccess_disable ip
+ .endif
+
@ Enable the alignment trap while in kernel mode
ATRAP( teq r8, r7)
ATRAP( mcrne p15, 0, r8, c1, c0, 0)
@@ -410,7 +419,7 @@ ENDPROC(__fiq_abt)
zero_fp
.if \trace
-#ifdef CONFIG_IRQSOFF_TRACER
+#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit save = 0
@@ -435,7 +444,7 @@ ENDPROC(__fiq_abt)
.align 5
__dabt_usr:
- usr_entry
+ usr_entry uaccess=0
kuser_cmpxchg_check
mov r2, sp
dabt_helper
@@ -458,7 +467,7 @@ ENDPROC(__irq_usr)
.align 5
__und_usr:
- usr_entry
+ usr_entry uaccess=0
mov r2, r4
mov r3, r5
@@ -484,6 +493,8 @@ __und_usr:
1: ldrt r0, [r4]
ARM_BE8(rev r0, r0) @ little endian instruction
+ uaccess_disable ip
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@@ -518,9 +529,10 @@ __und_usr_thumb:
2: ldrht r5, [r4]
ARM_BE8(rev16 r5, r5) @ little endian instruction
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_fault_16 @ 16bit undefined instruction
+ blo __und_usr_fault_16_pan @ 16bit undefined instruction
3: ldrht r0, [r2]
ARM_BE8(rev16 r0, r0) @ little endian instruction
+ uaccess_disable ip
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
@@ -715,6 +727,8 @@ ENDPROC(no_fp)
__und_usr_fault_32:
mov r1, #4
b 1f
+__und_usr_fault_16_pan:
+ uaccess_disable ip
__und_usr_fault_16:
mov r1, #2
1: mov r0, sp
@@ -770,6 +784,8 @@ ENTRY(__switch_to)
ldr r4, [r2, #TI_TP_VALUE]
ldr r5, [r2, #TI_TP_VALUE + 4]
#ifdef CONFIG_CPU_USE_DOMAINS
+ mrc p15, 0, r6, c3, c0, 0 @ Get domain register
+ str r6, [r1, #TI_CPU_DOMAIN] @ Save old domain register
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index d83a40d8e055..30a7228eaceb 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -81,6 +81,7 @@ slow_work_pending:
movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
ldmia sp, {r0 - r6} @ have to reload r0 - r6
b local_restart @ ... and off we go
+ENDPROC(ret_fast_syscall)
/*
* "slow" syscall return path. "why" tells us if this was a real syscall.
@@ -196,6 +197,8 @@ ENTRY(vector_swi)
USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
+ uaccess_disable tbl
+
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 1a0045abead7..0d22ad206d52 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -196,7 +196,7 @@
msr cpsr_c, \rtemp @ switch back to the SVC mode
.endm
-#ifndef CONFIG_THUMB2_KERNEL
+
.macro svc_exit, rpsr, irq = 0
.if \irq != 0
@ IRQs already off
@@ -215,6 +215,10 @@
blne trace_hardirqs_off
#endif
.endif
+ uaccess_restore
+
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode SVC restore
msr spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@@ -222,6 +226,20 @@
strex r1, r2, [r0] @ clear the exclusive monitor
#endif
ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
+#else
+ @ Thumb mode SVC restore
+ ldr lr, [sp, #S_SP] @ top of the stack
+ ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
+
+ @ We must avoid clrex due to Cortex-A15 erratum #830321
+ strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
+
+ stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
+ ldmia sp, {r0 - r12}
+ mov sp, lr
+ ldr lr, [sp], #4
+ rfeia sp!
+#endif
.endm
@
@@ -241,6 +259,9 @@
@ on the stack remains correct).
@
.macro svc_exit_via_fiq
+ uaccess_restore
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r0, sp
ldmib r0, {r1 - r14} @ abort is deadly from here onward (it will
@ clobber state restored below)
@@ -250,9 +271,27 @@
msr spsr_cxsf, r9
ldr r0, [r0, #S_R0]
ldmia r8, {pc}^
+#else
+ @ Thumb mode restore
+ add r0, sp, #S_R2
+ ldr lr, [sp, #S_LR]
+ ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
+ @ clobber state restored below)
+ ldmia r0, {r2 - r12}
+ mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
+ msr cpsr_c, r1
+ sub r0, #S_R2
+ add r8, r0, #S_PC
+ ldmia r0, {r0 - r1}
+ rfeia r8
+#endif
.endm
+
.macro restore_user_regs, fast = 0, offset = 0
+ uaccess_enable r1, isb=0
+#ifndef CONFIG_THUMB2_KERNEL
+ @ ARM mode restore
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
@@ -270,72 +309,16 @@
@ after ldm {}^
add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-
-#else /* CONFIG_THUMB2_KERNEL */
- .macro svc_exit, rpsr, irq = 0
- .if \irq != 0
- @ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
- @ The parent context IRQs must have been enabled to get here in
- @ the first place, so there's no point checking the PSR I bit.
- bl trace_hardirqs_on
-#endif
- .else
- @ IRQs off again before pulling preserved data off the stack
- disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
- tst \rpsr, #PSR_I_BIT
- bleq trace_hardirqs_on
- tst \rpsr, #PSR_I_BIT
- blne trace_hardirqs_off
-#endif
- .endif
- ldr lr, [sp, #S_SP] @ top of the stack
- ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
-
- @ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor
-
- stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
- ldmia sp, {r0 - r12}
- mov sp, lr
- ldr lr, [sp], #4
- rfeia sp!
- .endm
-
- @
- @ svc_exit_via_fiq - like svc_exit but switches to FIQ mode before exit
- @
- @ For full details see non-Thumb implementation above.
- @
- .macro svc_exit_via_fiq
- add r0, sp, #S_R2
- ldr lr, [sp, #S_LR]
- ldr sp, [sp, #S_SP] @ abort is deadly from here onward (it will
- @ clobber state restored below)
- ldmia r0, {r2 - r12}
- mov r1, #FIQ_MODE | PSR_I_BIT | PSR_F_BIT
- msr cpsr_c, r1
- sub r0, #S_R2
- add r8, r0, #S_PC
- ldmia r0, {r0 - r1}
- rfeia r8
- .endm
-
-#ifdef CONFIG_CPU_V7M
- /*
- * Note we don't need to do clrex here as clearing the local monitor is
- * part of each exception entry and exit sequence.
- */
- .macro restore_user_regs, fast = 0, offset = 0
+#elif defined(CONFIG_CPU_V7M)
+ @ V7M restore.
+ @ Note that we don't need to do clrex here as clearing the local
+ @ monitor is part of the exception entry and exit sequence.
.if \offset
add sp, #\offset
.endif
v7m_exception_slow_exit ret_r0 = \fast
- .endm
-#else /* ifdef CONFIG_CPU_V7M */
- .macro restore_user_regs, fast = 0, offset = 0
+#else
+ @ Thumb mode restore
mov r2, sp
load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
@@ -353,9 +336,8 @@
.endif
add sp, sp, #S_FRAME_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
- .endm
-#endif /* ifdef CONFIG_CPU_V7M / else */
#endif /* !CONFIG_THUMB2_KERNEL */
+ .endm
/*
* Context tracking subsystem. Used to instrument transitions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..04286fd9e09c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
+ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
+ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
badr lr, __enable_mmu @ return address
mov r13, r12 @ __secondary_switched address
@@ -461,10 +464,7 @@ __enable_mmu:
#ifdef CONFIG_ARM_LPAE
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
- mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+ mov r5, #DACR_INIT
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 350f188c92d2..b96c8ed1723a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -39,6 +39,7 @@
#include <linux/export.h>
#include <asm/hardware/cache-l2x0.h>
+#include <asm/outercache.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 357f57ea83f4..7d5379c1c443 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -795,8 +795,10 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Don't bother with PPIs; they're already affine */
irq = platform_get_irq(pdev, 0);
- if (irq >= 0 && irq_is_percpu(irq))
+ if (irq >= 0 && irq_is_percpu(irq)) {
+ cpumask_setall(&pmu->supported_cpus);
return 0;
+ }
irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
@@ -818,12 +820,13 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
break;
- of_node_put(dn);
if (cpu >= nr_cpu_ids) {
pr_warn("Failed to find logical CPU for %s\n",
dn->name);
+ of_node_put(dn);
break;
}
+ of_node_put(dn);
irqs[i] = cpu;
cpumask_set_cpu(cpu, &pmu->supported_cpus);
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f192a2a41719..a3089bacb8d8 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -91,13 +91,6 @@ void arch_cpu_idle_exit(void)
ledtrig_cpu(CPU_LED_IDLE_END);
}
-#ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead(void)
-{
- cpu_die();
-}
-#endif
-
void __show_regs(struct pt_regs *regs)
{
unsigned long flags;
@@ -129,12 +122,36 @@ void __show_regs(struct pt_regs *regs)
buf[4] = '\0';
#ifndef CONFIG_CPU_V7M
- printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
- buf, interrupts_enabled(regs) ? "n" : "ff",
- fast_interrupts_enabled(regs) ? "n" : "ff",
- processor_modes[processor_mode(regs)],
- isa_modes[isa_mode(regs)],
- get_fs() == get_ds() ? "kernel" : "user");
+ {
+ unsigned int domain = get_domain();
+ const char *segment;
+
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ /*
+ * Get the domain register for the parent context. In user
+ * mode, we don't save the DACR, so lets use what it should
+ * be. For other modes, we place it after the pt_regs struct.
+ */
+ if (user_mode(regs))
+ domain = DACR_UACCESS_ENABLE;
+ else
+ domain = *(unsigned int *)(regs + 1);
+#endif
+
+ if ((domain & domain_mask(DOMAIN_USER)) ==
+ domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
+ segment = "none";
+ else if (get_fs() == get_ds())
+ segment = "kernel";
+ else
+ segment = "user";
+
+ printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
+ buf, interrupts_enabled(regs) ? "n" : "ff",
+ fast_interrupts_enabled(regs) ? "n" : "ff",
+ processor_modes[processor_mode(regs)],
+ isa_modes[isa_mode(regs)], segment);
+ }
#else
printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif
@@ -146,10 +163,9 @@ void __show_regs(struct pt_regs *regs)
buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
{
- unsigned int transbase, dac;
+ unsigned int transbase, dac = get_domain();
asm("mrc p15, 0, %0, c2, c0\n\t"
- "mrc p15, 0, %1, c3, c0\n"
- : "=r" (transbase), "=r" (dac));
+ : "=r" (transbase));
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
transbase, dac);
}
@@ -210,6 +226,14 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
+ /*
+ * Copy the initial value of the domain access control register
+ * from the current thread: thread->addr_limit will have been
+ * copied from the current thread via setup_thread_stack() in
+ * kernel/fork.c
+ */
+ thread->cpu_domain = get_domain();
+
if (likely(!(p->flags & PF_KTHREAD))) {
*childregs = *current_pt_regs();
childregs->ARM_r0 = 0;
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 1a4d232796be..38269358fd25 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
flush_cache_all();
/* Switch to the identity mapping. */
- phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
+ phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
phys_reset((unsigned long)addr);
/* Should never get here. */
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 36c18b73c1f4..e2ecee6b70ca 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
+#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
@@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p)
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = cmd_line;
+ if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+ early_fixmap_init();
+
parse_early_param();
#ifdef CONFIG_MMU
@@ -1015,7 +1019,7 @@ static int __init topology_init(void)
for_each_possible_cpu(cpu) {
struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
- cpuinfo->cpu.hotpluggable = 1;
+ cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
register_cpu(&cpuinfo->cpu, cpu);
}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 90dfbedfbfb8..ba0063c539c3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -175,13 +175,26 @@ static int platform_cpu_disable(unsigned int cpu)
if (smp_ops.cpu_disable)
return smp_ops.cpu_disable(cpu);
+ return 0;
+}
+
+int platform_can_hotplug_cpu(unsigned int cpu)
+{
+ /* cpu_die must be specified to support hotplug */
+ if (!smp_ops.cpu_die)
+ return 0;
+
+ if (smp_ops.cpu_can_disable)
+ return smp_ops.cpu_can_disable(cpu);
+
/*
* By default, allow disabling all CPUs except the first one,
* since this is special on a lot of platforms, e.g. because
* of clock tick interrupts.
*/
- return cpu == 0 ? -EPERM : 0;
+ return cpu != 0;
}
+
/*
* __cpu_disable runs on the processor to be shutdown.
*/
@@ -253,7 +266,7 @@ void __cpu_die(unsigned int cpu)
* of the other hotplug-cpu capable cores, so presumably coming
* out of idle fixes this.
*/
-void __ref cpu_die(void)
+void arch_cpu_idle_dead(void)
{
unsigned int cpu = smp_processor_id();
@@ -578,7 +591,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
if ((unsigned)ipinr < NR_IPI) {
- trace_ipi_entry(ipi_types[ipinr]);
+ trace_ipi_entry_rcuidle(ipi_types[ipinr]);
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
}
@@ -637,7 +650,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
}
if ((unsigned)ipinr < NR_IPI)
- trace_ipi_exit(ipi_types[ipinr]);
+ trace_ipi_exit_rcuidle(ipi_types[ipinr]);
set_irq_regs(old_regs);
}
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 1361756782c7..5b26e7efa9ea 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -141,11 +141,14 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
while (1) {
unsigned long temp;
+ unsigned int __ua_flags;
+ __ua_flags = uaccess_save_and_enable();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
+ uaccess_restore(__ua_flags);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d358226236f2..969f9d9e665f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -870,7 +870,6 @@ void __init early_trap_init(void *vectors_base)
kuser_init(vectors_base);
flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
* on V7-M there is no need to copy the vector table to a dedicated
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
struct timespec64 *wtm = &tk->wall_to_monotonic;
if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
- xtime_coarse = __current_kernel_time();
vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
index 1710fd7db2d5..970d6c043774 100644
--- a/arch/arm/lib/clear_user.S
+++ b/arch/arm/lib/clear_user.S
@@ -12,14 +12,14 @@
.text
-/* Prototype: int __clear_user(void *addr, size_t sz)
+/* Prototype: unsigned long arm_clear_user(void *addr, size_t sz)
* Purpose : clear some user memory
* Params : addr - user memory address to clear
* : sz - number of bytes to clear
* Returns : number of bytes NOT cleared
*/
ENTRY(__clear_user_std)
-WEAK(__clear_user)
+WEAK(arm_clear_user)
stmfd sp!, {r1, lr}
mov r2, #0
cmp r1, #4
@@ -44,7 +44,7 @@ WEAK(__clear_user)
USER( strnebt r2, [r0])
mov r0, #0
ldmfd sp!, {r1, pc}
-ENDPROC(__clear_user)
+ENDPROC(arm_clear_user)
ENDPROC(__clear_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 7a235b9952be..1512bebfbf1b 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_from_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_from_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -89,11 +89,11 @@
.text
-ENTRY(__copy_from_user)
+ENTRY(arm_copy_from_user)
#include "copy_template.S"
-ENDPROC(__copy_from_user)
+ENDPROC(arm_copy_from_user)
.pushsection .fixup,"ax"
.align 0
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index 9648b0675a3e..caf5019d8161 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -17,7 +17,7 @@
/*
* Prototype:
*
- * size_t __copy_to_user(void *to, const void *from, size_t n)
+ * size_t arm_copy_to_user(void *to, const void *from, size_t n)
*
* Purpose:
*
@@ -93,11 +93,11 @@
.text
ENTRY(__copy_to_user_std)
-WEAK(__copy_to_user)
+WEAK(arm_copy_to_user)
#include "copy_template.S"
-ENDPROC(__copy_to_user)
+ENDPROC(arm_copy_to_user)
ENDPROC(__copy_to_user_std)
.pushsection .text.fixup,"ax"
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
index 1d0957e61f89..1712f132b80d 100644
--- a/arch/arm/lib/csumpartialcopyuser.S
+++ b/arch/arm/lib/csumpartialcopyuser.S
@@ -17,6 +17,19 @@
.text
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+ .macro save_regs
+ mrc p15, 0, ip, c3, c0, 0
+ stmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ uaccess_enable ip
+ .endm
+
+ .macro load_regs
+ ldmfd sp!, {r1, r2, r4 - r8, ip, lr}
+ mcr p15, 0, ip, c3, c0, 0
+ ret lr
+ .endm
+#else
.macro save_regs
stmfd sp!, {r1, r2, r4 - r8, lr}
.endm
@@ -24,6 +37,7 @@
.macro load_regs
ldmfd sp!, {r1, r2, r4 - r8, pc}
.endm
+#endif
.macro load1b, reg1
ldrusr \reg1, r0, 1
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index 7797e81e40e0..64111bd4440b 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -61,8 +61,10 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+ENTRY(mmiocpy)
ENTRY(memcpy)
#include "copy_template.S"
ENDPROC(memcpy)
+ENDPROC(mmiocpy)
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index a4ee97b5a2bf..3c65e3bd790f 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -16,6 +16,7 @@
.text
.align 5
+ENTRY(mmioset)
ENTRY(memset)
UNWIND( .fnstart )
ands r3, r0, #3 @ 1 unaligned?
@@ -133,3 +134,4 @@ UNWIND( .fnstart )
b 1b
UNWIND( .fnend )
ENDPROC(memset)
+ENDPROC(mmioset)
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 3e58d710013c..d72b90905132 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -96,7 +96,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
}
/* the mmap semaphore is taken only if not in an atomic context */
- atomic = in_atomic();
+ atomic = faulthandler_disabled();
if (!atomic)
down_read(&current->mm->mmap_sem);
@@ -136,7 +136,7 @@ out:
}
unsigned long
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
/*
* This test is stubbed out of the main function above to keep
@@ -190,7 +190,7 @@ out:
return n;
}
-unsigned long __clear_user(void __user *addr, unsigned long n)
+unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
/* See rational for this in __copy_to_user() above. */
if (n < 64)
diff --git a/arch/arm/mach-mmp/pm-pxa910.c b/arch/arm/mach-mmp/pm-pxa910.c
index 04c9daf9f8d7..7db5870d127f 100644
--- a/arch/arm/mach-mmp/pm-pxa910.c
+++ b/arch/arm/mach-mmp/pm-pxa910.c
@@ -18,6 +18,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/mach-types.h>
+#include <asm/outercache.h>
#include <mach/hardware.h>
#include <mach/cputype.h>
#include <mach/addr-map.h>
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index ecc04ff13e95..8427997e09c4 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -29,6 +29,7 @@ config ARCH_OMAP4
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
select OMAP_INTERCONNECT
+ select OMAP_INTERCONNECT_BARRIER
select PL310_ERRATA_588369 if CACHE_L2X0
select PL310_ERRATA_727915 if CACHE_L2X0
select PM_OPP if PM
@@ -46,6 +47,7 @@ config SOC_OMAP5
select HAVE_ARM_TWD if SMP
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
+ select OMAP_INTERCONNECT_BARRIER
config SOC_AM33XX
bool "TI AM33XX"
@@ -70,6 +72,7 @@ config SOC_DRA7XX
select HAVE_ARM_ARCH_TIMER
select IRQ_CROSSBAR
select ARM_ERRATA_798181 if SMP
+ select OMAP_INTERCONNECT_BARRIER
config ARCH_OMAP2PLUS
bool
@@ -91,6 +94,10 @@ config ARCH_OMAP2PLUS
help
Systems based on OMAP2, OMAP3, OMAP4 or OMAP5
+config OMAP_INTERCONNECT_BARRIER
+ bool
+ select ARM_HEAVY_MB
+
if ARCH_OMAP2PLUS
diff --git a/arch/arm/mach-omap2/common.c b/arch/arm/mach-omap2/common.c
index eae6a0e87c90..484cdadfb187 100644
--- a/arch/arm/mach-omap2/common.c
+++ b/arch/arm/mach-omap2/common.c
@@ -30,4 +30,5 @@ int __weak omap_secure_ram_reserve_memblock(void)
void __init omap_reserve(void)
{
omap_secure_ram_reserve_memblock();
+ omap_barrier_reserve_memblock();
}
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index cf3cf22ecd42..82f88b4ec15f 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -189,6 +189,15 @@ static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
}
#endif
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+void omap_barrier_reserve_memblock(void);
+void omap_barriers_init(void);
+#else
+static inline void omap_barrier_reserve_memblock(void)
+{
+}
+#endif
+
/* This gets called from mach-omap2/io.c, do not call this */
void __init omap2_set_globals_tap(u32 class, void __iomem *tap);
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index e1a56d87599e..1ed4be184a29 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -117,7 +117,6 @@ static void omap2_show_dma_caps(void)
u8 revision = dma_read(REVISION, 0) & 0xff;
printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
revision >> 4, revision & 0xf);
- return;
}
static unsigned configure_dma_errata(void)
diff --git a/arch/arm/mach-omap2/include/mach/barriers.h b/arch/arm/mach-omap2/include/mach/barriers.h
deleted file mode 100644
index 1c582a8592b9..000000000000
--- a/arch/arm/mach-omap2/include/mach/barriers.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * OMAP memory barrier header.
- *
- * Copyright (C) 2011 Texas Instruments, Inc.
- * Santosh Shilimkar <santosh.shilimkar@ti.com>
- * Richard Woodruff <r-woodruff2@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef __MACH_BARRIERS_H
-#define __MACH_BARRIERS_H
-
-#include <asm/outercache.h>
-
-extern void omap_bus_sync(void);
-
-#define rmb() dsb()
-#define wmb() do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
-#define mb() wmb()
-
-#endif /* __MACH_BARRIERS_H */
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 820dde8b5b04..7743e3672f98 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -306,6 +306,7 @@ void __init am33xx_map_io(void)
void __init omap4_map_io(void)
{
iotable_init(omap44xx_io_desc, ARRAY_SIZE(omap44xx_io_desc));
+ omap_barriers_init();
}
#endif
@@ -313,6 +314,7 @@ void __init omap4_map_io(void)
void __init omap5_map_io(void)
{
iotable_init(omap54xx_io_desc, ARRAY_SIZE(omap54xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 16350eefa66c..949696b6f17b 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -51,6 +51,127 @@ static void __iomem *twd_base;
#define IRQ_LOCALTIMER 29
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
+
+/* Used to implement memory barrier on DRAM path */
+#define OMAP4_DRAM_BARRIER_VA 0xfe600000
+
+static void __iomem *dram_sync, *sram_sync;
+static phys_addr_t dram_sync_paddr;
+static u32 dram_sync_size;
+
+/*
+ * The OMAP4 bus structure contains asynchronous bridges which can buffer
+ * data writes from the MPU. These asynchronous bridges can be found on
+ * paths between the MPU to EMIF, and the MPU to L3 interconnects.
+ *
+ * We need to be careful about re-ordering which can happen as a result
+ * of different accesses being performed via different paths, and
+ * therefore different asynchronous bridges.
+ */
+
+/*
+ * OMAP4 interconnect barrier which is called for each mb() and wmb().
+ * This is to ensure that normal paths to DRAM (normal memory, cacheable
+ * accesses) are properly synchronised with writes to DMA coherent memory
+ * (normal memory, uncacheable) and device writes.
+ *
+ * The mb() and wmb() barriers operate only on the MPU->MA->EMIF
+ * path, as we need to ensure that data is visible to other system
+ * masters before writes to those system masters are seen.
+ *
+ * Note: the SRAM path is not synchronised via mb() and wmb().
+ */
+static void omap4_mb(void)
+{
+ if (dram_sync)
+ writel_relaxed(0, dram_sync);
+}
+
+/*
+ * OMAP4 Errata i688 - asynchronous bridge corruption when entering WFI.
+ *
+ * If data is stalled inside an asynchronous bridge because of back
+ * pressure, it may be accepted multiple times, creating pointer
+ * misalignment that will corrupt the next transfers on that data path
+ * until the next reset of the system. There is no recovery procedure
+ * once the issue is hit; the path remains consistently broken.
+ *
+ * Async bridges can be found on paths between MPU to EMIF and MPU to L3
+ * interconnects.
+ *
+ * This situation can happen only when idle is initiated by a Master
+ * Request Disconnection (which is triggered by software when executing
+ * WFI on the CPU).
+ *
+ * The work-around for this erratum needs all the initiators connected
+ * through an async bridge to ensure that the data path is properly
+ * drained before issuing WFI. This condition is met if one strongly
+ * ordered access is performed to the target right before executing the
+ * WFI.
+ *
+ * In the MPU case, the L3 T2ASYNC FIFO and DDR T2ASYNC FIFO need to be
+ * drained. An IO barrier ensures that there is no synchronisation loss
+ * for initiators operating on both interconnect ports simultaneously.
+ *
+ * This is a stronger version of the OMAP4 memory barrier above: it
+ * operates not only on the MPU->MA->EMIF path but also on the MPU->OCP
+ * path, and is necessary prior to executing a WFI.
+ */
+void omap_interconnect_sync(void)
+{
+ if (dram_sync && sram_sync) {
+ writel_relaxed(readl_relaxed(dram_sync), dram_sync);
+ writel_relaxed(readl_relaxed(sram_sync), sram_sync);
+ isb();
+ }
+}
+
+static int __init omap4_sram_init(void)
+{
+ struct device_node *np;
+ struct gen_pool *sram_pool;
+
+ np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu");
+ if (!np)
+ pr_warn("%s:Unable to allocate sram needed to handle errata I688\n",
+ __func__);
+ sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!sram_pool)
+ pr_warn("%s:Unable to get sram pool needed to handle errata I688\n",
+ __func__);
+ else
+ sram_sync = (void *)gen_pool_alloc(sram_pool, PAGE_SIZE);
+
+ return 0;
+}
+omap_arch_initcall(omap4_sram_init);
+
+/* Steal one page physical memory for barrier implementation */
+void __init omap_barrier_reserve_memblock(void)
+{
+ dram_sync_size = ALIGN(PAGE_SIZE, SZ_1M);
+ dram_sync_paddr = arm_memblock_steal(dram_sync_size, SZ_1M);
+}
+
+void __init omap_barriers_init(void)
+{
+ struct map_desc dram_io_desc[1];
+
+ dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
+ dram_io_desc[0].pfn = __phys_to_pfn(dram_sync_paddr);
+ dram_io_desc[0].length = dram_sync_size;
+ dram_io_desc[0].type = MT_MEMORY_RW_SO;
+ iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
+ dram_sync = (void __iomem *) dram_io_desc[0].virtual;
+
+ pr_info("OMAP4: Map %pa to %p for dram barrier\n",
+ &dram_sync_paddr, dram_sync);
+
+ soc_mb = omap4_mb;
+}
+
+#endif
+
void gic_dist_disable(void)
{
if (gic_dist_base_addr)
diff --git a/arch/arm/mach-omap2/sleep44xx.S b/arch/arm/mach-omap2/sleep44xx.S
index ad1bb9431e94..9b09d85d811a 100644
--- a/arch/arm/mach-omap2/sleep44xx.S
+++ b/arch/arm/mach-omap2/sleep44xx.S
@@ -333,14 +333,12 @@ ENDPROC(omap4_cpu_resume)
#endif /* defined(CONFIG_SMP) && defined(CONFIG_PM) */
-ENTRY(omap_bus_sync)
- ret lr
-ENDPROC(omap_bus_sync)
-
ENTRY(omap_do_wfi)
stmfd sp!, {lr}
+#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
/* Drain interconnect write buffers. */
- bl omap_bus_sync
+ bl omap_interconnect_sync
+#endif
/*
* Execute an ISB instruction to ensure that all of the
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index e03d8b5c9ad0..9ab8932403e5 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -4,6 +4,7 @@ menuconfig ARCH_SIRF
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
+ select REGMAP
select PINCTRL
select PINCTRL_SIRF
help
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index d99d08eeb966..83e94c95e314 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -16,6 +16,7 @@
#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/rtc/sirfsoc_rtciobrg.h>
+#include <asm/outercache.h>
#include <asm/suspend.h>
#include <asm/hardware/cache-l2x0.h>
diff --git a/arch/arm/mach-prima2/rtciobrg.c b/arch/arm/mach-prima2/rtciobrg.c
index 8f66d8f7ca75..d4852d24dc7d 100644
--- a/arch/arm/mach-prima2/rtciobrg.c
+++ b/arch/arm/mach-prima2/rtciobrg.c
@@ -1,5 +1,5 @@
/*
- * RTC I/O Bridge interfaces for CSR SiRFprimaII
+ * RTC I/O Bridge interfaces for CSR SiRFprimaII/atlas7
* ARM access the registers of SYSRTC, GPSRTC and PWRC through this module
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/regmap.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -66,6 +67,7 @@ u32 sirfsoc_rtc_iobrg_readl(u32 addr)
{
unsigned long flags, val;
+ /* TODO: add hwspinlock to sync with M3 */
spin_lock_irqsave(&rtciobrg_lock, flags);
val = __sirfsoc_rtc_iobrg_readl(addr);
@@ -90,6 +92,7 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
{
unsigned long flags;
+ /* TODO: add hwspinlock to sync with M3 */
spin_lock_irqsave(&rtciobrg_lock, flags);
sirfsoc_rtc_iobrg_pre_writel(val, addr);
@@ -102,6 +105,45 @@ void sirfsoc_rtc_iobrg_writel(u32 val, u32 addr)
}
EXPORT_SYMBOL_GPL(sirfsoc_rtc_iobrg_writel);
+
+static int regmap_iobg_regwrite(void *context, unsigned int reg,
+ unsigned int val)
+{
+ sirfsoc_rtc_iobrg_writel(val, reg);
+ return 0;
+}
+
+static int regmap_iobg_regread(void *context, unsigned int reg,
+ unsigned int *val)
+{
+ *val = (u32)sirfsoc_rtc_iobrg_readl(reg);
+ return 0;
+}
+
+static struct regmap_bus regmap_iobg = {
+ .reg_write = regmap_iobg_regwrite,
+ .reg_read = regmap_iobg_regread,
+};
+
+/**
+ * devm_regmap_init_iobg(): Initialise managed register map
+ *
+ * @dev: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+struct regmap *devm_regmap_init_iobg(struct device *dev,
+ const struct regmap_config *config)
+{
+ const struct regmap_bus *bus = &regmap_iobg;
+
+ return devm_regmap_init(dev, bus, dev, config);
+}
+EXPORT_SYMBOL_GPL(devm_regmap_init_iobg);
+
static const struct of_device_id rtciobrg_ids[] = {
{ .compatible = "sirf,prima2-rtciobg" },
{}
@@ -132,7 +174,7 @@ static int __init sirfsoc_rtciobrg_init(void)
}
postcore_initcall(sirfsoc_rtciobrg_init);
-MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>, "
- "Barry Song <baohua.song@csr.com>");
+MODULE_AUTHOR("Zhiwu Song <zhiwu.song@csr.com>");
+MODULE_AUTHOR("Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("CSR SiRFprimaII rtc io bridge");
MODULE_LICENSE("GPL v2");
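A sketch of how a driver sitting behind the RTC I/O bridge might consume the new regmap bus; the myrtc_ names, register layout, and the 32-bit config values are invented for illustration (usual includes omitted), only devm_regmap_init_iobg() and the standard regmap calls are from the patch or the regmap API.

static const struct regmap_config myrtc_regmap_config = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,
};

static int myrtc_probe(struct platform_device *pdev)
{
	struct regmap *map;
	unsigned int val;

	map = devm_regmap_init_iobg(&pdev->dev, &myrtc_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* reads/writes end up in sirfsoc_rtc_iobrg_readl()/_writel() */
	return regmap_read(map, 0x0, &val);
}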
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 476092b86c6e..8d27ec546a35 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -13,7 +13,7 @@ extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void);
extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
unsigned long arg);
-extern int shmobile_smp_cpu_disable(unsigned int cpu);
+extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
extern void shmobile_boot_scu(void);
extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index 3923e09e966d..b23378f3d7e1 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -31,8 +31,8 @@ void shmobile_smp_hook(unsigned int cpu, unsigned long fn, unsigned long arg)
}
#ifdef CONFIG_HOTPLUG_CPU
-int shmobile_smp_cpu_disable(unsigned int cpu)
+bool shmobile_smp_cpu_can_disable(unsigned int cpu)
{
- return 0; /* Hotplug of any CPU is supported */
+ return true; /* Hotplug of any CPU is supported */
}
#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7790.c b/arch/arm/mach-shmobile/smp-r8a7790.c
index 930f45cbc08a..947e437cab68 100644
--- a/arch/arm/mach-shmobile/smp-r8a7790.c
+++ b/arch/arm/mach-shmobile/smp-r8a7790.c
@@ -64,7 +64,7 @@ struct smp_operations r8a7790_smp_ops __initdata = {
.smp_prepare_cpus = r8a7790_smp_prepare_cpus,
.smp_boot_secondary = shmobile_smp_apmu_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif
diff --git a/arch/arm/mach-shmobile/smp-r8a7791.c b/arch/arm/mach-shmobile/smp-r8a7791.c
index 5e2d1db79afa..b2508c0d276b 100644
--- a/arch/arm/mach-shmobile/smp-r8a7791.c
+++ b/arch/arm/mach-shmobile/smp-r8a7791.c
@@ -58,7 +58,7 @@ struct smp_operations r8a7791_smp_ops __initdata = {
.smp_prepare_cpus = r8a7791_smp_prepare_cpus,
.smp_boot_secondary = r8a7791_smp_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_apmu_cpu_die,
.cpu_kill = shmobile_smp_apmu_cpu_kill,
#endif
diff --git a/arch/arm/mach-shmobile/smp-sh73a0.c b/arch/arm/mach-shmobile/smp-sh73a0.c
index 2106d6b76a06..ae7c764fd6b4 100644
--- a/arch/arm/mach-shmobile/smp-sh73a0.c
+++ b/arch/arm/mach-shmobile/smp-sh73a0.c
@@ -68,7 +68,7 @@ struct smp_operations sh73a0_smp_ops __initdata = {
.smp_prepare_cpus = sh73a0_smp_prepare_cpus,
.smp_boot_secondary = sh73a0_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = shmobile_smp_cpu_disable,
+ .cpu_can_disable = shmobile_smp_cpu_can_disable,
.cpu_die = shmobile_smp_scu_cpu_die,
.cpu_kill = shmobile_smp_scu_cpu_kill,
#endif
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index 81502b90dd91..4efe2d43a126 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -35,7 +35,7 @@ config MACH_SUN7I
select SUN5I_HSTIMER
config MACH_SUN8I
- bool "Allwinner A23 (sun8i) SoCs support"
+ bool "Allwinner sun8i Family SoCs support"
default ARCH_SUNXI
select ARM_GIC
select MFD_SUN6I_PRCM
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 1bc811a74a9f..65bab2876343 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -67,10 +67,13 @@ MACHINE_END
static const char * const sun8i_board_dt_compat[] = {
"allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-h3",
NULL,
};
-DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i (A23) Family")
+DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
+ .init_time = sun6i_timer_init,
.dt_compat = sun8i_board_dt_compat,
.init_late = sunxi_dt_cpufreq_init,
MACHINE_END
diff --git a/arch/arm/mach-ux500/cache-l2x0.c b/arch/arm/mach-ux500/cache-l2x0.c
index 7557bede7ae6..780bd13cd7e3 100644
--- a/arch/arm/mach-ux500/cache-l2x0.c
+++ b/arch/arm/mach-ux500/cache-l2x0.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
+#include <asm/outercache.h>
#include <asm/hardware/cache-l2x0.h>
#include "db8500-regs.h"
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7c6b976ab8d3..df7537f12469 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -883,6 +883,7 @@ config OUTER_CACHE
config OUTER_CACHE_SYNC
bool
+ select ARM_HEAVY_MB
help
The outer cache has a outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache.
@@ -1031,6 +1032,9 @@ config ARCH_HAS_BARRIERS
This option allows the use of custom mandatory barriers
included via the mach/barriers.h file.
+config ARM_HEAVY_MB
+ bool
+
config ARCH_SUPPORTS_BIG_ENDIAN
bool
help
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 54473cd4aba9..b3b31e30cadd 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -19,6 +19,7 @@ ENTRY(v4_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
ldr r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable userspace access
bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
tst r3, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index a0908d4653a3..a6a381a6caa5 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -21,8 +21,10 @@ ENTRY(v5t_early_abort)
mrc p15, 0, r0, c6, c0, 0 @ get FAR
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
+ uaccess_disable ip @ disable user access
bic r1, r1, #1 << 11 @ clear bits 11 of FSR
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ check write
orreq r1, r1, #1 << 11
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index 4006b7a61264..00ab011bef58 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -24,7 +24,9 @@ ENTRY(v5tj_early_abort)
bne do_DataAbort
do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
ldreq r3, [r4] @ read aborted ARM instruction
- do_ldrd_abort tmp=ip, insn=r3
+ uaccess_disable ip @ disable userspace access
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq do_DataAbort @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8c48c5c22a33..8801a15aa105 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -26,16 +26,18 @@ ENTRY(v6_early_abort)
ldr ip, =0x4107b36
mrc p15, 0, r3, c0, c0, 0 @ get processor id
teq ip, r3, lsr #4 @ r0 ARM1136?
- bne do_DataAbort
+ bne 1f
tst r5, #PSR_J_BIT @ Java?
tsteq r5, #PSR_T_BIT @ Thumb?
- bne do_DataAbort
+ bne 1f
bic r1, r1, #1 << 11 @ clear bit 11 of FSR
ldr r3, [r4] @ read aborted ARM instruction
ARM_BE8(rev r3, r3)
- do_ldrd_abort tmp=ip, insn=r3
+ teq_ldrd tmp=ip, insn=r3 @ insn was LDRD?
+ beq 1f @ yes
tst r3, #1 << 20 @ L = 0 -> write
orreq r1, r1, #1 << 11 @ yes.
#endif
+1: uaccess_disable ip @ disable userspace access
b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 4812ad054214..e8d0e08c227f 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -15,6 +15,7 @@
ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
+ uaccess_disable ip @ disable userspace access
/*
* V6 code adjusts the returned DFSR.
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index f3982580c273..6d8e8e3365d1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -26,6 +26,7 @@ ENTRY(v4t_late_abort)
#endif
bne .data_thumb_abort
ldr r8, [r4] @ read arm instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 20 @ L = 1 -> write?
orreq r1, r1, #1 << 11 @ yes.
and r7, r8, #15 << 24
@@ -155,6 +156,7 @@ ENTRY(v4t_late_abort)
.data_thumb_abort:
ldrh r8, [r4] @ read instruction
+ uaccess_disable ip @ disable userspace access
tst r8, #1 << 11 @ L = 1 -> write?
orreq r1, r1, #1 << 8 @ yes
and r7, r8, #15 << 12
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index 2cbf68ef0e83..4509bee4e081 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -13,6 +13,7 @@
tst \psr, #PSR_T_BIT
beq not_thumb
ldrh \tmp, [\pc] @ Read aborted Thumb instruction
+ uaccess_disable ip @ disable userspace access
and \tmp, \tmp, # 0xfe00 @ Mask opcode field
cmp \tmp, # 0x5600 @ Is it ldrsb?
orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
@@ -29,12 +30,9 @@ not_thumb:
* [7:4] == 1101
* [20] == 0
*/
- .macro do_ldrd_abort, tmp, insn
- tst \insn, #0x0e100000 @ [27:25,20] == 0
- bne not_ldrd
- and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
- cmp \tmp, #0x000000d0
- beq do_DataAbort
-not_ldrd:
+ .macro teq_ldrd, tmp, insn
+ mov \tmp, #0x0e100000
+ orr \tmp, #0x000000f0
+ and \tmp, \insn, \tmp
+ teq \tmp, #0x000000d0
.endm
-
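For reference, the test the new teq_ldrd macro performs is equivalent to this C predicate (purely illustrative, not part of the patch); callers now branch on the EQ flag themselves (beq do_DataAbort / beq 1f), which is why the old do_ldrd_abort macro's built-in branch went away.

/* LDRD: bits [27:25] and [20] clear, bits [7:4] == 0b1101 */
static inline bool insn_is_ldrd(u32 insn)
{
	return (insn & 0x0e1000f0) == 0x000000d0;
}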
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 097181e08c25..5c1b7a7b9af6 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -368,7 +368,6 @@ int __init feroceon_of_init(void)
struct device_node *node;
void __iomem *base;
bool l2_wt_override = false;
- struct resource res;
#if defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
l2_wt_override = true;
@@ -376,10 +375,7 @@ int __init feroceon_of_init(void)
node = of_find_matching_node(NULL, feroceon_ids);
if (node && of_device_is_compatible(node, "marvell,kirkwood-cache")) {
- if (of_address_to_resource(node, 0, &res))
- return -ENODEV;
-
- base = ioremap(res.start, resource_size(&res));
+ base = of_iomap(node, 0);
if (!base)
return -ENOMEM;
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 71b3d3309024..493692d838c6 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -1171,6 +1171,11 @@ static void __init l2c310_of_parse(const struct device_node *np,
}
}
+ if (of_property_read_bool(np, "arm,shared-override")) {
+ *aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
+ *aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
+ }
+
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1ced8a0f7a52..9f509b264346 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -39,6 +39,7 @@
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
+#include "dma.h"
#include "mm.h"
/*
@@ -648,14 +649,18 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
size = PAGE_ALIGN(size);
want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
- if (is_coherent || nommu())
+ if (nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+ else if (dev_get_cma_area(dev) && (gfp & __GFP_WAIT))
+ addr = __alloc_from_contiguous(dev, size, prot, &page,
+ caller, want_vaddr);
+ else if (is_coherent)
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!dev_get_cma_area(dev))
- addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr);
else
- addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr);
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
+ caller, want_vaddr);
if (page)
*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -683,13 +688,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
void *memory;
if (dma_alloc_from_coherent(dev, size, handle, &memory))
return memory;
- return __dma_alloc(dev, size, handle, gfp, prot, true,
+ return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
}
@@ -753,12 +757,12 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
size = PAGE_ALIGN(size);
- if (is_coherent || nommu()) {
+ if (nommu()) {
__dma_free_buffer(page, size);
- } else if (__free_from_pool(cpu_addr, size)) {
+ } else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
return;
} else if (!dev_get_cma_area(dev)) {
- if (want_vaddr)
+ if (want_vaddr && !is_coherent)
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
@@ -1971,7 +1975,7 @@ static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
{
int next_bitmap;
- if (mapping->nr_bitmaps > mapping->extensions)
+ if (mapping->nr_bitmaps >= mapping->extensions)
return -EINVAL;
next_bitmap = mapping->nr_bitmaps;
diff --git a/arch/arm/mm/dma.h b/arch/arm/mm/dma.h
new file mode 100644
index 000000000000..70ea6852f94e
--- /dev/null
+++ b/arch/arm/mm/dma.h
@@ -0,0 +1,32 @@
+#ifndef DMA_H
+#define DMA_H
+
+#include <asm/glue-cache.h>
+
+#ifndef MULTI_CACHE
+#define dmac_map_area __glue(_CACHE,_dma_map_area)
+#define dmac_unmap_area __glue(_CACHE,_dma_unmap_area)
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+extern void dmac_map_area(const void *, size_t, int);
+extern void dmac_unmap_area(const void *, size_t, int);
+
+#else
+
+/*
+ * These are private to the dma-mapping API. Do not use directly.
+ * Their sole purpose is to ensure that data held in the cache
+ * is visible to DMA, or data written by DMA to system memory is
+ * visible to the CPU.
+ */
+#define dmac_map_area cpu_cache.dma_map_area
+#define dmac_unmap_area cpu_cache.dma_unmap_area
+
+#endif
+
+#endif
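What the glue macro buys: on a kernel built for a single cache type, _CACHE expands to the cache name (for example v7, assuming a CPU_V7-only configuration), so a call through dmac_map_area() compiles to a direct call; on a MULTI_CACHE build the same call goes through the cpu_cache method table. A rough illustration:

	/* single-cache build, _CACHE == v7 */
	dmac_map_area(buf, len, DMA_TO_DEVICE);	/* -> v7_dma_map_area(buf, len, DMA_TO_DEVICE) */

	/* MULTI_CACHE build */
	dmac_map_area(buf, len, DMA_TO_DEVICE);	/* -> cpu_cache.dma_map_area(buf, len, DMA_TO_DEVICE) */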
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 34b66af516ea..1ec8e7590fc6 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -21,6 +21,21 @@
#include "mm.h"
+#ifdef CONFIG_ARM_HEAVY_MB
+void (*soc_mb)(void);
+
+void arm_heavy_mb(void)
+{
+#ifdef CONFIG_OUTER_CACHE_SYNC
+ if (outer_cache.sync)
+ outer_cache.sync();
+#endif
+ if (soc_mb)
+ soc_mb();
+}
+EXPORT_SYMBOL(arm_heavy_mb);
+#endif
+
#ifdef CONFIG_CPU_CACHE_VIPT
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
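arm_heavy_mb() is the single "heavy" barrier that ARM_HEAVY_MB hangs off: it drains the outer cache (when OUTER_CACHE_SYNC is enabled) and then calls whatever hook the SoC registered in soc_mb, which is presumably how mb()/wmb() reach the OMAP4 interconnect barrier above (the asm/barrier.h side of this series is not shown here). A sketch of the SoC-side wiring, mirroring omap_barriers_init(); the myplat_ names are invented:

static void __iomem *myplat_sync;		/* strongly-ordered mapping set up elsewhere */

static void myplat_mb(void)
{
	writel_relaxed(0, myplat_sync);		/* push a write down the async bridge */
}

static void __init myplat_barriers_init(void)
{
	soc_mb = myplat_mb;			/* arm_heavy_mb() will call this */
}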
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
return page_address(page);
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
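
A small sketch of the corrected index arithmetic: atomic kmap slots now start at FIX_KMAP_BEGIN within the fixmap region instead of at slot 0. The helper name below is hypothetical; it just returns the address the fixed lines compute.

static unsigned long example_kmap_slot(int type, int cpu)
{
        unsigned int idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu;

        return __fix_to_virt(idx);      /* typically FIXADDR_TOP - (idx << PAGE_SHIFT) */
}
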
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index d1e5ad7ab3bc..0c81056c1dd7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -255,7 +255,7 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif
-void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
@@ -363,7 +363,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
unsigned int mtype)
{
return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
- __builtin_return_address(0));
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
@@ -371,13 +371,26 @@ void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
unsigned int, void *) =
__arm_ioremap_caller;
-void __iomem *
-__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
- return arch_ioremap_caller(phys_addr, size, mtype,
- __builtin_return_address(0));
+ return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
}
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap_wc);
/*
* Remap an arbitrary physical address space into the kernel virtual
@@ -431,11 +444,11 @@ void __iounmap(volatile void __iomem *io_addr)
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;
-void __arm_iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *cookie)
{
- arch_iounmap(io_addr);
+ arch_iounmap(cookie);
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;
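
A usage sketch from a driver's point of view, now that ioremap()/iounmap() are ordinary exported functions on ARM. The physical base and register offset are placeholder values, not taken from any real device.

static int example_map_device(void)
{
        void __iomem *regs = ioremap(0x48020000, SZ_4K);        /* placeholder base */

        if (!regs)
                return -ENOMEM;
        (void)readl(regs + 0x50);       /* placeholder register read */
        iounmap(regs);
        return 0;
}
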
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..7cd15143a507 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -291,13 +291,13 @@ static struct mem_type mem_types[] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_HIGH_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_USER | L_PTE_RDONLY,
.prot_l1 = PMD_TYPE_TABLE,
- .domain = DOMAIN_USER,
+ .domain = DOMAIN_VECTORS,
},
[MT_MEMORY_RWX] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud = pud_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+ pmd_t *pmd;
+
+ /*
+ * The early fixmap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ != FIXADDR_TOP >> PMD_SHIFT);
+
+ pmd = fixmap_pmd(FIXADDR_TOP);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
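
A hedged sketch of what the early fixmap enables: before paging_init() has finished, __set_fixmap() resolves PTEs through pte_offset_early_fixmap() and the statically allocated bm_pte[], so an early console can be mapped roughly as below. The fixmap slot name and protection constant are assumptions, not part of this hunk.

static void __iomem *example_earlycon_map(phys_addr_t phys)
{
        __set_fixmap(FIX_EARLYCON_MEM_BASE, phys & PAGE_MASK, FIXMAP_PAGE_IO);

        return (void __iomem *)(fix_to_virt(FIX_EARLYCON_MEM_BASE) +
                                (phys & ~PAGE_MASK));
}
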
/*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1072,6 +1113,7 @@ void __init sanity_check_meminfo(void)
int highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
struct memblock_region *reg;
+ bool should_use_highmem = false;
for_each_memblock(memory, reg) {
phys_addr_t block_start = reg->base;
@@ -1090,6 +1132,7 @@ void __init sanity_check_meminfo(void)
pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
&block_start, &block_end);
memblock_remove(reg->base, reg->size);
+ should_use_highmem = true;
continue;
}
@@ -1100,6 +1143,7 @@ void __init sanity_check_meminfo(void)
&block_start, &block_end, &vmalloc_limit);
memblock_remove(vmalloc_limit, overlap_size);
block_end = vmalloc_limit;
+ should_use_highmem = true;
}
}
@@ -1134,6 +1178,9 @@ void __init sanity_check_meminfo(void)
}
}
+ if (should_use_highmem)
+ pr_notice("Consider using a HIGHMEM enabled kernel.\n");
+
high_memory = __va(arm_lowmem_limit - 1) + 1;
/*
@@ -1213,10 +1260,10 @@ void __init arm_mm_memblock_reserve(void)
/*
* Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings. This means earlycon can be used to debug this function.
+ * device mappings. This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
*/
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
@@ -1231,7 +1278,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
early_trap_init(vectors);
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ /*
+ * Clear the page table, except for the top pmd used by the early fixmaps
+ */
+ for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -1483,6 +1533,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif
+static void __init early_fixmap_shutdown(void)
+{
+ int i;
+ unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+ pte_offset_fixmap = pte_offset_late_fixmap;
+ pmd_clear(fixmap_pmd(va));
+ local_flush_tlb_kernel_page(va);
+
+ for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+ pte_t *pte;
+ struct map_desc map;
+
+ map.virtual = fix_to_virt(i);
+ pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+ /* Only i/o device mappings are supported ATM */
+ if (pte_none(*pte) ||
+ (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+ continue;
+
+ map.pfn = pte_pfn(*pte);
+ map.type = MT_DEVICE;
+ map.length = PAGE_SIZE;
+
+ create_mapping(&map);
+ }
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1494,7 +1573,9 @@ void __init paging_init(const struct machine_desc *mdesc)
build_mem_type_table();
prepare_page_table();
map_lowmem();
+ memblock_set_current_limit(arm_lowmem_limit);
dma_contiguous_remap();
+ early_fixmap_shutdown();
devicemaps_init(mdesc);
kmap_init();
tcm_init();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index afd7e05d95f1..1dd10936d68d 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -351,30 +351,43 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
- size_t size, unsigned int mtype, void *caller)
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return (void __iomem *)phys_addr;
}
-void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
- unsigned int mtype)
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+
+void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
- return (void __iomem *)phys_addr;
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
+ __builtin_return_address(0));
}
-EXPORT_SYMBOL(__arm_ioremap);
+EXPORT_SYMBOL(ioremap);
-void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
+void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
-void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
+{
+ return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iounmap(volatile void __iomem *addr)
{
- return __arm_ioremap(phys_addr, size, mtype);
}
+EXPORT_SYMBOL(__iounmap);
void (*arch_iounmap)(volatile void __iomem *);
-void __arm_iounmap(volatile void __iomem *addr)
+void iounmap(volatile void __iomem *addr)
{
}
-EXPORT_SYMBOL(__arm_iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index a3681f11dd9f..e683db1b90a3 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -84,6 +84,16 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
if (!new_pte)
goto no_pte;
+#ifndef CONFIG_ARM_LPAE
+ /*
+ * Modify the PTE pointer to have the correct domain. This
+ * needs to be the vectors domain to avoid the low vectors
+ * being unmapped.
+ */
+ pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
+ pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
+#endif
+
init_pud = pud_offset(init_pgd, 0);
init_pmd = pmd_offset(init_pud, 0);
init_pte = pte_offset_map(init_pmd, 0);
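
A small helper-style restatement of the domain fix above, useful for seeing the bit manipulation in isolation. The helper name is invented; PMD_DOMAIN()/PMD_DOMAIN_MASK are the macros the hunk itself uses.

static void example_set_pmd_domain(pmd_t *pmd, unsigned int domain)
{
        pmd_val(*pmd) &= ~PMD_DOMAIN_MASK;      /* clear the old domain bits */
        pmd_val(*pmd) |= PMD_DOMAIN(domain);    /* e.g. PMD_DOMAIN(DOMAIN_VECTORS) */
}
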
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0716bbe19872..de2b246fed38 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -274,7 +274,10 @@ __v7_ca15mp_setup:
__v7_b15mp_setup:
__v7_ca17mp_setup:
mov r10, #0
-1:
+1: adr r12, __v7_setup_stack @ the local stack
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
+ bl v7_invalidate_l1
+ ldmia r12, {r0-r5, lr}
#ifdef CONFIG_SMP
ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
@@ -283,7 +286,7 @@ __v7_ca17mp_setup:
orreq r0, r0, r10 @ Enable CPU-specific SMP bits
mcreq p15, 0, r0, c1, c0, 1
#endif
- b __v7_setup
+ b __v7_setup_cont
/*
* Errata:
@@ -413,10 +416,11 @@ __v7_pj4b_setup:
__v7_setup:
adr r12, __v7_setup_stack @ the local stack
- stmia r12, {r0-r5, r7, r9, r11, lr}
+ stmia r12, {r0-r5, lr} @ v7_invalidate_l1 touches r0-r6
bl v7_invalidate_l1
- ldmia r12, {r0-r5, r7, r9, r11, lr}
+ ldmia r12, {r0-r5, lr}
+__v7_setup_cont:
and r0, r9, #0xff000000 @ ARM?
teq r0, #0x41000000
bne __errata_finish
@@ -480,7 +484,7 @@ ENDPROC(__v7_setup)
.align 2
__v7_setup_stack:
- .space 4 * 11 @ 11 registers
+ .space 4 * 7 @ 7 registers
__INITDATA
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
VDSO_LDFLAGS += -nostdlib -shared
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
-VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd)
+VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
obj-$(CONFIG_VDSO) += vdso.o
extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm/vdso/vdsomunge.c b/arch/arm/vdso/vdsomunge.c
index 9005b07296c8..aedec81d1198 100644
--- a/arch/arm/vdso/vdsomunge.c
+++ b/arch/arm/vdso/vdsomunge.c
@@ -45,13 +45,11 @@
* it does.
*/
-#define _GNU_SOURCE
-
#include <byteswap.h>
#include <elf.h>
#include <errno.h>
-#include <error.h>
#include <fcntl.h>
+#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
@@ -82,11 +80,25 @@
#define EF_ARM_ABI_FLOAT_HARD 0x400
#endif
+static int failed;
+static const char *argv0;
static const char *outfile;
+static void fail(const char *fmt, ...)
+{
+ va_list ap;
+
+ failed = 1;
+ fprintf(stderr, "%s: ", argv0);
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(EXIT_FAILURE);
+}
+
static void cleanup(void)
{
- if (error_message_count > 0 && outfile != NULL)
+ if (failed && outfile != NULL)
unlink(outfile);
}
@@ -119,68 +131,66 @@ int main(int argc, char **argv)
int infd;
atexit(cleanup);
+ argv0 = argv[0];
if (argc != 3)
- error(EXIT_FAILURE, 0, "Usage: %s [infile] [outfile]", argv[0]);
+ fail("Usage: %s [infile] [outfile]\n", argv[0]);
infile = argv[1];
outfile = argv[2];
infd = open(infile, O_RDONLY);
if (infd < 0)
- error(EXIT_FAILURE, errno, "Cannot open %s", infile);
+ fail("Cannot open %s: %s\n", infile, strerror(errno));
if (fstat(infd, &stat) != 0)
- error(EXIT_FAILURE, errno, "Failed stat for %s", infile);
+ fail("Failed stat for %s: %s\n", infile, strerror(errno));
inbuf = mmap(NULL, stat.st_size, PROT_READ, MAP_PRIVATE, infd, 0);
if (inbuf == MAP_FAILED)
- error(EXIT_FAILURE, errno, "Failed to map %s", infile);
+ fail("Failed to map %s: %s\n", infile, strerror(errno));
close(infd);
inhdr = inbuf;
if (memcmp(&inhdr->e_ident, ELFMAG, SELFMAG) != 0)
- error(EXIT_FAILURE, 0, "Not an ELF file");
+ fail("Not an ELF file\n");
if (inhdr->e_ident[EI_CLASS] != ELFCLASS32)
- error(EXIT_FAILURE, 0, "Unsupported ELF class");
+ fail("Unsupported ELF class\n");
swap = inhdr->e_ident[EI_DATA] != HOST_ORDER;
if (read_elf_half(inhdr->e_type, swap) != ET_DYN)
- error(EXIT_FAILURE, 0, "Not a shared object");
+ fail("Not a shared object\n");
- if (read_elf_half(inhdr->e_machine, swap) != EM_ARM) {
- error(EXIT_FAILURE, 0, "Unsupported architecture %#x",
- inhdr->e_machine);
- }
+ if (read_elf_half(inhdr->e_machine, swap) != EM_ARM)
+ fail("Unsupported architecture %#x\n", inhdr->e_machine);
e_flags = read_elf_word(inhdr->e_flags, swap);
if (EF_ARM_EABI_VERSION(e_flags) != EF_ARM_EABI_VER5) {
- error(EXIT_FAILURE, 0, "Unsupported EABI version %#x",
- EF_ARM_EABI_VERSION(e_flags));
+ fail("Unsupported EABI version %#x\n",
+ EF_ARM_EABI_VERSION(e_flags));
}
if (e_flags & EF_ARM_ABI_FLOAT_HARD)
- error(EXIT_FAILURE, 0,
- "Unexpected hard-float flag set in e_flags");
+ fail("Unexpected hard-float flag set in e_flags\n");
clear_soft_float = !!(e_flags & EF_ARM_ABI_FLOAT_SOFT);
outfd = open(outfile, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
if (outfd < 0)
- error(EXIT_FAILURE, errno, "Cannot open %s", outfile);
+ fail("Cannot open %s: %s\n", outfile, strerror(errno));
if (ftruncate(outfd, stat.st_size) != 0)
- error(EXIT_FAILURE, errno, "Cannot truncate %s", outfile);
+ fail("Cannot truncate %s: %s\n", outfile, strerror(errno));
outbuf = mmap(NULL, stat.st_size, PROT_READ | PROT_WRITE, MAP_SHARED,
outfd, 0);
if (outbuf == MAP_FAILED)
- error(EXIT_FAILURE, errno, "Failed to map %s", outfile);
+ fail("Failed to map %s: %s\n", outfile, strerror(errno));
close(outfd);
@@ -195,7 +205,7 @@ int main(int argc, char **argv)
}
if (msync(outbuf, stat.st_size, MS_SYNC) != 0)
- error(EXIT_FAILURE, errno, "Failed to sync %s", outfile);
+ fail("Failed to sync %s: %s\n", outfile, strerror(errno));
return EXIT_SUCCESS;
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 0f6edb14b7e4..318175f62c24 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -23,9 +23,9 @@ config ARM64
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
- select EDAC_SUPPORT
select CPU_PM if (SUSPEND || CPU_IDLE)
select DCACHE_WORD_ACCESS
+ select EDAC_SUPPORT
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/arch/arm64/boot/dts/apm/apm-mustang.dts b/arch/arm64/boot/dts/apm/apm-mustang.dts
index 83578e766b94..4c55833d8a41 100644
--- a/arch/arm64/boot/dts/apm/apm-mustang.dts
+++ b/arch/arm64/boot/dts/apm/apm-mustang.dts
@@ -23,6 +23,16 @@
device_type = "memory";
reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
};
+
+ gpio-keys {
+ compatible = "gpio-keys";
+ button@1 {
+ label = "POWER";
+ linux,code = <116>;
+ linux,input-type = <0x1>;
+ interrupts = <0x0 0x2d 0x1>;
+ };
+ };
};
&pcie0clk {
diff --git a/arch/arm64/boot/dts/arm/Makefile b/arch/arm64/boot/dts/arm/Makefile
index c5c98b91514e..bb3c07209676 100644
--- a/arch/arm64/boot/dts/arm/Makefile
+++ b/arch/arm64/boot/dts/arm/Makefile
@@ -1,6 +1,7 @@
dtb-$(CONFIG_ARCH_VEXPRESS) += foundation-v8.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += juno.dtb juno-r1.dtb
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb
+dtb-$(CONFIG_ARCH_VEXPRESS) += vexpress-v2f-1xv7-ca53x2.dtb
always := $(dtb-y)
subdir-y := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
new file mode 100644
index 000000000000..5b1d0181023b
--- /dev/null
+++ b/arch/arm64/boot/dts/arm/vexpress-v2f-1xv7-ca53x2.dts
@@ -0,0 +1,191 @@
+/*
+ * ARM Ltd. Versatile Express
+ *
+ * LogicTile Express 20MG
+ * V2F-1XV7
+ *
+ * Cortex-A53 (2 cores) Soft Macrocell Model
+ *
+ * HBI-0247C
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ model = "V2F-1XV7 Cortex-A53x2 SMM";
+ arm,hbi = <0x247>;
+ arm,vexpress,site = <0xf>;
+ compatible = "arm,vexpress,v2f-1xv7,ca53x2", "arm,vexpress,v2f-1xv7", "arm,vexpress";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen {
+ stdout-path = "serial0:38400n8";
+ };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ i2c0 = &v2m_i2c_dvi;
+ i2c1 = &v2m_i2c_pcie;
+ };
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 0>;
+ next-level-cache = <&L2_0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0 1>;
+ next-level-cache = <&L2_0>;
+ };
+
+ L2_0: l2-cache0 {
+ compatible = "cache";
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x80000000>; /* 2GB @ 2GB */
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x2000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ smbclk: osc@4 {
+ /* SMC clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 4>;
+ freq-range = <40000000 40000000>;
+ #clock-cells = <0>;
+ clock-output-names = "smclk";
+ };
+
+ volt@0 {
+ /* VIO to expansion board above */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "VIO_UP";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ };
+
+ volt@1 {
+ /* 12V from power connector J6 */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 1>;
+ regulator-name = "12";
+ regulator-always-on;
+ };
+
+ temp@0 {
+ /* FPGA temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "FPGA";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 1 &gic GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 2 &gic GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 3 &gic GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 4 &gic GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 5 &gic GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 6 &gic GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 7 &gic GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 8 &gic GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 9 &gic GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 10 &gic GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 11 &gic GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 12 &gic GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 13 &gic GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 14 &gic GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 15 &gic GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 16 &gic GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 17 &gic GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 18 &gic GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 19 &gic GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 20 &gic GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 21 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 22 &gic GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 23 &gic GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 24 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 25 &gic GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 26 &gic GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 27 &gic GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 28 &gic GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 29 &gic GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 30 &gic GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 31 &gic GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 32 &gic GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 33 &gic GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 34 &gic GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 35 &gic GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 36 &gic GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 37 &gic GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 38 &gic GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 39 &gic GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 40 &gic GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 41 &gic GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 42 &gic GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
+
+ /include/ "../../../../arm/boot/dts/vexpress-v2m-rs1.dtsi"
+ };
+};
diff --git a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
index d8c0bdc51882..9cb7cf94284a 100644
--- a/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
+++ b/arch/arm64/boot/dts/cavium/thunder-88xx.dtsi
@@ -376,10 +376,19 @@
gic0: interrupt-controller@8010,00000000 {
compatible = "arm,gic-v3";
#interrupt-cells = <3>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
interrupt-controller;
reg = <0x8010 0x00000000 0x0 0x010000>, /* GICD */
<0x8010 0x80000000 0x0 0x600000>; /* GICR */
interrupts = <1 9 0xf04>;
+
+ its: gic-its@8010,00020000 {
+ compatible = "arm,gic-v3-its";
+ msi-controller;
+ reg = <0x8010 0x20000 0x0 0x200000>;
+ };
};
uaa0: serial@87e0,24000000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index f38c94f1d898..4e17e7ede33d 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -83,6 +83,7 @@ CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_CEVA=y
CONFIG_AHCI_XGENE=y
CONFIG_PATA_PLATFORM=y
CONFIG_PATA_OF_PLATFORM=y
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 39248d3adf5d..406485ed110a 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -19,6 +19,14 @@
#include <asm/psci.h>
#include <asm/smp_plat.h>
+/* Macros for consistency checks of the GICC subtable of MADT */
+#define ACPI_MADT_GICC_LENGTH \
+ (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
+
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
+ (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
/* ACPI table mapping after acpi_gbl_permanent_mmap is set */
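
The new length check encodes the fact that the MADT GICC subtable grew between ACPI 5.1 and ACPI 6.0. Below is a plain-C restatement of what the macro evaluates, with the sizes taken directly from the macro itself; the function name is illustrative only.

/* GICC subtable length: 76 bytes for FADT revision < 6 (ACPI 5.1),
 * 80 bytes from revision 6 onwards (ACPI 6.0). */
static bool example_gicc_ok(struct acpi_madt_generic_interrupt *gicc,
                            unsigned long end)
{
        unsigned long expected = acpi_gbl_FADT.header.revision < 6 ? 76 : 80;

        return gicc &&
               (unsigned long)gicc + sizeof(*gicc) <= end &&
               gicc->header.length == expected;
}
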
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a7691a378668..f860bfda454a 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -352,8 +352,8 @@ el1_inv:
// TODO: add support for undefined instructions in kernel mode
enable_dbg
mov x0, sp
+ mov x2, x1
mov x1, #BAD_SYNC
- mrs x2, esr_el1
b bad_mode
ENDPROC(el1_sync)
@@ -553,7 +553,7 @@ el0_inv:
ct_user_exit
mov x0, sp
mov x1, #BAD_SYNC
- mrs x2, esr_el1
+ mov x2, x25
bl bad_mode
b ret_to_user
ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S
index bd9bfaa9269b..f332d5d1f6b4 100644
--- a/arch/arm64/kernel/entry32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -32,13 +32,11 @@
ENTRY(compat_sys_sigreturn_wrapper)
mov x0, sp
- mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_sigreturn
ENDPROC(compat_sys_sigreturn_wrapper)
ENTRY(compat_sys_rt_sigreturn_wrapper)
mov x0, sp
- mov x27, #0 // prevent syscall restart handling (why)
b compat_sys_rt_sigreturn
ENDPROC(compat_sys_rt_sigreturn_wrapper)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 695801a54ca5..50fb4696654e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -438,7 +438,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
struct acpi_madt_generic_interrupt *processor;
processor = (struct acpi_madt_generic_interrupt *)header;
- if (BAD_MADT_ENTRY(processor, end))
+ if (BAD_MADT_GICC_ENTRY(processor, end))
return -EINVAL;
acpi_table_print_madt_entry(header);
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 9d84feb41a16..773d37a14039 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -4,5 +4,3 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
context.o proc.o pageattr.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_ARM64_PTDUMP) += dump.o
-
-CFLAGS_mmu.o := -I$(srctree)/scripts/dtc/libfdt/
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 4dda9bd6b8fb..e989cee77414 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -1464,7 +1464,7 @@ static inline void handle_rx_packet(struct sync_port *port)
if (port->write_ts_idx == NBR_IN_DESCR)
port->write_ts_idx = 0;
idx = port->write_ts_idx++;
- do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+ ktime_get_ts(&port->timestamp[idx]);
port->in_buffer_len += port->inbufchunk;
}
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2a14585c90d2..aab7e46cadd5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2231,7 +2231,7 @@ config MIPS_CMP
config MIPS_CPS
bool "MIPS Coherent Processing System support"
- depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
+ depends on SYS_SUPPORTS_MIPS_CPS
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index 37c08a27b4f0..c9f7e231e66b 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
* Author: Xiang Gao, gaoxiang@ict.ac.cn
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 2b25d1ba1ea0..16f1ea9ab191 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,6 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
+extern cpumask_t cpu_foreign_map;
#define raw_smp_processor_id() (current_thread_info()->cpu)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index c0c5e5972256..d8f9b357b222 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -600,7 +600,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case blezl_op: /* not really i_format */
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case blez_op:
/*
@@ -635,7 +635,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
break;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
goto sigill_r6;
case bgtz_op:
/*
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 55b759a0019e..1b6ca634e646 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -60,7 +60,7 @@ LEAF(mips_cps_core_entry)
nop
/* This is an NMI */
- la k0, nmi_handler
+ PTR_LA k0, nmi_handler
jr k0
nop
@@ -107,10 +107,10 @@ not_nmi:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- add a1, a0, t1
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
1: cache Index_Store_Tag_I, 0(a0)
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
bne a0, a1, 1b
nop
icache_done:
@@ -134,12 +134,12 @@ icache_done:
mul t1, t1, t0
mul t1, t1, t2
- li a0, KSEG0
- addu a1, a0, t1
- subu a1, a1, t0
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
1: cache Index_Store_Tag_D, 0(a0)
bne a0, a1, 1b
- add a0, a0, t0
+ PTR_ADD a0, a0, t0
dcache_done:
/* Set Kseg0 CCA to that in s0 */
@@ -152,11 +152,11 @@ dcache_done:
/* Enter the coherent domain */
li t0, 0xff
- sw t0, GCR_CL_COHERENCE_OFS(v1)
+ PTR_S t0, GCR_CL_COHERENCE_OFS(v1)
ehb
/* Jump to kseg0 */
- la t0, 1f
+ PTR_LA t0, 1f
jr t0
nop
@@ -178,9 +178,9 @@ dcache_done:
nop
/* Off we go! */
- lw t1, VPEBOOTCFG_PC(v0)
- lw gp, VPEBOOTCFG_GP(v0)
- lw sp, VPEBOOTCFG_SP(v0)
+ PTR_L t1, VPEBOOTCFG_PC(v0)
+ PTR_L gp, VPEBOOTCFG_GP(v0)
+ PTR_L sp, VPEBOOTCFG_SP(v0)
jr t1
nop
END(mips_cps_core_entry)
@@ -217,7 +217,7 @@ LEAF(excep_intex)
.org 0x480
LEAF(excep_ejtag)
- la k0, ejtag_debug_handler
+ PTR_LA k0, ejtag_debug_handler
jr k0
nop
END(excep_ejtag)
@@ -229,7 +229,7 @@ LEAF(mips_cps_core_init)
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
/* Only allow 1 TC per VPE to execute... */
@@ -237,7 +237,7 @@ LEAF(mips_cps_core_init)
/* ...and for the moment only 1 VPE */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
@@ -250,25 +250,25 @@ LEAF(mips_cps_core_init)
mfc0 t0, CP0_MVPCONF0
srl t0, t0, MVPCONF0_PVPE_SHIFT
andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
- addiu t7, t0, 1
+ addiu ta3, t0, 1
/* If there's only 1, we're done */
beqz t0, 2f
nop
/* Loop through each VPE within this core */
- li t5, 1
+ li ta1, 1
1: /* Operate on the appropriate TC */
- mtc0 t5, CP0_VPECONTROL
+ mtc0 ta1, CP0_VPECONTROL
ehb
/* Bind TC to VPE (1:1 TC:VPE mapping) */
- mttc0 t5, CP0_TCBIND
+ mttc0 ta1, CP0_TCBIND
/* Set exclusive TC, non-active, master */
li t0, VPECONF0_MVP
- sll t1, t5, VPECONF0_XTC_SHIFT
+ sll t1, ta1, VPECONF0_XTC_SHIFT
or t0, t0, t1
mttc0 t0, CP0_VPECONF0
@@ -280,8 +280,8 @@ LEAF(mips_cps_core_init)
mttc0 t0, CP0_TCHALT
/* Next VPE */
- addiu t5, t5, 1
- slt t0, t5, t7
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
bnez t0, 1b
nop
@@ -298,19 +298,19 @@ LEAF(mips_cps_core_init)
LEAF(mips_cps_boot_vpes)
/* Retrieve CM base address */
- la t0, mips_cm_base
- lw t0, 0(t0)
+ PTR_LA t0, mips_cm_base
+ PTR_L t0, 0(t0)
/* Calculate a pointer to this cores struct core_boot_config */
- lw t0, GCR_CL_ID_OFS(t0)
+ PTR_L t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
- la t1, mips_cps_core_bootcfg
- lw t1, 0(t1)
- addu t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU t0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
- has_mt t6, 1f
+ has_mt ta2, 1f
li t9, 0
/* Find the number of VPEs present in the core */
@@ -334,24 +334,24 @@ LEAF(mips_cps_boot_vpes)
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
mul v0, t9, t1
- lw t7, COREBOOTCFG_VPECONFIG(t0)
- addu v0, v0, t7
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
+ PTR_ADDU v0, v0, ta3
#ifdef CONFIG_MIPS_MT
/* If the core doesn't support MT then return */
- bnez t6, 1f
+ bnez ta2, 1f
nop
jr ra
nop
.set push
- .set mips32r2
+ .set mips64r2
.set mt
1: /* Enter VPE configuration state */
dvpe
- la t1, 1f
+ PTR_LA t1, 1f
jr.hb t1
nop
1: mfc0 t1, CP0_MVPCONTROL
@@ -360,12 +360,12 @@ LEAF(mips_cps_boot_vpes)
ehb
/* Loop through each VPE */
- lw t6, COREBOOTCFG_VPEMASK(t0)
- move t8, t6
- li t5, 0
+ PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
+ move t8, ta2
+ li ta1, 0
/* Check whether the VPE should be running. If not, skip it */
-1: andi t0, t6, 1
+1: andi t0, ta2, 1
beqz t0, 2f
nop
@@ -373,7 +373,7 @@ LEAF(mips_cps_boot_vpes)
mfc0 t0, CP0_VPECONTROL
ori t0, t0, VPECONTROL_TARGTC
xori t0, t0, VPECONTROL_TARGTC
- or t0, t0, t5
+ or t0, t0, ta1
mtc0 t0, CP0_VPECONTROL
ehb
@@ -384,8 +384,8 @@ LEAF(mips_cps_boot_vpes)
/* Calculate a pointer to the VPEs struct vpe_boot_config */
li t0, VPEBOOTCFG_SIZE
- mul t0, t0, t5
- addu t0, t0, t7
+ mul t0, t0, ta1
+ addu t0, t0, ta3
/* Set the TC restart PC */
lw t1, VPEBOOTCFG_PC(t0)
@@ -423,9 +423,9 @@ LEAF(mips_cps_boot_vpes)
mttc0 t0, CP0_VPECONF0
/* Next VPE */
-2: srl t6, t6, 1
- addiu t5, t5, 1
- bnez t6, 1b
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
nop
/* Leave VPE configuration state */
@@ -445,7 +445,7 @@ LEAF(mips_cps_boot_vpes)
/* This VPE should be offline, halt the TC */
li t0, TCHALT_H
mtc0 t0, CP0_TCHALT
- la t0, 1f
+ PTR_LA t0, 1f
1: jr.hb t0
nop
@@ -466,10 +466,10 @@ LEAF(mips_cps_boot_vpes)
.set noat
lw $1, TI_CPU(gp)
sll $1, $1, LONGLOG
- la \dest, __per_cpu_offset
+ PTR_LA \dest, __per_cpu_offset
addu $1, $1, \dest
lw $1, 0($1)
- la \dest, cps_cpu_state
+ PTR_LA \dest, cps_cpu_state
addu \dest, \dest, $1
.set pop
.endm
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 6e8de80bb446..4cc13508d967 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -73,10 +73,11 @@ NESTED(handle_sys, PT_SIZE, sp)
.set noreorder
.set nomacro
-1: user_lw(t5, 16(t0)) # argument #5 from usp
-4: user_lw(t6, 20(t0)) # argument #6 from usp
-3: user_lw(t7, 24(t0)) # argument #7 from usp
-2: user_lw(t8, 28(t0)) # argument #8 from usp
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
sw t5, 16(sp) # argument #5 to ksp
sw t6, 20(sp) # argument #6 to ksp
@@ -85,10 +86,10 @@ NESTED(handle_sys, PT_SIZE, sp)
.set pop
.section __ex_table,"a"
- PTR 1b,bad_stack
- PTR 2b,bad_stack
- PTR 3b,bad_stack
- PTR 4b,bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -153,8 +154,8 @@ syscall_trace_entry:
/* ------------------------------------------------------------------------ */
/*
- * The stackpointer for a call with more than 4 arguments is bad.
- * We probably should handle this case a bit more drastic.
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We probably should handle this case a bit more drastically.
*/
bad_stack:
li v0, EFAULT
@@ -163,6 +164,22 @@ bad_stack:
sw t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
/*
* The system call does not exist in this kernel
*/
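
A conceptual restatement in C of what the relabelled exception-table entries do: a fault while reading an optional syscall argument from the user stack now zeroes just that argument and resumes with the next load, instead of failing the whole syscall with EFAULT. The helper below is illustrative, not kernel code.

static unsigned long example_fetch_arg(const unsigned long __user *usp, int idx)
{
        unsigned long val;

        if (get_user(val, usp + idx))
                return 0;       /* fixup path: an unreadable slot reads as 0 */
        return val;
}
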
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index d07b210fbeff..f543ff4feef9 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -69,16 +69,17 @@ NESTED(handle_sys, PT_SIZE, sp)
daddu t1, t0, 32
bltz t1, bad_stack
-1: lw a4, 16(t0) # argument #5 from usp
-2: lw a5, 20(t0) # argument #6 from usp
-3: lw a6, 24(t0) # argument #7 from usp
-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
.section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- PTR 3b, bad_stack
- PTR 4b, bad_stack
+ PTR load_a4, bad_stack_a4
+ PTR load_a5, bad_stack_a5
+ PTR load_a6, bad_stack_a6
+ PTR load_a7, bad_stack_a7
.previous
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -167,6 +168,22 @@ bad_stack:
sd t0, PT_R7(sp)
j o32_syscall_exit
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
not_o32_scall:
/*
* This is not an o32 compatibility syscall, pass it on
@@ -383,7 +400,7 @@ EXPORT(sys32_call_table)
PTR sys_connect /* 4170 */
PTR sys_getpeername
PTR sys_getsockname
- PTR sys_getsockopt
+ PTR compat_sys_getsockopt
PTR sys_listen
PTR compat_sys_recv /* 4175 */
PTR compat_sys_recvfrom
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index be73c491182b..008b3378653a 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -337,6 +337,11 @@ static void __init bootmem_init(void)
min_low_pfn = start;
if (end <= reserved_end)
continue;
+#ifdef CONFIG_BLK_DEV_INITRD
+ /* mapstart should be after initrd_end */
+ if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
+ continue;
+#endif
if (start >= mapstart)
continue;
mapstart = max(reserved_end, start);
@@ -366,14 +371,6 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * mapstart should be after initrd_end
- */
- if (initrd_end)
- mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
/*
* Initialize the boot-time allocator with low memory only.
*/
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 4251d390b5b6..c88937745b4e 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -133,7 +133,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
/*
* Patch the start of mips_cps_core_entry to provide:
*
- * v0 = CM base address
+ * v1 = CM base address
* s0 = kseg0 CCA
*/
entry_code = (u32 *)&mips_cps_core_entry;
@@ -369,7 +369,7 @@ void play_dead(void)
static void wait_for_sibling_halt(void *ptr_cpu)
{
- unsigned cpu = (unsigned)ptr_cpu;
+ unsigned cpu = (unsigned long)ptr_cpu;
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned halted;
unsigned long flags;
@@ -430,7 +430,7 @@ static void cps_cpu_die(unsigned int cpu)
*/
err = smp_call_function_single(cpu_death_sibling,
wait_for_sibling_halt,
- (void *)cpu, 1);
+ (void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index faa46ebd9dda..d0744cc77ea7 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -63,6 +63,13 @@ EXPORT_SYMBOL(cpu_sibling_map);
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
@@ -103,6 +110,29 @@ static inline void set_cpu_core_map(int cpu)
}
}
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+static inline void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpu_data[i].package == cpu_data[k].package &&
+ cpu_data[i].core == cpu_data[k].core)
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+}
+
struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);
@@ -146,6 +176,8 @@ asmlinkage void start_secondary(void)
set_cpu_sibling_map(cpu);
set_cpu_core_map(cpu);
+ calculate_cpu_foreign_map();
+
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
@@ -173,9 +205,18 @@ void __irq_entry smp_call_function_interrupt(void)
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU:
+ * Remove this CPU. Be a bit slow here and
+ * set the bits for every online CPU so we don't miss
+ * any IPI whilst taking this VPE down.
*/
+
+ cpumask_copy(&cpu_foreign_map, cpu_online_mask);
+
+ /* Make it visible to every other CPU */
+ smp_mb();
+
set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
local_irq_disable();
while (1);
}
@@ -197,6 +238,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
mp_ops->prepare_cpus(max_cpus);
set_cpu_sibling_map(0);
set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
init_cpu_present(cpu_possible_mask);
#endif
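
For reference, a hedged standalone version of the foreign-map recomputation added above. The helper name is invented, and the temporary mask is cleared explicitly before the scan, since an on-stack cpumask starts out with indeterminate contents.

static void example_recompute_cpu_foreign_map(void)
{
        cpumask_t tmp;
        int cpu, other;
        bool core_seen;

        cpumask_clear(&tmp);                    /* start from an empty mask */
        for_each_online_cpu(cpu) {
                core_seen = false;
                for_each_cpu(other, &tmp)
                        if (cpu_data[cpu].package == cpu_data[other].package &&
                            cpu_data[cpu].core == cpu_data[other].core)
                                core_seen = true;
                if (!core_seen)
                        cpumask_set_cpu(cpu, &tmp);     /* first VPE of this core */
        }
        cpumask_copy(&cpu_foreign_map, &tmp);
}
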
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 2a7b38ed23f0..e207a43b5f8f 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2130,10 +2130,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
- /* Boot CPU's cache setup in setup_arch(). */
- if (!is_boot_cpu)
- cpu_cache_init();
- tlb_init();
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
TLBMISS_HANDLER_SETUP();
}
diff --git a/arch/mips/loongson64/common/bonito-irq.c b/arch/mips/loongson64/common/bonito-irq.c
index cc0e4fd548e6..4e116d23bab3 100644
--- a/arch/mips/loongson64/common/bonito-irq.c
+++ b/arch/mips/loongson64/common/bonito-irq.c
@@ -3,7 +3,7 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
* Copyright (C) 2000, 2001 Ralf Baechle (ralf@gnu.org)
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/cmdline.c b/arch/mips/loongson64/common/cmdline.c
index 72fed003a536..01fbed137028 100644
--- a/arch/mips/loongson64/common/cmdline.c
+++ b/arch/mips/loongson64/common/cmdline.c
@@ -6,7 +6,7 @@
* Copyright 2003 ICT CAS
* Author: Michael Guo <guoyi@ict.ac.cn>
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index 12c75db23420..875037063a80 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -1,7 +1,7 @@
/*
* CS5536 General timer functions
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index 22f04ca2ff3e..f6c44dd332e2 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -6,7 +6,7 @@
* Copyright 2003 ICT CAS
* Author: Michael Guo <guoyi@ict.ac.cn>
*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* Copyright (C) 2009 Lemote Inc.
diff --git a/arch/mips/loongson64/common/irq.c b/arch/mips/loongson64/common/irq.c
index 687003b19b45..d36d969a4a87 100644
--- a/arch/mips/loongson64/common/irq.c
+++ b/arch/mips/loongson64/common/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/common/setup.c b/arch/mips/loongson64/common/setup.c
index d477dd6bb326..2dc5122f0e09 100644
--- a/arch/mips/loongson64/common/setup.c
+++ b/arch/mips/loongson64/common/setup.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/fuloong-2e/irq.c b/arch/mips/loongson64/fuloong-2e/irq.c
index ef5ec8f3de5f..892963f860b7 100644
--- a/arch/mips/loongson64/fuloong-2e/irq.c
+++ b/arch/mips/loongson64/fuloong-2e/irq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2007 Lemote Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/arch/mips/loongson64/lemote-2f/clock.c b/arch/mips/loongson64/lemote-2f/clock.c
index 462e34d46b4a..a78fb657068c 100644
--- a/arch/mips/loongson64/lemote-2f/clock.c
+++ b/arch/mips/loongson64/lemote-2f/clock.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 - 2008 Lemote Inc. & Insititute of Computing Technology
+ * Copyright (C) 2006 - 2008 Lemote Inc. & Institute of Computing Technology
* Author: Yanhua, yanh@lemote.com
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -15,7 +15,7 @@
#include <linux/spinlock.h>
#include <asm/clock.h>
-#include <asm/mach-loongson/loongson.h>
+#include <asm/mach-loongson64/loongson.h>
static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 12d14ed48778..6f9e010cec4d 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2010 Loongson Inc. & Lemote Inc. &
- * Insititute of Computing Technology
+ * Institute of Computing Technology
* Author: Xiang Gao, gaoxiang@ict.ac.cn
* Huacai Chen, chenhc@lemote.com
* Xiaofu Meng, Shuangshuang Zhang
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 22b9b2cb9219..712f17a2ecf2 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -451,7 +451,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
/* Fall through */
case jr_op:
/* For R6, JR already emulated in jalr_op */
- if (NO_R6EMU && insn.r_format.opcode == jr_op)
+ if (NO_R6EMU && insn.r_format.func == jr_op)
break;
*contpc = regs->regs[insn.r_format.rs];
return 1;
@@ -551,7 +551,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
case blezl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
break;
case blez_op:
@@ -588,7 +588,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
case bgtzl_op:
- if (NO_R6EMU)
+ if (!insn.i_format.rt && NO_R6EMU)
break;
case bgtz_op:
/*
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 7f660dc67596..fbea4432f3f2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -37,6 +37,7 @@
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
+#include <asm/mips-cm.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -51,9 +52,16 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
preempt_disable();
-#ifndef CONFIG_MIPS_MT_SMP
- smp_call_function(func, info, 1);
-#endif
+ /*
+ * The Coherent Manager propagates address-based cache ops to other
+ * cores but not index-based ops. However, r4k_on_each_cpu is used
+ * in both cases so there is no easy way to tell what kind of op is
+ * executed to the other cores. The best we can probably do is
+ * to restrict that call when a CM is not present because both
+ * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
+ */
+ if (!mips_cm_present())
+ smp_call_function_many(&cpu_foreign_map, func, info, 1);
func(info);
preempt_enable();
}
@@ -937,7 +945,9 @@ static void b5k_instruction_hazard(void)
}
static char *way_string[] = { NULL, "direct mapped", "2-way",
- "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+ "9-way", "10-way", "11-way", "12-way",
+ "13-way", "14-way", "15-way", "16-way",
};
static void probe_pcache(void)
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 185e68261f45..5625b190edc0 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -119,18 +119,24 @@ void read_persistent_clock(struct timespec *ts)
int get_c0_fdc_int(void)
{
- int mips_cpu_fdc_irq;
+ /*
+ * Some cores claim the FDC is routable through the GIC, but it doesn't
+ * actually seem to be connected for those Malta bitstreams.
+ */
+ switch (current_cpu_type()) {
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ return -1;
+ };
if (cpu_has_veic)
- mips_cpu_fdc_irq = -1;
+ return -1;
else if (gic_present)
- mips_cpu_fdc_irq = gic_get_c0_fdc_int();
+ return gic_get_c0_fdc_int();
else if (cp0_fdc_irq >= 0)
- mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
+ return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
else
- mips_cpu_fdc_irq = -1;
-
- return mips_cpu_fdc_irq;
+ return -1;
}
int get_c0_perfcount_int(void)
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index d2dc836523a3..8bd8ebb20a72 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -63,13 +63,19 @@ void __init plat_mem_setup(void)
plat_setup_iocoherency();
}
-#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
+#define DEFAULT_CDMM_BASE_ADDR 0x1bdd0000
phys_addr_t mips_cpc_default_phys_base(void)
{
return DEFAULT_CPC_BASE_ADDR;
}
+phys_addr_t mips_cdmm_phys_base(void)
+{
+ return DEFAULT_CDMM_BASE_ADDR;
+}
+
static void __init mips_nmi_setup(void)
{
void *base;
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 67889fcea8aa..7c73fcb92a10 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -27,6 +27,11 @@ int get_c0_perfcount_int(void)
return gic_get_c0_perfcount_int();
}
+int get_c0_fdc_int(void)
+{
+ return gic_get_c0_fdc_int();
+}
+
void __init plat_time_init(void)
{
struct device_node *np;
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index 0a183756d6ec..f93c4a4e6580 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -16,7 +16,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
-extern spinlock_t pa_dbit_lock;
+extern spinlock_t pa_tlb_lock;
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -33,6 +33,19 @@ extern spinlock_t pa_dbit_lock;
*/
#define kern_addr_valid(addr) (1)
+/* Purge data and instruction TLB entries. Must be called holding
+ * the pa_tlb_lock. The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+
+static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
+{
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ if (unlikely(split_tlb))
+ pitlb(addr);
+}
+
/* Certain architectures need to do special things when PTEs
* within a page table are directly modified. Thus, the following
* hook is made available.
@@ -42,15 +55,20 @@ extern spinlock_t pa_dbit_lock;
*(pteptr) = (pteval); \
} while(0)
-extern void purge_tlb_entries(struct mm_struct *, unsigned long);
+#define pte_inserted(x) \
+ ((pte_val(x) & (_PAGE_PRESENT|_PAGE_ACCESSED)) \
+ == (_PAGE_PRESENT|_PAGE_ACCESSED))
-#define set_pte_at(mm, addr, ptep, pteval) \
- do { \
+#define set_pte_at(mm, addr, ptep, pteval) \
+ do { \
+ pte_t old_pte; \
unsigned long flags; \
- spin_lock_irqsave(&pa_dbit_lock, flags); \
- set_pte(ptep, pteval); \
- purge_tlb_entries(mm, addr); \
- spin_unlock_irqrestore(&pa_dbit_lock, flags); \
+ spin_lock_irqsave(&pa_tlb_lock, flags); \
+ old_pte = *ptep; \
+ set_pte(ptep, pteval); \
+ if (pte_inserted(old_pte)) \
+ purge_tlb_entries(mm, addr); \
+ spin_unlock_irqrestore(&pa_tlb_lock, flags); \
} while (0)
#endif /* !__ASSEMBLY__ */
@@ -268,7 +286,7 @@ extern unsigned long *empty_zero_page;
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp) do { pte_val(*(xp)) = 0; } while (0)
+#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -435,15 +453,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(*ptep))
return 0;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
pte = *ptep;
if (!pte_young(pte)) {
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 0;
}
set_pte(ptep, pte_mkold(pte));
purge_tlb_entries(vma->vm_mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return 1;
}
@@ -453,11 +471,12 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t old_pte;
unsigned long flags;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
old_pte = *ptep;
- pte_clear(mm,addr,ptep);
- purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ set_pte(ptep, __pte(0));
+ if (pte_inserted(old_pte))
+ purge_tlb_entries(mm, addr);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
return old_pte;
}
@@ -465,10 +484,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
unsigned long flags;
- spin_lock_irqsave(&pa_dbit_lock, flags);
+ spin_lock_irqsave(&pa_tlb_lock, flags);
set_pte(ptep, pte_wrprotect(*ptep));
purge_tlb_entries(mm, addr);
- spin_unlock_irqrestore(&pa_dbit_lock, flags);
+ spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
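
A readable function-style restatement of the new set_pte_at() rule: the PTE is updated under pa_tlb_lock, and the slow, broadcast TLB purge is issued only when the previous PTE could actually have been loaded into the TLB, i.e. it had both _PAGE_PRESENT and _PAGE_ACCESSED set. The function name is invented; the macros are the ones defined above.

static inline void example_update_pte(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep, pte_t newval)
{
        unsigned long flags;
        pte_t old;

        spin_lock_irqsave(&pa_tlb_lock, flags);
        old = *ptep;
        set_pte(ptep, newval);
        if (pte_inserted(old))                  /* present + accessed */
                purge_tlb_entries(mm, addr);
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
}
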
diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h
index 9d086a599fa0..e84b96478193 100644
--- a/arch/parisc/include/asm/tlbflush.h
+++ b/arch/parisc/include/asm/tlbflush.h
@@ -13,6 +13,9 @@
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
* it on all systems not just the N class.
+ *
+ * It is also used to ensure PTE updates are atomic and consistent
+ * with the TLB.
*/
extern spinlock_t pa_tlb_lock;
@@ -24,20 +27,24 @@ extern void flush_tlb_all_local(void *);
#define smp_flush_tlb_all() flush_tlb_all()
+int __flush_tlb_range(unsigned long sid,
+ unsigned long start, unsigned long end);
+
+#define flush_tlb_range(vma, start, end) \
+ __flush_tlb_range((vma)->vm_mm->context, start, end)
+
+#define flush_tlb_kernel_range(start, end) \
+ __flush_tlb_range(0, start, end)
+
/*
* flush_tlb_mm()
*
- * XXX This code is NOT valid for HP-UX compatibility processes,
- * (although it will probably work 99% of the time). HP-UX
- * processes are free to play with the space id's and save them
- * over long periods of time, etc. so we have to preserve the
- * space and just flush the entire tlb. We need to check the
- * personality in order to do that, but the personality is not
- * currently being set correctly.
- *
- * Of course, Linux processes could do the same thing, but
- * we don't support that (and the compilers, dynamic linker,
- * etc. do not do that).
+ * The code to switch to a new context is NOT valid for processes
+ * which play with the space id's. Thus, we have to preserve the
+ * space and just flush the entire tlb. However, the compilers,
+ * dynamic linker, etc., do not manipulate space id's, so there
+ * could be a significant performance benefit in switching contexts
+ * and not flushing the whole tlb.
*/
static inline void flush_tlb_mm(struct mm_struct *mm)
@@ -45,10 +52,18 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
BUG_ON(mm == &init_mm); /* Should never happen */
#if 1 || defined(CONFIG_SMP)
+ /* Except for very small threads, flushing the whole TLB is
+ * faster than using __flush_tlb_range. The pdtlb and pitlb
+ * instructions are very slow because of the TLB broadcast.
+ * It might be faster to do local range flushes on all CPUs
+ * on PA 2.0 systems.
+ */
flush_tlb_all();
#else
/* FIXME: currently broken, causing space id and protection ids
- * to go out of sync, resulting in faults on userspace accesses.
+ * to go out of sync, resulting in faults on userspace accesses.
+ * This approach needs further investigation since running many
+ * small applications (e.g., GCC testsuite) is faster on HP-UX.
*/
if (mm) {
if (mm->context != 0)
@@ -65,22 +80,12 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
{
unsigned long flags, sid;
- /* For one page, it's not worth testing the split_tlb variable */
-
- mb();
sid = vma->vm_mm->context;
purge_tlb_start(flags);
mtsp(sid, 1);
pdtlb(addr);
- pitlb(addr);
+ if (unlikely(split_tlb))
+ pitlb(addr);
purge_tlb_end(flags);
}
-
-void __flush_tlb_range(unsigned long sid,
- unsigned long start, unsigned long end);
-
-#define flush_tlb_range(vma,start,end) __flush_tlb_range((vma)->vm_mm->context,start,end)
-
-#define flush_tlb_kernel_range(start, end) __flush_tlb_range(0,start,end)
-
#endif
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index f6448c7c62b5..cda6dbbe9842 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -342,12 +342,15 @@ EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
-int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+static unsigned long parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;
+
+#define FLUSH_TLB_THRESHOLD (2*1024*1024) /* 2MB initial TLB threshold */
+static unsigned long parisc_tlb_flush_threshold __read_mostly = FLUSH_TLB_THRESHOLD;
void __init parisc_setup_cache_timing(void)
{
unsigned long rangetime, alltime;
- unsigned long size;
+ unsigned long size, start;
alltime = mfctl(16);
flush_data_cache();
@@ -364,14 +367,43 @@ void __init parisc_setup_cache_timing(void)
/* Racy, but if we see an intermediate value, it's ok too... */
parisc_cache_flush_threshold = size * alltime / rangetime;
- parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) &~ (L1_CACHE_BYTES - 1);
+ parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold);
if (!parisc_cache_flush_threshold)
parisc_cache_flush_threshold = FLUSH_THRESHOLD;
if (parisc_cache_flush_threshold > cache_info.dc_size)
parisc_cache_flush_threshold = cache_info.dc_size;
- printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n", parisc_cache_flush_threshold, num_online_cpus());
+ printk(KERN_INFO "Setting cache flush threshold to %lu kB\n",
+ parisc_cache_flush_threshold/1024);
+
+ /* calculate TLB flush threshold */
+
+ alltime = mfctl(16);
+ flush_tlb_all();
+ alltime = mfctl(16) - alltime;
+
+ size = PAGE_SIZE;
+ start = (unsigned long) _text;
+ rangetime = mfctl(16);
+ while (start < (unsigned long) _end) {
+ flush_tlb_kernel_range(start, start + PAGE_SIZE);
+ start += PAGE_SIZE;
+ size += PAGE_SIZE;
+ }
+ rangetime = mfctl(16) - rangetime;
+
+ printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes took %lu cycles\n",
+ alltime, size, rangetime);
+
+ parisc_tlb_flush_threshold = size * alltime / rangetime;
+ parisc_tlb_flush_threshold *= num_online_cpus();
+ parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold);
+ if (!parisc_tlb_flush_threshold)
+ parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
+
+ printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n",
+ parisc_tlb_flush_threshold/1024);
}
extern void purge_kernel_dcache_page_asm(unsigned long);
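The calibration above picks the range-flush/full-flush crossover empirically rather than hard-coding it. As a purely illustrative calculation (all numbers made up): if flush_tlb_all() takes 120,000 cycles and flushing a 1 MiB kernel-text range page by page takes 960,000 cycles, the break-even size is 1 MiB * 120000 / 960000 = 128 KiB; scaled by 4 online CPUs and page-aligned, parisc_tlb_flush_threshold would land at 512 KiB, so only ranges larger than that fall back to flush_tlb_all().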
@@ -403,48 +435,45 @@ void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
}
EXPORT_SYMBOL(copy_user_page);
-void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
-{
- unsigned long flags;
-
- /* Note: purge_tlb_entries can be called at startup with
- no context. */
-
- purge_tlb_start(flags);
- mtsp(mm->context, 1);
- pdtlb(addr);
- pitlb(addr);
- purge_tlb_end(flags);
-}
-EXPORT_SYMBOL(purge_tlb_entries);
-
-void __flush_tlb_range(unsigned long sid, unsigned long start,
- unsigned long end)
+/* __flush_tlb_range()
+ *
+ * returns 1 if all TLBs were flushed.
+ */
+int __flush_tlb_range(unsigned long sid, unsigned long start,
+ unsigned long end)
{
- unsigned long npages;
+ unsigned long flags, size;
- npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
+ size = (end - start);
+ if (size >= parisc_tlb_flush_threshold) {
flush_tlb_all();
- else {
- unsigned long flags;
+ return 1;
+ }
+ /* Purge TLB entries for small ranges using the pdtlb and
+ pitlb instructions. These instructions execute locally
+ but cause a purge request to be broadcast to other TLBs. */
+ if (likely(!split_tlb)) {
+ while (start < end) {
+ purge_tlb_start(flags);
+ mtsp(sid, 1);
+ pdtlb(start);
+ purge_tlb_end(flags);
+ start += PAGE_SIZE;
+ }
+ return 0;
+ }
+
+ /* split TLB case */
+ while (start < end) {
purge_tlb_start(flags);
mtsp(sid, 1);
- if (split_tlb) {
- while (npages--) {
- pdtlb(start);
- pitlb(start);
- start += PAGE_SIZE;
- }
- } else {
- while (npages--) {
- pdtlb(start);
- start += PAGE_SIZE;
- }
- }
+ pdtlb(start);
+ pitlb(start);
purge_tlb_end(flags);
+ start += PAGE_SIZE;
}
+ return 0;
}
static void cacheflush_h_tmp_function(void *dummy)
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 75819617f93b..c5ef4081b01d 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -45,7 +45,7 @@
.level 2.0
#endif
- .import pa_dbit_lock,data
+ .import pa_tlb_lock,data
/* space_to_prot macro creates a prot id from a space id */
@@ -420,8 +420,8 @@
SHLREG %r9,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
- shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
- LDREG %r0(\pmd),\pte /* pmd is now pte */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
+ LDREG %r0(\pmd),\pte
bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
.endm
@@ -453,57 +453,53 @@
L2_ptep \pgd,\pte,\index,\va,\fault
.endm
- /* Acquire pa_dbit_lock lock. */
- .macro dbit_lock spc,tmp,tmp1
+ /* Acquire pa_tlb_lock lock and recheck page is still present. */
+ .macro tlb_lock spc,ptp,pte,tmp,tmp1,fault
#ifdef CONFIG_SMP
cmpib,COND(=),n 0,\spc,2f
- load32 PA(pa_dbit_lock),\tmp
+ load32 PA(pa_tlb_lock),\tmp
1: LDCW 0(\tmp),\tmp1
cmpib,COND(=) 0,\tmp1,1b
nop
+ LDREG 0(\ptp),\pte
+ bb,<,n \pte,_PAGE_PRESENT_BIT,2f
+ b \fault
+ stw \spc,0(\tmp)
2:
#endif
.endm
- /* Release pa_dbit_lock lock without reloading lock address. */
- .macro dbit_unlock0 spc,tmp
+ /* Release pa_tlb_lock lock without reloading lock address. */
+ .macro tlb_unlock0 spc,tmp
#ifdef CONFIG_SMP
or,COND(=) %r0,\spc,%r0
stw \spc,0(\tmp)
#endif
.endm
- /* Release pa_dbit_lock lock. */
- .macro dbit_unlock1 spc,tmp
+ /* Release pa_tlb_lock lock. */
+ .macro tlb_unlock1 spc,tmp
#ifdef CONFIG_SMP
- load32 PA(pa_dbit_lock),\tmp
- dbit_unlock0 \spc,\tmp
+ load32 PA(pa_tlb_lock),\tmp
+ tlb_unlock0 \spc,\tmp
#endif
.endm
/* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
* don't needlessly dirty the cache line if it was already set */
- .macro update_ptep spc,ptep,pte,tmp,tmp1
-#ifdef CONFIG_SMP
- or,COND(=) %r0,\spc,%r0
- LDREG 0(\ptep),\pte
-#endif
+ .macro update_accessed ptp,pte,tmp,tmp1
ldi _PAGE_ACCESSED,\tmp1
or \tmp1,\pte,\tmp
and,COND(<>) \tmp1,\pte,%r0
- STREG \tmp,0(\ptep)
+ STREG \tmp,0(\ptp)
.endm
/* Set the dirty bit (and accessed bit). No need to be
* clever, this is only used from the dirty fault */
- .macro update_dirty spc,ptep,pte,tmp
-#ifdef CONFIG_SMP
- or,COND(=) %r0,\spc,%r0
- LDREG 0(\ptep),\pte
-#endif
+ .macro update_dirty ptp,pte,tmp
ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
or \tmp,\pte,\pte
- STREG \pte,0(\ptep)
+ STREG \pte,0(\ptp)
.endm
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
@@ -1148,14 +1144,14 @@ dtlb_miss_20w:
L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1174,14 +1170,14 @@ nadtlb_miss_20w:
L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1202,20 +1198,20 @@ dtlb_miss_11:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1235,21 +1231,20 @@ nadtlb_miss_11:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
-
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
idtlba pte,(%sr1,va)
idtlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1269,16 +1264,16 @@ dtlb_miss_20:
L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
idtlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1297,16 +1292,16 @@ nadtlb_miss_20:
L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
- idtlbt pte,prot
- dbit_unlock1 spc,t0
+ idtlbt pte,prot
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1406,14 +1401,14 @@ itlb_miss_20w:
L3_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1430,14 +1425,14 @@ naitlb_miss_20w:
L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1458,20 +1453,20 @@ itlb_miss_11:
L2_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1482,20 +1477,20 @@ naitlb_miss_11:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_11
+ update_accessed ptp,pte,t0,t1
make_insert_tlb_11 spc,pte,prot
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
iitlba pte,(%sr1,va)
iitlbp prot,(%sr1,va)
- mtsp t0, %sr1 /* Restore sr1 */
- dbit_unlock1 spc,t0
+ mtsp t1, %sr1 /* Restore sr1 */
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1516,16 +1511,16 @@ itlb_miss_20:
L2_ptep ptp,pte,t0,va,itlb_fault
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,itlb_fault
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1536,16 +1531,16 @@ naitlb_miss_20:
L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- dbit_lock spc,t0,t1
- update_ptep spc,ptp,pte,t0,t1
+ tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
+ update_accessed ptp,pte,t0,t1
make_insert_tlb spc,pte,prot
- f_extend pte,t0
+ f_extend pte,t1
iitlbt pte,prot
- dbit_unlock1 spc,t0
+ tlb_unlock1 spc,t0
rfir
nop
@@ -1568,14 +1563,14 @@ dbit_trap_20w:
L3_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot
idtlbt pte,prot
- dbit_unlock0 spc,t0
+ tlb_unlock0 spc,t0
rfir
nop
#else
@@ -1588,8 +1583,8 @@ dbit_trap_11:
L2_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb_11 spc,pte,prot
@@ -1600,8 +1595,8 @@ dbit_trap_11:
idtlbp prot,(%sr1,va)
mtsp t1, %sr1 /* Restore sr1 */
- dbit_unlock0 spc,t0
+ tlb_unlock0 spc,t0
rfir
nop
@@ -1612,16 +1607,16 @@ dbit_trap_20:
L2_ptep ptp,pte,t0,va,dbit_fault
- dbit_lock spc,t0,t1
- update_dirty spc,ptp,pte,t1
+ tlb_lock spc,ptp,pte,t0,t1,dbit_fault
+ update_dirty ptp,pte,t1
make_insert_tlb spc,pte,prot
f_extend pte,t1
- idtlbt pte,prot
- dbit_unlock0 spc,t0
+ idtlbt pte,prot
+ tlb_unlock0 spc,t0
rfir
nop
#endif
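For readers who do not speak PA-RISC assembly, the new tlb_lock macro behaves roughly like the C sketch below (illustration only: the real code spins on an ldcw semaphore, skips the lock entirely when spc is zero because kernel faults never race with user PTE updates, and the unlocking stw on the fault path sits in the branch delay slot of "b \fault"):

	static int tlb_lock_sketch(unsigned long spc, pte_t *ptp, pte_t *pte)
	{
		if (spc == 0)
			return 1;		/* kernel space: lock not taken */
		spin_lock(&pa_tlb_lock);	/* interrupts are already off in the miss handler */
		*pte = *ptp;			/* re-read the PTE under the lock */
		if (!(pte_val(*pte) & _PAGE_PRESENT)) {
			spin_unlock(&pa_tlb_lock);
			return 0;		/* caller branches to the fault label */
		}
		return 1;			/* caller inserts the TLB entry, then tlb_unlock* */
	}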
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index 6548fd1d2e62..b99b39f1da02 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -43,10 +43,6 @@
#include "../math-emu/math-emu.h" /* for handle_fpe() */
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-DEFINE_SPINLOCK(pa_dbit_lock);
-#endif
-
static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
struct pt_regs *regs);
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index ccde8f084ce4..112ccf497562 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -52,6 +52,22 @@
.text
/*
+ * Used by threads when the lock bit of core_idle_state is set.
+ * Threads will spin in HMT_LOW until the lock bit is cleared.
+ * r14 - pointer to core_idle_state
+ * r15 - used to load contents of core_idle_state
+ */
+
+core_idle_lock_held:
+ HMT_LOW
+3: lwz r15,0(r14)
+ andi. r15,r15,PNV_CORE_IDLE_LOCK_BIT
+ bne 3b
+ HMT_MEDIUM
+ lwarx r15,0,r14
+ blr
+
+/*
* Pass requested state in r3:
* r3 - PNV_THREAD_NAP/SLEEP/WINKLE
*
@@ -150,6 +166,10 @@ power7_enter_nap_mode:
ld r14,PACA_CORE_IDLE_STATE_PTR(r13)
lwarx_loop1:
lwarx r15,0,r14
+
+ andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+ bnel core_idle_lock_held
+
andc r15,r15,r7 /* Clear thread bit */
andi. r15,r15,PNV_CORE_IDLE_THREAD_BITS
@@ -294,7 +314,7 @@ lwarx_loop2:
* workaround undo code or resyncing timebase or restoring context
* In either case loop until the lock bit is cleared.
*/
- bne core_idle_lock_held
+ bnel core_idle_lock_held
cmpwi cr2,r15,0
lbz r4,PACA_SUBCORE_SIBLING_MASK(r13)
@@ -319,15 +339,6 @@ lwarx_loop2:
isync
b common_exit
-core_idle_lock_held:
- HMT_LOW
-core_idle_lock_loop:
- lwz r15,0(14)
- andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
- bne core_idle_lock_loop
- HMT_MEDIUM
- b lwarx_loop2
-
first_thread_in_subcore:
/* First thread in subcore to wakeup */
ori r15,r15,PNV_CORE_IDLE_LOCK_BIT
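Moving core_idle_lock_held above its callers turns it into a shared helper that is entered with bnel (branch-and-link if the lock bit was set) and returns with blr after re-issuing the lwarx, so both wakeup paths reuse it. In C terms it is roughly the sketch below (illustrative; core_idle_state stands for the word r14 points at):

	static void core_idle_lock_held_sketch(u32 *core_idle_state)
	{
		/* The asm drops to HMT_LOW for the spin and back to HMT_MEDIUM after. */
		while (READ_ONCE(*core_idle_state) & PNV_CORE_IDLE_LOCK_BIT)
			cpu_relax();
		/* The real helper re-loads the word with lwarx before returning so
		 * the caller's stwcx. still pairs with a valid reservation. */
	}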
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6530f1b8874d..37de90f8a845 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,6 +297,8 @@ long machine_check_early(struct pt_regs *regs)
__this_cpu_inc(irq_stat.mce_exceptions);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6d535973b200..a67c6d781c52 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -529,6 +529,10 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
+ case 0x600:
+ printk(KERN_ALERT "Unable to handle kernel paging request for "
+ "unaligned access at address 0x%08lx\n", regs->dar);
+ break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index ec2eb20631d1..df956295c2a7 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -320,6 +320,8 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
if (!attr)
return NULL;
+ sysfs_attr_init(&attr->attr.attr);
+
attr->var = str;
attr->attr.attr.name = name;
attr->attr.attr.mode = 0444;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 4949ef0d9400..37f959bf392e 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -237,7 +237,7 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
return elog;
}
-static void elog_work_fn(struct work_struct *work)
+static irqreturn_t elog_event(int irq, void *data)
{
__be64 size;
__be64 id;
@@ -251,7 +251,7 @@ static void elog_work_fn(struct work_struct *work)
rc = opal_get_elog_size(&id, &size, &type);
if (rc != OPAL_SUCCESS) {
pr_err("ELOG: OPAL log info read failed\n");
- return;
+ return IRQ_HANDLED;
}
elog_size = be64_to_cpu(size);
@@ -270,16 +270,10 @@ static void elog_work_fn(struct work_struct *work)
* entries.
*/
if (kset_find_obj(elog_kset, name))
- return;
+ return IRQ_HANDLED;
create_elog_obj(log_id, elog_size, elog_type);
-}
-
-static DECLARE_WORK(elog_work, elog_work_fn);
-static irqreturn_t elog_event(int irq, void *data)
-{
- schedule_work(&elog_work);
return IRQ_HANDLED;
}
@@ -304,8 +298,8 @@ int __init opal_elog_init(void)
return irq;
}
- rc = request_irq(irq, elog_event,
- IRQ_TYPE_LEVEL_HIGH, "opal-elog", NULL);
+ rc = request_threaded_irq(irq, NULL, elog_event,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "opal-elog", NULL);
if (rc) {
pr_err("%s: Can't request OPAL event irq (%d)\n",
__func__, rc);
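The elog change replaces the hard IRQ handler plus workqueue pair with a threaded IRQ, so the potentially sleeping OPAL calls run directly in the IRQ thread. The general pattern, as a sketch with hypothetical names (only request_threaded_irq(), IRQF_TRIGGER_HIGH and IRQF_ONESHOT are real kernel APIs here):

	static irqreturn_t my_event_thread(int irq, void *data)
	{
		/* May sleep: allocate memory, call firmware, create sysfs objects ... */
		return IRQ_HANDLED;
	}

	static int my_init(unsigned int irq)
	{
		/* NULL hard handler: the core's default primary handler just wakes the
		 * thread; IRQF_ONESHOT keeps the level-triggered line masked until
		 * my_event_thread() returns. */
		return request_threaded_irq(irq, NULL, my_event_thread,
					    IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					    "my-event", NULL);
	}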
diff --git a/arch/powerpc/platforms/powernv/opal-prd.c b/arch/powerpc/platforms/powernv/opal-prd.c
index 46cb3feb0a13..4ece8e40dd54 100644
--- a/arch/powerpc/platforms/powernv/opal-prd.c
+++ b/arch/powerpc/platforms/powernv/opal-prd.c
@@ -112,6 +112,7 @@ static int opal_prd_open(struct inode *inode, struct file *file)
static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t addr, size;
+ pgprot_t page_prot;
int rc;
pr_devel("opal_prd_mmap(0x%016lx, 0x%016lx, 0x%lx, 0x%lx)\n",
@@ -125,13 +126,11 @@ static int opal_prd_mmap(struct file *file, struct vm_area_struct *vma)
if (!opal_prd_range_is_valid(addr, size))
return -EINVAL;
- vma->vm_page_prot = __pgprot(pgprot_val(phys_mem_access_prot(file,
- vma->vm_pgoff,
- size, vma->vm_page_prot))
- | _PAGE_SPECIAL);
+ page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
+ size, vma->vm_page_prot);
rc = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, size,
- vma->vm_page_prot);
+ page_prot);
return rc;
}
diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
index 2bc33674ebfc..87f9623ca805 100644
--- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c
@@ -18,6 +18,7 @@
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <asm/msi_bitmap.h>
+#include <asm/ppc-pci.h>
struct ppc4xx_hsta_msi {
struct device *dev;
diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c
index 88c7016492c4..97bbb6060b25 100644
--- a/arch/tile/lib/memcpy_user_64.c
+++ b/arch/tile/lib/memcpy_user_64.c
@@ -28,7 +28,7 @@
#define _ST(p, inst, v) \
({ \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
@@ -41,7 +41,7 @@
({ \
unsigned long __v; \
asm("1: " #inst " %0, %1;" \
- ".pushsection .coldtext.memcpy,\"ax\";" \
+ ".pushsection .coldtext,\"ax\";" \
"2: { move r0, %2; jrp lr };" \
".section __ex_table,\"a\";" \
".align 8;" \
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 55bced17dc95..3dbb7e7909ca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -254,6 +254,11 @@ config ARCH_SUPPORTS_OPTIMIZED_INLINING
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
def_bool y
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN
+ default 0xdffffc0000000000
+
config HAVE_INTEL_TXT
def_bool y
depends on INTEL_IOMMU && ACPI
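The KASAN_SHADOW_OFFSET value introduced above feeds the usual x86-64 shadow mapping, where one shadow byte covers eight bytes of address space. Assuming the generic KASAN scheme, the translation is simply (addr and shadow_addr are illustrative variables):

	shadow_addr = (addr >> 3) + KASAN_SHADOW_OFFSET;	/* 3 == KASAN_SHADOW_SCALE_SHIFT */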
@@ -2015,7 +2020,7 @@ config CMDLINE_BOOL
To compile command line arguments into the kernel,
set this option to 'Y', then fill in the
- the boot arguments in CONFIG_CMDLINE.
+ boot arguments in CONFIG_CMDLINE.
Systems with fully functional boot loaders (i.e. non-embedded)
should leave this option set to 'N'.
diff --git a/arch/x86/include/asm/espfix.h b/arch/x86/include/asm/espfix.h
index 99efebb2f69d..ca3ce9ab9385 100644
--- a/arch/x86/include/asm/espfix.h
+++ b/arch/x86/include/asm/espfix.h
@@ -9,7 +9,7 @@ DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DECLARE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);
extern void init_espfix_bsp(void);
-extern void init_espfix_ap(void);
+extern void init_espfix_ap(int cpu);
#endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 8b22422fbad8..74a2a8dc9908 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -14,15 +14,11 @@
#ifndef __ASSEMBLY__
-extern pte_t kasan_zero_pte[];
-extern pte_t kasan_zero_pmd[];
-extern pte_t kasan_zero_pud[];
-
#ifdef CONFIG_KASAN
-void __init kasan_map_early_shadow(pgd_t *pgd);
+void __init kasan_early_init(void);
void __init kasan_init(void);
#else
-static inline void kasan_map_early_shadow(pgd_t *pgd) { }
+static inline void kasan_early_init(void) { }
static inline void kasan_init(void) { }
#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 28eba2d38b15..f813261d9740 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -409,12 +409,6 @@ static void __setup_vector_irq(int cpu)
int irq, vector;
struct apic_chip_data *data;
- /*
- * vector_lock will make sure that we don't run into irq vector
- * assignments that might be happening on another cpu in parallel,
- * while we setup our initial vector to irq mappings.
- */
- raw_spin_lock(&vector_lock);
/* Mark the inuse vectors */
for_each_active_irq(irq) {
data = apic_chip_data(irq_get_irq_data(irq));
@@ -436,16 +430,16 @@ static void __setup_vector_irq(int cpu)
if (!cpumask_test_cpu(cpu, data->domain))
per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
}
- raw_spin_unlock(&vector_lock);
}
/*
- * Setup the vector to irq mappings.
+ * Setup the vector to irq mappings. Must be called with vector_lock held.
*/
void setup_vector_irq(int cpu)
{
int irq;
+ lockdep_assert_held(&vector_lock);
/*
* On most of the platforms, legacy PIC delivers the interrupts on the
* boot cpu. But there are certain platforms where PIC interrupts are
diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
index 89427d8d4fc5..eec40f595ab9 100644
--- a/arch/x86/kernel/early_printk.c
+++ b/arch/x86/kernel/early_printk.c
@@ -175,7 +175,9 @@ static __init void early_serial_init(char *s)
}
if (*s) {
- if (kstrtoul(s, 0, &baud) < 0 || baud == 0)
+ baud = simple_strtoull(s, &e, 0);
+
+ if (baud == 0 || s == e)
baud = DEFAULT_BAUD;
}
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index f5d0730e7b08..ce95676abd60 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -131,25 +131,24 @@ void __init init_espfix_bsp(void)
init_espfix_random();
/* The rest is the same as for any other processor */
- init_espfix_ap();
+ init_espfix_ap(0);
}
-void init_espfix_ap(void)
+void init_espfix_ap(int cpu)
{
- unsigned int cpu, page;
+ unsigned int page;
unsigned long addr;
pud_t pud, *pud_p;
pmd_t pmd, *pmd_p;
pte_t pte, *pte_p;
- int n;
+ int n, node;
void *stack_page;
pteval_t ptemask;
/* We only have to do this once... */
- if (likely(this_cpu_read(espfix_stack)))
+ if (likely(per_cpu(espfix_stack, cpu)))
return; /* Already initialized */
- cpu = smp_processor_id();
addr = espfix_base_addr(cpu);
page = cpu/ESPFIX_STACKS_PER_PAGE;
@@ -165,12 +164,15 @@ void init_espfix_ap(void)
if (stack_page)
goto unlock_done;
+ node = cpu_to_node(cpu);
ptemask = __supported_pte_mask;
pud_p = &espfix_pud_page[pud_index(addr)];
pud = *pud_p;
if (!pud_present(pud)) {
- pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+ pmd_p = (pmd_t *)page_address(page);
pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PUD_CLONES; n++)
@@ -180,7 +182,9 @@ void init_espfix_ap(void)
pmd_p = pmd_offset(&pud, addr);
pmd = *pmd_p;
if (!pmd_present(pmd)) {
- pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
+ struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);
+
+ pte_p = (pte_t *)page_address(page);
pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
for (n = 0; n < ESPFIX_PMD_CLONES; n++)
@@ -188,7 +192,7 @@ void init_espfix_ap(void)
}
pte_p = pte_offset_kernel(&pmd, addr);
- stack_page = (void *)__get_free_page(GFP_KERNEL);
+ stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
for (n = 0; n < ESPFIX_PTE_CLONES; n++)
set_pte(&pte_p[n*PTE_STRIDE], pte);
@@ -199,7 +203,7 @@ void init_espfix_ap(void)
unlock_done:
mutex_unlock(&espfix_init_mutex);
done:
- this_cpu_write(espfix_stack, addr);
- this_cpu_write(espfix_waddr, (unsigned long)stack_page
- + (addr & ~PAGE_MASK));
+ per_cpu(espfix_stack, cpu) = addr;
+ per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
+ + (addr & ~PAGE_MASK);
}
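Passing the target CPU into init_espfix_ap() lets the boot CPU populate another CPU's espfix stack before that CPU ever runs, which in turn forces per_cpu(var, cpu) accessors in place of the this_cpu_*() ones. A generic sketch of that distinction (my_var, setup_for() and read_own() are hypothetical):

	DEFINE_PER_CPU(unsigned long, my_var);

	static void setup_for(int cpu)
	{
		per_cpu(my_var, cpu) = 42;	/* any CPU may fill in another CPU's slot */
	}

	static unsigned long read_own(void)
	{
		return this_cpu_read(my_var);	/* only valid on the currently running CPU */
	}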
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 5a4668136e98..f129a9af6357 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -161,11 +161,12 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
/* Kill off the identity-map trampoline */
reset_early_page_tables();
- kasan_map_early_shadow(early_level4_pgt);
-
- /* clear bss before set_intr_gate with early_idt_handler */
clear_bss();
+ clear_page(init_level4_pgt);
+
+ kasan_early_init();
+
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
set_intr_gate(i, early_idt_handler_array[i]);
load_idt((const struct desc_ptr *)&idt_descr);
@@ -177,12 +178,9 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
*/
load_ucode_bsp();
- clear_page(init_level4_pgt);
/* set init_level4_pgt kernel high mapping*/
init_level4_pgt[511] = early_level4_pgt[511];
- kasan_map_early_shadow(init_level4_pgt);
-
x86_64_start_reservations(real_mode_data);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e5c27f729a38..1d40ca8a73f2 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -516,38 +516,9 @@ ENTRY(phys_base)
/* This must match the first entry in level2_kernel_pgt */
.quad 0x0000000000000000
-#ifdef CONFIG_KASAN
-#define FILL(VAL, COUNT) \
- .rept (COUNT) ; \
- .quad (VAL) ; \
- .endr
-
-NEXT_PAGE(kasan_zero_pte)
- FILL(kasan_zero_page - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pmd)
- FILL(kasan_zero_pte - __START_KERNEL_map + _KERNPG_TABLE, 512)
-NEXT_PAGE(kasan_zero_pud)
- FILL(kasan_zero_pmd - __START_KERNEL_map + _KERNPG_TABLE, 512)
-
-#undef FILL
-#endif
-
-
#include "../../x86/xen/xen-head.S"
__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
.skip PAGE_SIZE
-#ifdef CONFIG_KASAN
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-NEXT_PAGE(kasan_zero_page)
- .skip PAGE_SIZE
-#endif
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 88b366487b0e..c7dfe1be784e 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -347,14 +347,22 @@ int check_irq_vectors_for_cpu_disable(void)
if (!desc)
continue;
+ /*
+ * Protect against concurrent action removal,
+ * affinity changes etc.
+ */
+ raw_spin_lock(&desc->lock);
data = irq_desc_get_irq_data(desc);
cpumask_copy(&affinity_new, data->affinity);
cpumask_clear_cpu(this_cpu, &affinity_new);
/* Do not count inactive or per-cpu irqs. */
- if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+ if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
+ raw_spin_unlock(&desc->lock);
continue;
+ }
+ raw_spin_unlock(&desc->lock);
/*
* A single irq may be mapped to multiple
* cpu's vector_irq[] (for example IOAPIC cluster
@@ -385,6 +393,9 @@ int check_irq_vectors_for_cpu_disable(void)
* vector. If the vector is marked in the used vectors
* bitmap or an irq is assigned to it, we don't count
* it as available.
+ *
+ * As this is an inaccurate snapshot anyway, we can do
+ * this w/o holding vector_lock.
*/
for (vector = FIRST_EXTERNAL_VECTOR;
vector < first_system_vector; vector++) {
@@ -486,6 +497,11 @@ void fixup_irqs(void)
*/
mdelay(1);
+ /*
+ * We can walk the vector array of this cpu without holding
+ * vector_lock because the cpu is already marked !online, so
+ * nothing else will touch it.
+ */
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
@@ -497,9 +513,9 @@ void fixup_irqs(void)
irq = __this_cpu_read(vector_irq[vector]);
desc = irq_to_desc(irq);
+ raw_spin_lock(&desc->lock);
data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data);
- raw_spin_lock(&desc->lock);
if (chip->irq_retrigger) {
chip->irq_retrigger(data);
__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 8add66b22f33..d3010aa79daf 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -171,11 +171,6 @@ static void smp_callin(void)
apic_ap_setup();
/*
- * Need to setup vector mappings before we enable interrupts.
- */
- setup_vector_irq(smp_processor_id());
-
- /*
* Save our processor parameters. Note: this information
* is needed for clock calibration.
*/
@@ -239,18 +234,13 @@ static void notrace start_secondary(void *unused)
check_tsc_sync_target();
/*
- * Enable the espfix hack for this CPU
- */
-#ifdef CONFIG_X86_ESPFIX64
- init_espfix_ap();
-#endif
-
- /*
- * We need to hold vector_lock so there the set of online cpus
- * does not change while we are assigning vectors to cpus. Holding
- * this lock ensures we don't half assign or remove an irq from a cpu.
+ * Lock vector_lock and initialize the vectors on this cpu
+ * before setting the cpu online. We must set it online with
+ * vector_lock held to prevent a concurrent setup/teardown
+ * from seeing a half valid vector space.
*/
lock_vector_lock();
+ setup_vector_irq(smp_processor_id());
set_cpu_online(smp_processor_id(), true);
unlock_vector_lock();
cpu_set_state_online(smp_processor_id());
@@ -854,6 +844,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
initial_code = (unsigned long)start_secondary;
stack_start = idle->thread.sp;
+ /*
+ * Enable the espfix hack for this CPU
+ */
+#ifdef CONFIG_X86_ESPFIX64
+ init_espfix_ap(cpu);
+#endif
+
/* So we see what's up */
announce_cpu(cpu, apicid);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..7437b41f6a47 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -598,10 +598,19 @@ static unsigned long quick_pit_calibrate(void)
if (!pit_expect_msb(0xff-i, &delta, &d2))
break;
+ delta -= tsc;
+
+ /*
+ * Extrapolate the error and fail fast if the error will
+ * never be below 500 ppm.
+ */
+ if (i == 1 &&
+ d1 + d2 >= (delta * MAX_QUICK_PIT_ITERATIONS) >> 11)
+ return 0;
+
/*
* Iterate until the error is less than 500 ppm
*/
- delta -= tsc;
if (d1+d2 >= delta >> 11)
continue;
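The early-bail check works because delta grows roughly linearly with the iteration count while the read jitter d1 + d2 stays roughly constant, so the best the loop can ever achieve is a bound of (delta * MAX_QUICK_PIT_ITERATIONS) >> 11. Purely illustrative numbers: with delta = 60,000 cycles after the first iteration and a made-up MAX_QUICK_PIT_ITERATIONS of 64, the best final bound is (60000 * 64) >> 11 ~= 1875 cycles; if d1 + d2 is already, say, 2,500 cycles, the ~500 ppm (1/2048) target can never be met, so the calibration returns 0 immediately instead of wasting the full run.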
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index ddf9ecb53cc3..e342586db6e4 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -20,7 +20,7 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
unsigned long ret;
if (__range_not_ok(from, n, TASK_SIZE))
- return 0;
+ return n;
/*
* Even though this function is typically called from NMI/IRQ context
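The usercopy fix brings copy_from_user_nmi() in line with the copy_from_user() convention: the return value is the number of bytes not copied, so a rejected range must report n rather than 0 (which a caller would read as complete success). Caller-side, the usual check looks like this sketch (buf, user_ptr and len are hypothetical):

	if (copy_from_user_nmi(buf, user_ptr, len) != 0)
		return -EFAULT;		/* short or failed copy */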
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 4860906c6b9f..e1840f3db5b5 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -1,3 +1,4 @@
+#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
@@ -11,7 +12,19 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
-extern unsigned char kasan_zero_page[PAGE_SIZE];
+static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+/*
+ * This page is used as the early shadow. We don't use empty_zero_page
+ * at early stages because stack instrumentation could write some garbage
+ * to this page.
+ * Later we reuse it as the zero shadow for large ranges of memory
+ * that are allowed to be accessed but are not instrumented by kasan
+ * (vmalloc/vmemmap ...).
+ */
+static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
static int __init map_range(struct range *range)
{
@@ -36,7 +49,7 @@ static void __init clear_pgds(unsigned long start,
pgd_clear(pgd_offset_k(start));
}
-void __init kasan_map_early_shadow(pgd_t *pgd)
+static void __init kasan_map_early_shadow(pgd_t *pgd)
{
int i;
unsigned long start = KASAN_SHADOW_START;
@@ -73,7 +86,7 @@ static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
WARN_ON(!pmd_none(*pmd));
set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PMD_SIZE;
pmd = pmd_offset(pud, addr);
}
@@ -99,7 +112,7 @@ static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
WARN_ON(!pud_none(*pud));
set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PUD_SIZE;
pud = pud_offset(pgd, addr);
}
@@ -124,7 +137,7 @@ static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
WARN_ON(!pgd_none(*pgd));
set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | __PAGE_KERNEL_RO));
+ | _KERNPG_TABLE));
addr += PGDIR_SIZE;
pgd = pgd_offset_k(addr);
}
@@ -166,6 +179,26 @@ static struct notifier_block kasan_die_notifier = {
};
#endif
+void __init kasan_early_init(void)
+{
+ int i;
+ pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+ pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
+ pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ kasan_zero_pte[i] = __pte(pte_val);
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ kasan_zero_pmd[i] = __pmd(pmd_val);
+
+ for (i = 0; i < PTRS_PER_PUD; i++)
+ kasan_zero_pud[i] = __pud(pud_val);
+
+ kasan_map_early_shadow(early_level4_pgt);
+ kasan_map_early_shadow(init_level4_pgt);
+}
+
void __init kasan_init(void)
{
int i;
@@ -176,6 +209,7 @@ void __init kasan_init(void)
memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
load_cr3(early_level4_pgt);
+ __flush_tlb_all();
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
@@ -202,5 +236,8 @@ void __init kasan_init(void)
memset(kasan_zero_page, 0, PAGE_SIZE);
load_cr3(init_level4_pgt);
+ __flush_tlb_all();
init_task.kasan_depth = 0;
+
+ pr_info("Kernel address sanitizer initialized\n");
}