Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 20
-rw-r--r--  arch/arm64/Makefile | 6
-rw-r--r--  arch/arm64/boot/dts/apm-storm.dtsi | 75
-rw-r--r--  arch/arm64/configs/defconfig | 2
-rw-r--r--  arch/arm64/include/asm/Kbuild | 1
-rw-r--r--  arch/arm64/include/asm/arch_timer.h | 42
-rw-r--r--  arch/arm64/include/asm/assembler.h | 31
-rw-r--r--  arch/arm64/include/asm/atomic.h | 14
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 2
-rw-r--r--  arch/arm64/include/asm/compat.h | 14
-rw-r--r--  arch/arm64/include/asm/cpu_ops.h | 59
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 14
-rw-r--r--  arch/arm64/include/asm/elf.h | 18
-rw-r--r--  arch/arm64/include/asm/hwcap.h | 11
-rw-r--r--  arch/arm64/include/asm/io.h | 12
-rw-r--r--  arch/arm64/include/asm/irq.h | 1
-rw-r--r--  arch/arm64/include/asm/memory.h | 11
-rw-r--r--  arch/arm64/include/asm/pgalloc.h | 9
-rw-r--r--  arch/arm64/include/asm/pgtable-2level-hwdef.h | 4
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 2
-rw-r--r--  arch/arm64/include/asm/processor.h | 5
-rw-r--r--  arch/arm64/include/asm/prom.h | 1
-rw-r--r--  arch/arm64/include/asm/psci.h | 19
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/include/asm/smp.h | 15
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 83
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 15
-rw-r--r--  arch/arm64/include/asm/syscall.h | 6
-rw-r--r--  arch/arm64/include/asm/virt.h | 3
-rw-r--r--  arch/arm64/include/asm/xen/page-coherent.h | 47
-rw-r--r--  arch/arm64/include/uapi/asm/byteorder.h | 4
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 4
-rw-r--r--  arch/arm64/kernel/arm64ksyms.c | 1
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 87
-rw-r--r--  arch/arm64/kernel/cputable.c | 2
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 13
-rw-r--r--  arch/arm64/kernel/entry.S | 22
-rw-r--r--  arch/arm64/kernel/head.S | 61
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 22
-rw-r--r--  arch/arm64/kernel/irq.c | 61
-rw-r--r--  arch/arm64/kernel/kuser32.S | 57
-rw-r--r--  arch/arm64/kernel/module.c | 7
-rw-r--r--  arch/arm64/kernel/perf_event.c | 11
-rw-r--r--  arch/arm64/kernel/process.c | 7
-rw-r--r--  arch/arm64/kernel/psci.c | 87
-rw-r--r--  arch/arm64/kernel/setup.c | 80
-rw-r--r--  arch/arm64/kernel/signal32.c | 39
-rw-r--r--  arch/arm64/kernel/smp.c | 214
-rw-r--r--  arch/arm64/kernel/smp_psci.c | 53
-rw-r--r--  arch/arm64/kernel/smp_spin_table.c | 97
-rw-r--r--  arch/arm64/kernel/sys32.S | 22
-rw-r--r--  arch/arm64/kernel/time.c | 10
-rw-r--r--  arch/arm64/kernel/vdso.c | 5
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S | 32
-rw-r--r--  arch/arm64/kvm/hyp-init.S | 5
-rw-r--r--  arch/arm64/kvm/hyp.S | 13
-rw-r--r--  arch/arm64/mm/init.c | 25
-rw-r--r--  arch/arm64/mm/ioremap.c | 20
-rw-r--r--  arch/arm64/mm/proc.S | 4
-rw-r--r--  arch/arm64/xen/Makefile | 2
61 files changed, 1118 insertions(+), 493 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c04454876bcb..88c8b6c1341a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,7 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -14,6 +15,7 @@ config ARM64
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
@@ -61,10 +63,6 @@ config LOCKDEP_SUPPORT
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config GENERIC_LOCKBREAK
- def_bool y
- depends on SMP && PREEMPT
-
config RWSEM_GENERIC_SPINLOCK
def_bool y
@@ -138,9 +136,13 @@ config ARM64_64K_PAGES
look-up. AArch32 emulation is not available when this feature
is enabled.
+config CPU_BIG_ENDIAN
+ bool "Build big-endian kernel"
+ help
+ Say Y if you plan on running a kernel in big-endian mode.
+
config SMP
bool "Symmetric Multi-Processing"
- select USE_GENERIC_SMP_HELPERS
help
This enables support for systems with more than one CPU. If
you say N here, the kernel will run on single and
@@ -160,6 +162,13 @@ config NR_CPUS
default "8" if ARCH_XGENE
default "4"
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
source kernel/Kconfig.preempt
config HZ
@@ -211,6 +220,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64 (EXPERIMENTAL)"
depends on ARM64 && OF
+ select SWIOTLB_XEN
help
Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64.
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index d90cf79f233a..2fceb71ac3b7 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -20,9 +20,15 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
KBUILD_DEFCONFIG := defconfig
KBUILD_CFLAGS += -mgeneral-regs-only
+ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
+KBUILD_CPPFLAGS += -mbig-endian
+AS += -EB
+LD += -EB
+else
KBUILD_CPPFLAGS += -mlittle-endian
AS += -EL
LD += -EL
+endif
comma = ,
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index bfdc57834929..d37d7369e260 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -103,6 +103,81 @@
#size-cells = <2>;
ranges;
+ clocks {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
+ pcppll: pcppll@17000100 {
+ compatible = "apm,xgene-pcppll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "pcppll";
+ reg = <0x0 0x17000100 0x0 0x1000>;
+ clock-output-names = "pcppll";
+ type = <0>;
+ };
+
+ socpll: socpll@17000120 {
+ compatible = "apm,xgene-socpll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "socpll";
+ reg = <0x0 0x17000120 0x0 0x1000>;
+ clock-output-names = "socpll";
+ type = <1>;
+ };
+
+ socplldiv2: socplldiv2 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <1>;
+ clocks = <&socpll 0>;
+ clock-names = "socplldiv2";
+ clock-mult = <1>;
+ clock-div = <2>;
+ clock-output-names = "socplldiv2";
+ };
+
+ qmlclk: qmlclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "qmlclk";
+ reg = <0x0 0x1703C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "qmlclk";
+ };
+
+ ethclk: ethclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "ethclk";
+ reg = <0x0 0x17000000 0x0 0x1000>;
+ reg-names = "div-reg";
+ divider-offset = <0x238>;
+ divider-width = <0x9>;
+ divider-shift = <0x0>;
+ clock-output-names = "ethclk";
+ };
+
+ eth8clk: eth8clk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&ethclk 0>;
+ clock-names = "eth8clk";
+ reg = <0x0 0x1702C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
+ };
+
serial0: serial@1c020000 {
device_type = "serial";
compatible = "ns16550";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 31c81e9b792e..84139be62ae6 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -26,7 +26,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
-CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_PREEMPT=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index 79a642d199f2..519f89f5b6a3 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -50,3 +50,4 @@ generic-y += unaligned.h
generic-y += user.h
generic-y += vga.h
generic-y += xor.h
+generic-y += preempt.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index c9f1d2816c2b..9400596a0f39 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -92,19 +92,49 @@ static inline u32 arch_timer_get_cntfrq(void)
return val;
}
-static inline void arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
+static inline void arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to the timers and the physical counter */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
+}
+
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
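
A hedged sketch of how the new CNTKCTL helpers compose (not part of this patch; the ~10 kHz event-stream target, the ilog2()/min() divider maths and the function name are assumptions made for illustration):

        /* Sketch only: expose the virtual counter to userspace and turn on the
         * event stream at roughly 10 kHz. Assumes <linux/log2.h> and <linux/kernel.h>. */
        static void example_counter_setup(void)
        {
                int div = ilog2(arch_timer_get_cntfrq() / 10000);

                arch_counter_set_user_access();          /* virtual counter only */
                arch_timer_evtstrm_enable(min(div, 15)); /* EVNTI is a 4-bit field */
        }
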
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 5aceb83b3f5c..fd3e3924041b 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -115,3 +115,34 @@ lr .req x30 // link register
.align 7
b \label
.endm
+
+/*
+ * Select code when configured for BE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_BE(code...) code
+#else
+#define CPU_BE(code...)
+#endif
+
+/*
+ * Select code when configured for LE.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define CPU_LE(code...)
+#else
+#define CPU_LE(code...) code
+#endif
+
+/*
+ * Define a macro that constructs a 64-bit value by concatenating two
+ * 32-bit registers. Note that on big endian systems the order of the
+ * registers is swapped.
+ */
+#ifndef CONFIG_CPU_BIG_ENDIAN
+ .macro regs_to_64, rd, lbits, hbits
+#else
+ .macro regs_to_64, rd, hbits, lbits
+#endif
+ orr \rd, \lbits, \hbits, lsl #32
+ .endm
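
In C terms the macro always yields the same value on either endianness; only which source register carries the high half is swapped. A hedged equivalent (illustrative, not part of the patch):

        /* C rendering of regs_to_64: result is (hi << 32) | lo regardless of
         * endianness; the asm macro only swaps its argument order for BE. */
        static inline u64 regs_to_64_example(u32 lo_bits, u32 hi_bits)
        {
                return ((u64)hi_bits << 32) | lo_bits;
        }
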
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 836364468571..01de5aaa3edc 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,20 +126,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
return oldval;
}
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
- unsigned long tmp, tmp2;
-
- asm volatile("// atomic_clear_mask\n"
-"1: ldxr %0, %2\n"
-" bic %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
- : "Ir" (mask)
- : "cc");
-}
-
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 8a8ce0e73a38..3914c0dcd09c 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -173,4 +173,6 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 899af807ef0f..fda2704b3f9f 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -26,7 +26,11 @@
#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
+#ifdef __AARCH64EB__
+#define COMPAT_UTS_MACHINE "armv8b\0\0"
+#else
#define COMPAT_UTS_MACHINE "armv8l\0\0"
+#endif
typedef u32 compat_size_t;
typedef s32 compat_ssize_t;
@@ -73,13 +77,23 @@ struct compat_timeval {
};
struct compat_stat {
+#ifdef __AARCH64EB__
+ short st_dev;
+ short __pad1;
+#else
compat_dev_t st_dev;
+#endif
compat_ino_t st_ino;
compat_mode_t st_mode;
compat_ushort_t st_nlink;
__compat_uid16_t st_uid;
__compat_gid16_t st_gid;
+#ifdef __AARCH64EB__
+ short st_rdev;
+ short __pad2;
+#else
compat_dev_t st_rdev;
+#endif
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 000000000000..c4cdb5e5b73d
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the property as appears in a devicetree cpu node's
+ * enable-method property.
+ * @cpu_init: Reads any data necessary for a specific enable-method from the
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ * mechanism for doing so, tests whether it is possible to boot
+ * the given CPU.
+ * @cpu_boot: Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ * synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ * reason, which will cause the hot unplug to be aborted. Called
+ * from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ * cpu being killed.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+ int (*cpu_boot)(unsigned int);
+ void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
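
A hedged sketch of how a caller might drive this interface when bringing a secondary CPU online (the function name and error handling are illustrative; the real call sites live in smp.c):

        /* Illustrative consumer of cpu_operations; not lifted from this patch. */
        static int example_bringup_cpu(unsigned int cpu)
        {
                const struct cpu_operations *ops = cpu_ops[cpu];
                int err;

                if (!ops || !ops->cpu_boot)
                        return -EOPNOTSUPP;

                if (ops->cpu_prepare) {
                        err = ops->cpu_prepare(cpu);    /* e.g. check CPU_ON is available */
                        if (err)
                                return err;
                }

                return ops->cpu_boot(cpu);              /* kick the CPU into the kernel */
        }
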
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 8d1810001aef..fd0c0c0e447a 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -23,11 +23,15 @@
#include <asm-generic/dma-coherent.h>
+#include <xen/xen.h>
+#include <asm/xen/hypervisor.h>
+
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
if (unlikely(!dev) || !dev->archdata.dma_ops)
return dma_ops;
@@ -35,6 +39,14 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
return dev->archdata.dma_ops;
}
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ if (xen_initial_domain())
+ return xen_dma_ops;
+ else
+ return __generic_dma_ops(dev);
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index e7fa87f9201b..01d3aab64b79 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -90,11 +90,24 @@ typedef struct user_fpsimd_state elf_fpregset_t;
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
+#ifdef __AARCH64EB__
+#define ELF_DATA ELFDATA2MSB
+#else
#define ELF_DATA ELFDATA2LSB
+#endif
#define ELF_ARCH EM_AARCH64
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
#define ELF_PLATFORM_SIZE 16
+#ifdef __AARCH64EB__
+#define ELF_PLATFORM ("aarch64_be")
+#else
#define ELF_PLATFORM ("aarch64")
+#endif
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -149,7 +162,12 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
#ifdef CONFIG_COMPAT
+
+#ifdef __AARCH64EB__
+#define COMPAT_ELF_PLATFORM ("v8b")
+#else
#define COMPAT_ELF_PLATFORM ("v8l")
+#endif
#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3))
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index e2950b098e76..6cddbb0c9f54 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -30,6 +30,7 @@
#define COMPAT_HWCAP_IDIVA (1 << 17)
#define COMPAT_HWCAP_IDIVT (1 << 18)
#define COMPAT_HWCAP_IDIV (COMPAT_HWCAP_IDIVA|COMPAT_HWCAP_IDIVT)
+#define COMPAT_HWCAP_EVTSTRM (1 << 21)
#ifndef __ASSEMBLY__
/*
@@ -37,11 +38,11 @@
* instruction set this cpu supports.
*/
#define ELF_HWCAP (elf_hwcap)
-#define COMPAT_ELF_HWCAP (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
- COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
- COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
- COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
- COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP (compat_elf_hwcap)
+extern unsigned int compat_elf_hwcap;
+#endif
extern unsigned long elf_hwcap;
#endif
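
For context, a hedged userspace sketch showing how the new capability bit would be consumed once it reaches the ELF auxiliary vector (plain C, not part of the patch; the HWCAP_EVTSTRM value mirrors the uapi header further down in this diff):

        /* Userspace check for the event stream capability via AT_HWCAP. */
        #include <stdio.h>
        #include <sys/auxv.h>

        #ifndef HWCAP_EVTSTRM
        #define HWCAP_EVTSTRM   (1 << 2)        /* matches uapi/asm/hwcap.h in this series */
        #endif

        int main(void)
        {
                unsigned long hwcap = getauxval(AT_HWCAP);

                printf("event stream %savailable\n",
                       (hwcap & HWCAP_EVTSTRM) ? "" : "not ");
                return 0;
        }
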
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 1d12f89140ba..4cc813eddacb 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -22,11 +22,14 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <linux/blk_types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
+#include <xen/xen.h>
+
/*
* Generic IO read/write. These perform native-endian accesses.
*/
@@ -224,6 +227,7 @@ extern void __memset_io(volatile void __iomem *, int, size_t);
*/
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
+extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -233,7 +237,6 @@ extern void __iounmap(volatile void __iomem *addr);
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
-#define ioremap_cached(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL))
#define iounmap __iounmap
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
@@ -263,5 +266,12 @@ extern int devmem_is_allowed(unsigned long pfn);
*/
#define xlate_dev_kmem_ptr(p) p
+struct bio_vec;
+extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
+ const struct bio_vec *vec2);
+#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
+ (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
+ (!xen_domain() || xen_biovec_phys_mergeable(vec1, vec2)))
+
#endif /* __KERNEL__ */
#endif /* __ASM_IO_H */
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc077f6e..e1f7ecdde11f 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 20925bcf4e2a..37762175896f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,18 +33,23 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image (top
+ * (VA_BITS - 1))
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#define PAGE_OFFSET UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS (42)
+#else
+#define VA_BITS (39)
+#endif
+#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
-#define VA_BITS (39)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
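
Worked out for both page-size configurations (arithmetic only, not part of the patch): with 4KB pages VA_BITS is 39, so PAGE_OFFSET = ~0UL << 38 = 0xffffffc000000000, exactly the constant being removed; with 64KB pages VA_BITS is 42, giving PAGE_OFFSET = ~0UL << 41 = 0xfffffe0000000000 and a 4TB TASK_SIZE_64.
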
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index f214069ec5d5..9bea6e74a001 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -63,9 +63,12 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
struct page *pte;
pte = alloc_pages(PGALLOC_GFP, 0);
- if (pte)
- pgtable_page_ctor(pte);
-
+ if (!pte)
+ return NULL;
+ if (!pgtable_page_ctor(pte)) {
+ __free_page(pte);
+ return NULL;
+ }
return pte;
}
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f94e93..2593b490c56a 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD 8192
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f0bebc5e22cd..17bd3af0a117 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -33,7 +33,7 @@
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START UL(0xffffff8000000000)
+#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ab239b2c456f..45b20cd6cbca 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -107,6 +107,11 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
regs->pstate = COMPAT_PSR_MODE_USR;
if (pc & 1)
regs->pstate |= COMPAT_PSR_T_BIT;
+
+#ifdef __AARCH64EB__
+ regs->pstate |= COMPAT_PSR_E_BIT;
+#endif
+
regs->compat_sp = sp;
}
#endif
diff --git a/arch/arm64/include/asm/prom.h b/arch/arm64/include/asm/prom.h
deleted file mode 100644
index 68b90e682957..000000000000
--- a/arch/arm64/include/asm/prom.h
+++ /dev/null
@@ -1 +0,0 @@
-/* Empty for now */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237ecd99..e5312ea0ec1a 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
int psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 0dacbbf9458b..0e7fa4963735 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -42,6 +42,7 @@
#define COMPAT_PSR_MODE_UND 0x0000001b
#define COMPAT_PSR_MODE_SYS 0x0000001f
#define COMPAT_PSR_T_BIT 0x00000020
+#define COMPAT_PSR_E_BIT 0x00000200
#define COMPAT_PSR_F_BIT 0x00000040
#define COMPAT_PSR_I_BIT 0x00000080
#define COMPAT_PSR_A_BIT 0x00000100
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c5d146..a498f2cd2c2a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@ struct secondary_data {
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
+extern int __cpu_disable(void);
-struct smp_enable_ops {
- const char *name;
- int (*init_cpu)(struct device_node *, int);
- int (*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 0defa0728a9b..3d5cf064d7a1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
/*
* Spinlock implementation.
*
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -41,32 +34,51 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval, newval;
asm volatile(
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval;
asm volatile(
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- "1:\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+" prfm pstl1strm, %2\n"
+"1: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 2f\n"
+" add %w0, %w0, %3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b\n"
+"2:"
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ : "I" (1 << TICKET_SHIFT)
+ : "memory");
return !tmp;
}
@@ -74,9 +86,28 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (lock->lock) : "r" (0) : "memory");
+" stlrh %w1, %0\n"
+ : "=Q" (lock->owner)
+ : "r" (lock->owner + 1)
+ : "memory");
+}
+
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
}
+#define arch_spin_is_contended arch_spin_is_contended
/*
* Write lock implementation.
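
A hedged C rendering of the ticket algorithm encoded by the asm above (illustrative only; the real implementation must stay in asm to get the exclusive-monitor and acquire/release semantics, the helper names are invented, and the owner/next fields are introduced in spinlock_types.h below):

        /* Pseudo-C equivalent of the ticket lock: take a ticket from 'next',
         * then spin until 'owner' reaches it. Unlock hands the lock to the
         * next ticket holder (the asm uses stlrh for release ordering). */
        static inline void example_ticket_lock(arch_spinlock_t *lock)
        {
                u16 ticket = __sync_fetch_and_add(&lock->next, 1);

                while (ACCESS_ONCE(lock->owner) != ticket)
                        cpu_relax();
        }

        static inline void example_ticket_unlock(arch_spinlock_t *lock)
        {
                lock->owner++;          /* real code: store-release halfword */
        }
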
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a494346efed..b8d383665f56 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,19 @@
# error "please don't include this file directly"
#endif
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT 16
typedef struct {
- volatile unsigned int lock;
-} arch_spinlock_t;
+#ifdef __AARCH64EB__
+ u16 next;
+ u16 owner;
+#else
+ u16 owner;
+ u16 next;
+#endif
+} __aligned(4) arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index 89c047f9a971..70ba9d4ee978 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -59,6 +59,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -82,6 +85,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 26e310c54344..130e2be952cf 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -18,7 +18,8 @@
#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H
-#define BOOT_CPU_MODE_EL2 (0x0e12b007)
+#define BOOT_CPU_MODE_EL1 (0xe11)
+#define BOOT_CPU_MODE_EL2 (0xe12)
#ifndef __ASSEMBLY__
#include <asm/cacheflush.h>
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
new file mode 100644
index 000000000000..2820f1a6eebe
--- /dev/null
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -0,0 +1,47 @@
+#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H
+#define _ASM_ARM64_XEN_PAGE_COHERENT_H
+
+#include <asm/page.h>
+#include <linux/dma-attrs.h>
+#include <linux/dma-mapping.h>
+
+static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+}
+
+static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+}
+
+static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+}
+
+static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+}
+
+static inline void xen_dma_sync_single_for_device(struct device *hwdev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+ __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+}
+#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/include/uapi/asm/byteorder.h b/arch/arm64/include/uapi/asm/byteorder.h
index 2b92046aafc5..dc19e9537f0d 100644
--- a/arch/arm64/include/uapi/asm/byteorder.h
+++ b/arch/arm64/include/uapi/asm/byteorder.h
@@ -16,6 +16,10 @@
#ifndef __ASM_BYTEORDER_H
#define __ASM_BYTEORDER_H
+#ifdef __AARCH64EB__
+#include <linux/byteorder/big_endian.h>
+#else
#include <linux/byteorder/little_endian.h>
+#endif
#endif /* __ASM_BYTEORDER_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index eea497578b87..9b12476e9c85 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -21,6 +21,7 @@
*/
#define HWCAP_FP (1 << 0)
#define HWCAP_ASIMD (1 << 1)
+#define HWCAP_EVTSTRM (1 << 2)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 7b4b564961d4..5ba2fd43a75b 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,12 +9,12 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o
+ hyp-stub.o psci.o cpu_ops.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f626d554..e7ee770c0697 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -39,6 +39,7 @@ EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 000000000000..d62d12fb36c8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,87 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+ &smp_spin_table_ops,
+ &cpu_psci_ops,
+#endif
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations **ops = supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g. when
+ * spin-table is used for secondaries). Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+ struct device_node *dn = of_get_cpu_node(0, NULL);
+ if (!dn) {
+ pr_err("Failed to find device node for boot cpu\n");
+ return;
+ }
+ cpu_read_ops(dn, 0);
+}
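
To illustrate the extension point, a hedged sketch of what a hypothetical additional enable method would provide (names and ops are invented for illustration; it would also need an entry in supported_cpu_ops[] and a matching enable-method string in the device tree cpu node):

        /* Hypothetical enable method, for illustration only. */
        static int example_park_cpu_init(struct device_node *dn, unsigned int cpu)
        {
                return 0;               /* parse per-cpu properties from @dn */
        }

        static int example_park_cpu_prepare(unsigned int cpu)
        {
                return 0;               /* verify the CPU can be released */
        }

        static int example_park_cpu_boot(unsigned int cpu)
        {
                return -ENOSYS;         /* release the CPU from its parking protocol */
        }

        static const struct cpu_operations example_park_ops = {
                .name           = "example-park",       /* matched against enable-method */
                .cpu_init       = example_park_cpu_init,
                .cpu_prepare    = example_park_cpu_prepare,
                .cpu_boot       = example_park_cpu_boot,
        };
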
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a43f4e..fd3993cb060f 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cbfacf7fb438..6a0a9b132d7a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,7 +27,6 @@
#include <linux/uaccess.h>
#include <asm/debug-monitors.h>
-#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
* Keep track of debug users on each core.
* The ref counts are per-cpu so we use a local_t type.
*/
-static DEFINE_PER_CPU(local_t, mde_ref_count);
-static DEFINE_PER_CPU(local_t, kde_ref_count);
+static DEFINE_PER_CPU(int, mde_ref_count);
+static DEFINE_PER_CPU(int, kde_ref_count);
void enable_debug_monitors(enum debug_el el)
{
@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+ if (this_cpu_inc_return(mde_ref_count) == 1)
enable = DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+ this_cpu_inc_return(kde_ref_count) == 1)
enable |= DBG_MDSCR_KDE;
if (enable && debug_enabled) {
@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
WARN_ON(preemptible());
- if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+ if (this_cpu_dec_return(mde_ref_count) == 0)
disable = ~DBG_MDSCR_MDE;
if (el == DBG_ACTIVE_EL1 &&
- local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+ this_cpu_dec_return(kde_ref_count) == 0)
disable &= ~DBG_MDSCR_KDE;
if (disable) {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3881fd115ebb..e1166145ca29 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -311,14 +311,14 @@ el1_irq:
#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x0, x24, #1 // increment it
- str x0, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w0, w24, #1 // increment it
+ str w0, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- str x24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz x24, 1f // preempt count != 0
+ str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -509,15 +509,15 @@ el0_irq_naked:
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x23, x24, #1 // increment it
- str x23, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w23, w24, #1 // increment it
+ str w23, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- ldr x0, [tsk, #TI_PREEMPT]
- str x24, [tsk, #TI_PREEMPT]
- cmp x0, x23
+ ldr w0, [tsk, #TI_PREEMPT]
+ str w24, [tsk, #TI_PREEMPT]
+ cmp w0, w23
b.eq 1f
mov x1, #0
str x1, [x1] // BUG
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 7090c126797c..7009387348b7 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -123,8 +123,9 @@
ENTRY(stext)
mov x21, x0 // x21=FDT
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
- bl el2_setup // Drop to EL1
+ bl set_cpu_boot_mode_flag
mrs x22, midr_el1 // x22=cpuid
mov x0, x22
bl lookup_processor_type
@@ -150,21 +151,30 @@ ENDPROC(stext)
/*
* If we're fortunate enough to boot at EL2, ensure that the world is
* sane before dropping to EL1.
+ *
+ * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
+ * booted in EL1 or EL2 respectively.
*/
ENTRY(el2_setup)
mrs x0, CurrentEL
cmp x0, #PSR_MODE_EL2t
ccmp x0, #PSR_MODE_EL2h, #0x4, ne
- ldr x0, =__boot_cpu_mode // Compute __boot_cpu_mode
- add x0, x0, x28
- b.eq 1f
- str wzr, [x0] // Remember we don't have EL2...
+ b.ne 1f
+ mrs x0, sctlr_el2
+CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
+CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
+ msr sctlr_el2, x0
+ b 2f
+1: mrs x0, sctlr_el1
+CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
+CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
+ msr sctlr_el1, x0
+ mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
+ isb
ret
/* Hyp configuration. */
-1: ldr w1, =BOOT_CPU_MODE_EL2
- str w1, [x0, #4] // This CPU has EL2
- mov x0, #(1 << 31) // 64-bit EL1
+2: mov x0, #(1 << 31) // 64-bit EL1
msr hcr_el2, x0
/* Generic timers. */
@@ -181,7 +191,8 @@ ENTRY(el2_setup)
/* sctlr_el1 */
mov x0, #0x0800 // Set/clear RES{1,0} bits
- movk x0, #0x30d0, lsl #16
+CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
+CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr sctlr_el1, x0
/* Coprocessor traps. */
@@ -204,10 +215,25 @@ ENTRY(el2_setup)
PSR_MODE_EL1h)
msr spsr_el2, x0
msr elr_el2, lr
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
eret
ENDPROC(el2_setup)
/*
+ * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
+ * in x20. See arch/arm64/include/asm/virt.h for more info.
+ */
+ENTRY(set_cpu_boot_mode_flag)
+ ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
+ add x1, x1, x28
+ cmp w20, #BOOT_CPU_MODE_EL2
+ b.ne 1f
+ add x1, x1, #4
+1: str w20, [x1] // This CPU has booted in EL1
+ ret
+ENDPROC(set_cpu_boot_mode_flag)
+
+/*
* We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable.
*
@@ -225,7 +251,6 @@ ENTRY(__boot_cpu_mode)
.quad PAGE_OFFSET
#ifdef CONFIG_SMP
- .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -235,8 +260,9 @@ ENTRY(__boot_cpu_mode)
* cores are held until we're ready for them to initialise.
*/
ENTRY(secondary_holding_pen)
- bl __calc_phys_offset // x24=phys offset
- bl el2_setup // Drop to EL1
+ bl el2_setup // Drop to EL1, w20=cpu_boot_mode
+ bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+ bl set_cpu_boot_mode_flag
mrs x0, mpidr_el1
ldr x1, =MPIDR_HWID_BITMASK
and x0, x0, x1
@@ -250,7 +276,16 @@ pen: ldr x4, [x3]
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl __calc_phys_offset // x2=phys offset
+ bl el2_setup // Drop to EL1
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 329218ca9ffb..ff516f6691e4 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
/* Breakpoint */
ctrl_reg = AARCH64_DBG_REG_BCR;
val_reg = AARCH64_DBG_REG_BVR;
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
reg_enable = !debug_info->bps_disabled;
} else {
/* Watchpoint */
ctrl_reg = AARCH64_DBG_REG_WCR;
val_reg = AARCH64_DBG_REG_WVR;
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
reg_enable = !debug_info->wps_disabled;
}
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
/* Breakpoint */
base = AARCH64_DBG_REG_BCR;
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
} else {
/* Watchpoint */
base = AARCH64_DBG_REG_WCR;
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
}
@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
switch (reg) {
case AARCH64_DBG_REG_BCR:
- slots = __get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
max_slots = core_num_brps;
break;
case AARCH64_DBG_REG_WCR:
- slots = __get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
max_slots = core_num_wrps;
break;
default:
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
struct debug_info *debug_info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+ slots = this_cpu_ptr(bp_on_reg);
addr = instruction_pointer(regs);
debug_info = &current->thread.debug;
@@ -596,7 +596,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
struct arch_hw_breakpoint *info;
struct arch_hw_breakpoint_ctrl ctrl;
- slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+ slots = this_cpu_ptr(wp_on_reg);
debug_info = &current->thread.debug;
for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@ unlock:
user_enable_single_step(current);
} else {
toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
if (*kernel_step != ARM_KERNEL_STEP_NONE)
return 0;
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
struct debug_info *debug_info = &current->thread.debug;
int handled_exception = 0, *kernel_step;
- kernel_step = &__get_cpu_var(stepping_kernel_bp);
+ kernel_step = this_cpu_ptr(&stepping_kernel_bp);
/*
* Called from single-step exception handler.
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354292ed..473e5dbf8f39 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@ void __init init_IRQ(void)
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+}
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_irq_desc(i, desc) {
+ bool affinity_broken;
+
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 8b69ecb1d8bc..63c48ffdf230 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -27,6 +27,9 @@
*
* See Documentation/arm/kernel_user_helpers.txt for formal definitions.
*/
+
+#include <asm/unistd32.h>
+
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -35,33 +38,30 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
+ .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1923f9f // 1: ldrex r3, [r2]
+ .inst 0xe1923e9f // 1: ldaex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
@@ -75,3 +75,42 @@ __kuser_helper_version: // 0xffff0ffc
.word ((__kuser_helper_end - __kuser_helper_start) >> 5)
.globl __kuser_helper_end
__kuser_helper_end:
+
+/*
+ * AArch32 sigreturn code
+ *
+ * For ARM syscalls, the syscall number has to be loaded into r7.
+ * We do not support an OABI userspace.
+ *
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+ .globl __aarch32_sigret_code_start
+__aarch32_sigret_code_start:
+
+ /*
+ * ARM Code
+ */
+ .byte __NR_compat_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_sigreturn, 0x27 // mov r7, #__NR_compat_sigreturn
+ .byte __NR_compat_sigreturn, 0xdf // svc #__NR_compat_sigreturn
+
+ /*
+ * ARM code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x70, 0xa0, 0xe3 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0x00, 0x00, 0xef // svc #__NR_compat_rt_sigreturn
+
+ /*
+ * Thumb code
+ */
+ .byte __NR_compat_rt_sigreturn, 0x27 // mov r7, #__NR_compat_rt_sigreturn
+ .byte __NR_compat_rt_sigreturn, 0xdf // svc #__NR_compat_rt_sigreturn
+
+ .globl __aarch32_sigret_code_end
+__aarch32_sigret_code_end:
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index ca0e3d55da99..e2ad0d87721f 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -29,7 +29,7 @@
void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
__builtin_return_address(0));
}
@@ -111,6 +111,9 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
u32 immlo, immhi, lomask, himask, mask;
int shift;
+ /* The instruction stream is always little endian. */
+ insn = le32_to_cpu(insn);
+
switch (type) {
case INSN_IMM_MOVNZ:
/*
@@ -179,7 +182,7 @@ static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
insn &= ~(mask << shift);
insn |= (imm & mask) << shift;
- return insn;
+ return cpu_to_le32(insn);
}
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cea1594ff933..0e63c98d224c 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -784,8 +784,8 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
-#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
/*
* Event filters for PMUv3
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
*/
regs = get_irq_regs();
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
@@ -1175,7 +1175,8 @@ static void armv8pmu_reset(void *info)
static int armv8_pmuv3_map_event(struct perf_event *event)
{
return map_cpu_event(event, &armv8_pmuv3_perf_map,
- &armv8_pmuv3_perf_cache_map, 0xFF);
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
}
static struct arm_pmu armv8pmu = {
@@ -1257,7 +1258,7 @@ device_initcall(register_pmu_driver);
static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
- return &__get_cpu_var(cpu_hw_events);
+ return this_cpu_ptr(&cpu_hw_events);
}
static void __init cpu_pmu_init(struct arm_pmu *armpmu)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 7ae8a1f00c3c..de17c89985db 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -102,6 +102,13 @@ void arch_cpu_idle(void)
local_irq_enable();
}
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+#endif
+
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 14f73c445ff5..4f97db3d7363 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -17,12 +17,32 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/smp.h>
#include <asm/compiler.h>
+#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
-struct psci_operations psci_ops;
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
@@ -209,3 +229,68 @@ out_put_node:
of_node_put(np);
return err;
}
+
+#ifdef CONFIG_SMP
+
+static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ if (err)
+ pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
+
+ return err;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ int ret;
+ /*
+ * There are no known implementations of PSCI actually using the
+ * power state field, pass a sensible default for now.
+ */
+ struct psci_power_state state = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ ret = psci_ops.cpu_off(state);
+
+ pr_crit("psci: unable to power off CPU%u (%d)\n", cpu, ret);
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+#endif
+};
+
+#endif
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 055cfb80e05c..0bc5e4cbc017 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -45,6 +45,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -60,6 +61,16 @@ EXPORT_SYMBOL(processor_id);
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
+#ifdef CONFIG_COMPAT
+#define COMPAT_ELF_HWCAP_DEFAULT \
+ (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
+ COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
+ COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
+ COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
+ COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV)
+unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
+#endif
+
static const char *cpu_name;
static const char *machine_name;
phys_addr_t __fdt_pointer __initdata;
@@ -97,6 +108,11 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
@@ -118,76 +134,24 @@ static void __init setup_processor(void)
printk("CPU: %s [%08x] revision %d\n",
cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
- sprintf(init_utsname()->machine, "aarch64");
+ sprintf(init_utsname()->machine, ELF_PLATFORM);
elf_hwcap = 0;
}
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
- struct boot_param_header *devtree;
- unsigned long dt_root;
-
- /* Check we have a non-NULL DT pointer */
- if (!dt_phys) {
- early_print("\n"
- "Error: NULL or invalid device tree blob\n"
- "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
- "\nPlease check your bootloader.\n");
-
- while (true)
- cpu_relax();
-
- }
-
- devtree = phys_to_virt(dt_phys);
-
- /* Check device tree validity */
- if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) {
+ if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys))) {
early_print("\n"
"Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n"
- "Expected 0x%x, found 0x%x\n"
+ "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n"
"\nPlease check your bootloader.\n",
- dt_phys, devtree, OF_DT_HEADER,
- be32_to_cpu(devtree->magic));
+ dt_phys, phys_to_virt(dt_phys));
while (true)
cpu_relax();
}
- initial_boot_params = devtree;
- dt_root = of_get_flat_dt_root();
-
- machine_name = of_get_flat_dt_prop(dt_root, "model", NULL);
- if (!machine_name)
- machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
- if (!machine_name)
- machine_name = "<unknown>";
- pr_info("Machine: %s\n", machine_name);
-
- /* Retrieve various information from the /chosen node */
- of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
- /* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- /* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
-}
-
-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
- base &= PAGE_MASK;
- size &= PAGE_MASK;
- if (base + size < PHYS_OFFSET) {
- pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
- base, base + size);
- return;
- }
- if (base < PHYS_OFFSET) {
- pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
- base, PHYS_OFFSET);
- size -= PHYS_OFFSET - base;
- base = PHYS_OFFSET;
- }
- memblock_add(base, size);
+ machine_name = of_flat_dt_get_machine_name();
}
/*
@@ -264,6 +228,7 @@ void __init setup_arch(char **cmdline_p)
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
#endif
@@ -304,6 +269,7 @@ subsys_initcall(topology_init);
static const char *hwcap_str[] = {
"fp",
"asimd",
+ "evtstrm",
NULL
};
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e393174fe859..b3fc9f5ec6d3 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -100,34 +100,6 @@ struct compat_rt_sigframe {
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-/*
- * For ARM syscalls, the syscall number has to be loaded into r7.
- * We do not support an OABI userspace.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_compat_sigreturn)
-#define SVC_SYS_SIGRETURN (0xef000000 | __NR_compat_sigreturn)
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_compat_rt_sigreturn)
-#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_compat_rt_sigreturn)
-
-/*
- * For Thumb syscalls, we also pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_compat_sigreturn) << 16) | \
- 0x2700 | __NR_compat_sigreturn)
-#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_compat_rt_sigreturn) << 16) | \
- 0x2700 | __NR_compat_rt_sigreturn)
-
-const compat_ulong_t aarch32_sigret_code[6] = {
- /*
- * AArch32 sigreturn code.
- * We don't construct an OABI SWI - instead we just set the imm24 field
- * to the EABI syscall number so that we create a sane disassembly.
- */
- MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
-};
-
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
compat_sigset_t cset;
@@ -150,7 +122,7 @@ static inline int get_sigset_t(sigset_t *set,
return 0;
}
-int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
{
int err;
@@ -474,12 +446,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb) {
+ if (thumb)
spsr |= COMPAT_PSR_T_BIT;
- spsr &= ~COMPAT_PSR_IT_MASK;
- } else {
+ else
spsr &= ~COMPAT_PSR_T_BIT;
- }
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~COMPAT_PSR_IT_MASK;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 78db90dcc910..a5aeefab03c3 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -54,7 +55,6 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -63,61 +63,16 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
@@ -187,17 +142,13 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
/*
- * Synchronise with the boot thread.
+ * Enable GIC and timers.
*/
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ notify_cpu_starting(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -207,11 +158,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Enable GIC and timers.
- */
- notify_cpu_starting(cpu);
-
local_irq_enable();
local_fiq_enable();
@@ -221,39 +167,113 @@ asmlinkage void secondary_start_kernel(void)
cpu_startup_entry(CPUHP_ONLINE);
}
-void __init smp_cpus_done(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
{
- pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+	 * of no return. CPU0 may not have a cpu_ops entry, so test for it.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
+
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
}
-void __init smp_prepare_boot_cpu(void)
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
{
-}
+ unsigned int cpu = smp_processor_id();
+ int ret;
-static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+	 * and we must not schedule until we're ready to give up the CPU.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
- const struct smp_enable_ops **ops = enable_ops;
+ return 0;
+}
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
+static DECLARE_COMPLETION(cpu_died);
- ops++;
+/*
+ * Called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or the wait times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
}
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shut down.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
- return NULL;
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+	 * Actually shut down the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
+
+ BUG();
+}
+#endif
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+ pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
}
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+static void (*smp_cross_call)(const struct cpumask *, unsigned int);
+
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -261,9 +281,8 @@ static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -332,25 +351,10 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
-
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
- goto next;
- }
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -380,8 +384,8 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
/*
* are we trying to boot more cores than exist?
@@ -408,10 +412,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
@@ -451,7 +455,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
deleted file mode 100644
index 0c533301be77..000000000000
--- a/arch/arm64/kernel/smp_psci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * PSCI SMP initialisation
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/smp.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
-{
- return 0;
-}
-
-static int __init smp_psci_prepare_cpu(int cpu)
-{
- int err;
-
- if (!psci_ops.cpu_on) {
- pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
- if (err) {
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
- return err;
- }
-
- return 0;
-}
-
-const struct smp_enable_ops smp_psci_ops __initconst = {
- .name = "psci",
- .init_cpu = smp_psci_init_cpu,
- .prepare_cpu = smp_psci_prepare_cpu,
-};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa682f76..44c22805d2e2 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,15 +16,39 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -40,7 +64,7 @@ static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
return 0;
}
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
@@ -48,7 +72,16 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+	 * We write the release address as LE regardless of the native
+	 * endianness of the kernel. Therefore, any boot-loaders that
+	 * read this address need to convert it to the boot-loader's
+	 * endianness before jumping. This is mandated by the boot
+	 * protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -59,8 +92,60 @@ static int __init smp_spin_table_prepare_cpu(int cpu)
return 0;
}
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == INVALID_HWID)
+ break;
+ udelay(10);
+ }
+
+ /*
+	 * Now the secondary core is starting up, let it run its
+	 * calibrations, then wait for it to finish.
+ */
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+}
+
+void smp_spin_table_cpu_postboot(void)
+{
+ /*
+ * Let the primary processor know we're out of the pen.
+ */
+ write_pen_release(INVALID_HWID);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+}
+
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
- .init_cpu = smp_spin_table_init_cpu,
- .prepare_cpu = smp_spin_table_prepare_cpu,
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
+ .cpu_postboot = smp_spin_table_cpu_postboot,
};
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index a1b19ed7467c..423a5b3fc2be 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -59,48 +59,48 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* extension.
*/
compat_sys_pread64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pread64
ENDPROC(compat_sys_pread64_wrapper)
compat_sys_pwrite64_wrapper:
- orr x3, x4, x5, lsl #32
+ regs_to_64 x3, x4, x5
b sys_pwrite64
ENDPROC(compat_sys_pwrite64_wrapper)
compat_sys_truncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_truncate
ENDPROC(compat_sys_truncate64_wrapper)
compat_sys_ftruncate64_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
b sys_ftruncate
ENDPROC(compat_sys_ftruncate64_wrapper)
compat_sys_readahead_wrapper:
- orr x1, x2, x3, lsl #32
+ regs_to_64 x1, x2, x3
mov w2, w4
b sys_readahead
ENDPROC(compat_sys_readahead_wrapper)
compat_sys_fadvise64_64_wrapper:
mov w6, w1
- orr x1, x2, x3, lsl #32
- orr x2, x4, x5, lsl #32
+ regs_to_64 x1, x2, x3
+ regs_to_64 x2, x4, x5
mov w3, w6
b sys_fadvise64_64
ENDPROC(compat_sys_fadvise64_64_wrapper)
compat_sys_sync_file_range2_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_sync_file_range2
ENDPROC(compat_sys_sync_file_range2_wrapper)
compat_sys_fallocate_wrapper:
- orr x2, x2, x3, lsl #32
- orr x3, x4, x5, lsl #32
+ regs_to_64 x2, x2, x3
+ regs_to_64 x3, x4, x5
b sys_fallocate
ENDPROC(compat_sys_fallocate_wrapper)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 03dc3718eb13..29c39d5d77e3 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -61,13 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-static u64 sched_clock_mult __read_mostly;
-
-unsigned long long notrace sched_clock(void)
-{
- return arch_timer_read_counter() * sched_clock_mult;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
@@ -78,9 +71,6 @@ void __init time_init(void)
if (!arch_timer_rate)
panic("Unable to initialise architected timer.\n");
- /* Cache the sched_clock multiplier to save a divide in the hot path. */
- sched_clock_mult = NSEC_PER_SEC / arch_timer_rate;
-
/* Calibrate the delay loop directly */
lpj_fine = arch_timer_rate / HZ;
}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6a389dc1bd49..65d40cf6945a 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -58,7 +58,10 @@ static struct page *vectors_page[1];
static int alloc_vectors_page(void)
{
extern char __kuser_helper_start[], __kuser_helper_end[];
+ extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long vpage;
vpage = get_zeroed_page(GFP_ATOMIC);
@@ -72,7 +75,7 @@ static int alloc_vectors_page(void)
/* sigreturn code */
memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
- aarch32_sigret_code, sizeof(aarch32_sigret_code));
+ __aarch32_sigret_code_start, sigret_sz);
flush_icache_range(vpage, vpage + PAGE_SIZE);
vectors_page[0] = virt_to_page(vpage);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index f8ab9d8e2ea3..5161ad992091 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -54,7 +54,6 @@ SECTIONS
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -97,30 +96,13 @@ SECTIONS
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
-
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
+
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ __data_loc = _data - LOAD_OFFSET;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
_edata_loc = __data_loc + SIZEOF(.data);
BSS_SECTION(0, 0, 0)
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index ba84e6705e20..2b0244d65c16 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -74,7 +74,10 @@ __do_hyp_init:
msr mair_el2, x4
isb
- mov x4, #SCTLR_EL2_FLAGS
+ mrs x4, sctlr_el2
+ and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2
+ ldr x5, =SCTLR_EL2_FLAGS
+ orr x4, x4, x5
msr sctlr_el2, x4
isb
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1ac0bbbdddb2..3b47c36e10ff 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -403,6 +403,14 @@ __kvm_hyp_code_start:
ldr w9, [x2, #GICH_ELRSR0]
ldr w10, [x2, #GICH_ELRSR1]
ldr w11, [x2, #GICH_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
+CPU_BE( rev w7, w7 )
+CPU_BE( rev w8, w8 )
+CPU_BE( rev w9, w9 )
+CPU_BE( rev w10, w10 )
+CPU_BE( rev w11, w11 )
str w4, [x3, #VGIC_CPU_HCR]
str w5, [x3, #VGIC_CPU_VMCR]
@@ -421,6 +429,7 @@ __kvm_hyp_code_start:
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x2], #4
+CPU_BE( rev w5, w5 )
str w5, [x3], #4
sub w4, w4, #1
cbnz w4, 1b
@@ -446,6 +455,9 @@ __kvm_hyp_code_start:
ldr w4, [x3, #VGIC_CPU_HCR]
ldr w5, [x3, #VGIC_CPU_VMCR]
ldr w6, [x3, #VGIC_CPU_APR]
+CPU_BE( rev w4, w4 )
+CPU_BE( rev w5, w5 )
+CPU_BE( rev w6, w6 )
str w4, [x2, #GICH_HCR]
str w5, [x2, #GICH_VMCR]
@@ -456,6 +468,7 @@ __kvm_hyp_code_start:
ldr w4, [x3, #VGIC_CPU_NR_LR]
add x3, x3, #VGIC_CPU_LR
1: ldr w5, [x3], #4
+CPU_BE( rev w5, w5 )
str w5, [x2], #4
sub w4, w4, #1
cbnz w4, 1b
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index de2de5db628d..0cb8742de4f2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -31,7 +31,6 @@
#include <linux/sort.h>
#include <linux/of_fdt.h>
-#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
@@ -39,17 +38,9 @@
#include "mm.h"
-static unsigned long phys_initrd_start __initdata = 0;
-static unsigned long phys_initrd_size __initdata = 0;
-
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
-{
- phys_initrd_start = start;
- phys_initrd_size = end - start;
-}
-
+#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
unsigned long start, size;
@@ -59,12 +50,13 @@ static int __init early_initrd(char *p)
if (*endp == ',') {
size = memparse(endp + 1, NULL);
- phys_initrd_start = start;
- phys_initrd_size = size;
+ initrd_start = (unsigned long)__va(start);
+ initrd_end = (unsigned long)__va(start + size);
}
return 0;
}
early_param("initrd", early_initrd);
+#endif
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
@@ -137,13 +129,8 @@ void __init arm64_memblock_init(void)
/* Register the kernel text, kernel data and initrd with memblock */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
- if (phys_initrd_size) {
- memblock_reserve(phys_initrd_start, phys_initrd_size);
-
- /* Now convert initrd to virtual addresses */
- initrd_start = __phys_to_virt(phys_initrd_start);
- initrd_end = initrd_start + phys_initrd_size;
- }
+ if (initrd_start)
+ memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
/*
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6db37a..2bb1d586664c 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -77,8 +77,24 @@ EXPORT_SYMBOL(__ioremap);
void __iounmap(volatile void __iomem *io_addr)
{
- void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+ unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
- vunmap(addr);
+ /*
+	 * We could get an address outside the vmalloc range if
+	 * ioremap_cache() reused an existing RAM mapping.
+ */
+ if (VMALLOC_START <= addr && addr < VMALLOC_END)
+ vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b1b31bbc967b..421b99fd635d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -162,9 +162,9 @@ ENDPROC(__cpu_setup)
* CE0 XWHW CZ ME TEEA S
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
- * .... .100 .... 01.1 11.1 ..01 0001 1101 < software settings
+ * .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
*/
.type crval, #object
crval:
- .word 0x030802e2 // clear
+ .word 0x000802e2 // clear
.word 0x0405d11d // set
diff --git a/arch/arm64/xen/Makefile b/arch/arm64/xen/Makefile
index be240404ba96..74a8d87e542b 100644
--- a/arch/arm64/xen/Makefile
+++ b/arch/arm64/xen/Makefile
@@ -1,2 +1,2 @@
-xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o)
+xen-arm-y += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
obj-y := xen-arm.o hypercall.o