From 57b9da32addd819b0baef5573a88fcfaf3e6699b Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:22 +0100 Subject: ARM: 7508/1: opcodes: Don't define the thumb32 byteswapping macros for BE32 The existing __mem_to_opcode_thumb32() is incorrect for BE32 platforms. However, these don't support Thumb-2 kernels, so this option is not so relevant for those platforms anyway. This operation is complicated by the lack of unaligned memory access support prior to ARMv6. Rather than provide a "working" macro which probably won't get used (or worse, will get misused), this patch removes the macro for BE32 kernels. People manipulating Thumb opcodes prior to ARMv6 should almost certainly be splitting these operations into halfwords anyway, using __opcode_thumb32_{first,second,compose}() and the 16-bit opcode transformations. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 19c48deda70f..6bf54f9411a6 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -49,18 +49,31 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); #include <linux/swab.h> #ifdef CONFIG_CPU_ENDIAN_BE8 + #define __opcode_to_mem_arm(x) swab32(x) #define __opcode_to_mem_thumb16(x) swab16(x) #define __opcode_to_mem_thumb32(x) swahb32(x) -#else + +#else /* ! CONFIG_CPU_ENDIAN_BE8 */ + #define __opcode_to_mem_arm(x) ((u32)(x)) #define __opcode_to_mem_thumb16(x) ((u16)(x)) +#ifndef CONFIG_CPU_ENDIAN_BE32 +/* + * On BE32 systems, using 32-bit accesses to store Thumb instructions will not + * work in all cases, due to alignment constraints. For now, a correct + * version is not provided for BE32. + */ #define __opcode_to_mem_thumb32(x) swahw32(x) #endif +#endif /* ! CONFIG_CPU_ENDIAN_BE8 */ + #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) +#ifndef CONFIG_CPU_ENDIAN_BE32 #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) +#endif /* Operations specific to Thumb opcodes */ -- cgit v1.2.3 From 0ce3de23f2a520a6ac8c2179fb8f88342c4992ef Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:23 +0100 Subject: ARM: 7509/1: opcodes: Make opcode byteswapping macros assembly-compatible Most of the existing macros don't work with assembler, due to the use of type casts and C functions from <linux/swab.h>. This patch abstracts out those operations and provides simple explicit versions for use in assembly code. __opcode_is_thumb32() and __opcode_is_thumb16() are also converted to do bitmask-based testing to avoid confusion if these are used in assembly code (the assembler typically treats all arithmetic values as signed). These changes avoid the need for the compiler to pre-evaluate constant expressions used to generate opcodes. By ensuring that the forms of these expressions can be evaluated directly by the assembler, we can just stringify the expressions directly into the asm during the preprocessing pass. The alternative approach (passing the evaluated expression via an inline asm "i" constraint) gets painful because the contents of the asm and the constraints must be kept in sync. This makes the resulting macros awkward to use.
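To make the trade-off concrete, here is a minimal sketch of the stringify-into-asm technique; the EXAMPLE_* names are invented for illustration and are not part of the patch:

#define EXAMPLE_SWAB32(x) ( \
	(((x) << 24) & 0xFF000000) | (((x) << 8) & 0x00FF0000) | \
	(((x) >> 8) & 0x0000FF00) | (((x) >> 24) & 0x000000FF))

#define EXAMPLE_STR(x) #x
#define EXAMPLE_XSTR(x) EXAMPLE_STR(x)

static inline void example_emit_swabbed_word(void)
{
	/*
	 * The whole constant expression survives preprocessing as text
	 * and is evaluated by the assembler itself, so no inline-asm
	 * "i" constraint (and no asm-body/constraint synchronisation)
	 * is needed.
	 */
	asm volatile(".long " EXAMPLE_XSTR(EXAMPLE_SWAB32(0xE1A00000)));
}

This is exactly the property the ___asm_opcode_*() helpers introduced below are designed to preserve.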
Retaining the C forms of the macros allows more efficient code to be generated when opcodes are generated programmatically at run-time, but there is no way to embed run-time-generated opcodes in asm() blocks. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 97 +++++++++++++++++++++++++++++++++++------- 1 file changed, 82 insertions(+), 15 deletions(-) diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 6bf54f9411a6..f57e417cab95 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -18,6 +18,33 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); #define ARM_OPCODE_CONDTEST_UNCOND 2 +/* + * Assembler opcode byteswap helpers. + * These are only intended for use by this header: don't use them directly, + * because they will be suboptimal in most cases. + */ +#define ___asm_opcode_swab32(x) ( \ + (((x) << 24) & 0xFF000000) \ + | (((x) << 8) & 0x00FF0000) \ + | (((x) >> 8) & 0x0000FF00) \ + | (((x) >> 24) & 0x000000FF) \ +) +#define ___asm_opcode_swab16(x) ( \ + (((x) << 8) & 0xFF00) \ + | (((x) >> 8) & 0x00FF) \ +) +#define ___asm_opcode_swahb32(x) ( \ + (((x) << 8) & 0xFF00FF00) \ + | (((x) >> 8) & 0x00FF00FF) \ +) +#define ___asm_opcode_swahw32(x) ( \ + (((x) << 16) & 0xFFFF0000) \ + | (((x) >> 16) & 0x0000FFFF) \ +) +#define ___asm_opcode_identity32(x) ((x) & 0xFFFFFFFF) +#define ___asm_opcode_identity16(x) ((x) & 0xFFFF) + + /* * Opcode byteswap helpers * @@ -41,30 +68,58 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not * represent any valid Thumb-2 instruction. For this range, * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. + * + * The ___asm variants are intended only for use by this header, in situations + * involving inline assembler. For .S files, the normal __opcode_*() macros + * should do the right thing. */ +#ifdef __ASSEMBLY__ -#ifndef __ASSEMBLY__ +#define ___opcode_swab32(x) ___asm_opcode_swab32(x) +#define ___opcode_swab16(x) ___asm_opcode_swab16(x) +#define ___opcode_swahb32(x) ___asm_opcode_swahb32(x) +#define ___opcode_swahw32(x) ___asm_opcode_swahw32(x) +#define ___opcode_identity32(x) ___asm_opcode_identity32(x) +#define ___opcode_identity16(x) ___asm_opcode_identity16(x) + +#else /* ! __ASSEMBLY__ */ #include <linux/types.h> #include <linux/swab.h> +#define ___opcode_swab32(x) swab32(x) +#define ___opcode_swab16(x) swab16(x) +#define ___opcode_swahb32(x) swahb32(x) +#define ___opcode_swahw32(x) swahw32(x) +#define ___opcode_identity32(x) ((u32)(x)) +#define ___opcode_identity16(x) ((u16)(x)) + +#endif /* ! __ASSEMBLY__ */ + + #ifdef CONFIG_CPU_ENDIAN_BE8 -#define __opcode_to_mem_arm(x) swab32(x) -#define __opcode_to_mem_thumb16(x) swab16(x) -#define __opcode_to_mem_thumb32(x) swahb32(x) +#define __opcode_to_mem_arm(x) ___opcode_swab32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_swab16(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahb32(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_swab32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_swab16(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahb32(x) #else /* ! CONFIG_CPU_ENDIAN_BE8 */
-#define __opcode_to_mem_arm(x) ((u32)(x)) -#define __opcode_to_mem_thumb16(x) ((u16)(x)) +#define __opcode_to_mem_arm(x) ___opcode_identity32(x) +#define __opcode_to_mem_thumb16(x) ___opcode_identity16(x) +#define ___asm_opcode_to_mem_arm(x) ___asm_opcode_identity32(x) +#define ___asm_opcode_to_mem_thumb16(x) ___asm_opcode_identity16(x) #ifndef CONFIG_CPU_ENDIAN_BE32 /* * On BE32 systems, using 32-bit accesses to store Thumb instructions will not * work in all cases, due to alignment constraints. For now, a correct * version is not provided for BE32. */ -#define __opcode_to_mem_thumb32(x) swahw32(x) +#define __opcode_to_mem_thumb32(x) ___opcode_swahw32(x) +#define ___asm_opcode_to_mem_thumb32(x) ___asm_opcode_swahw32(x) #endif #endif /* ! CONFIG_CPU_ENDIAN_BE8 */ @@ -78,15 +133,27 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); /* Operations specific to Thumb opcodes */ /* Instruction size checks: */ -#define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL) -#define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL) +#define __opcode_is_thumb32(x) ( \ + ((x) & 0xF8000000) == 0xE8000000 \ + || ((x) & 0xF0000000) == 0xF0000000 \ +) +#define __opcode_is_thumb16(x) ( \ + ((x) & 0xFFFF0000) == 0 \ + && !(((x) & 0xF800) == 0xE800 || ((x) & 0xF000) == 0xF000) \ +) /* Operations to construct or split 32-bit Thumb instructions: */ -#define __opcode_thumb32_first(x) ((u16)((x) >> 16)) -#define __opcode_thumb32_second(x) ((u16)(x)) -#define __opcode_thumb32_compose(first, second) \ - (((u32)(u16)(first) << 16) | (u32)(u16)(second)) - -#endif /* __ASSEMBLY__ */ +#define __opcode_thumb32_first(x) (___opcode_identity16((x) >> 16)) +#define __opcode_thumb32_second(x) (___opcode_identity16(x)) +#define __opcode_thumb32_compose(first, second) ( \ + (___opcode_identity32(___opcode_identity16(first)) << 16) \ + | ___opcode_identity32(___opcode_identity16(second)) \ +) +#define ___asm_opcode_thumb32_first(x) (___asm_opcode_identity16((x) >> 16)) +#define ___asm_opcode_thumb32_second(x) (___asm_opcode_identity16(x)) +#define ___asm_opcode_thumb32_compose(first, second) ( \ + (___asm_opcode_identity32(___asm_opcode_identity16(first)) << 16) \ + | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \ +) #endif /* __ASM_ARM_OPCODES_H */ -- cgit v1.2.3 From a61a41a01822d48bbe53e6c1df56b88ee2f3de34 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:24 +0100 Subject: ARM: 7510/1: opcodes: Add helpers for emitting custom opcodes This patch adds some __inst_() macros for injecting custom opcodes in assembler (both inline and in .S files). They should make it easier and cleaner to get things right in little-/big-endian/ARM/Thumb-2 kernels without a lot of #ifdefs. This pure-preprocessor approach is preferred over the alternative method of wedging extra assembler directives into the assembler input using top-level asm() blocks, since there is no way to guarantee that the compiler won't reorder those with respect to each other or with respect to non-toplevel asm() blocks, unless -fno-toplevel-reorder is passed (which is in itself somewhat undesirable because it defeats some potential optimisations). Currently <asm/unified.h> _does_ silently rely on the compiler not reordering at the top level, but it seems better to avoid adding extra code which depends on this if the same result can be achieved in another way.
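As a usage illustration (the example_nop() wrapper is invented for this write-up, not taken from the patch), a human-readable wrapper over these helpers could look like this in C code:

#include <asm/opcodes.h>

static inline void example_nop(void)
{
	/* ARM NOP (0xE320F000) or 16-bit Thumb NOP (0xBF00), chosen to
	 * match the instruction set the kernel is built for. */
	asm volatile(__inst_arm_thumb16(0xE320F000, 0xBF00));
}

In an ARM kernel this expands to a ".long" of the (byteswapped on BE8) ARM encoding; in a Thumb-2 kernel it becomes ".short 0xBF00", so the caller never handles endianness or instruction-set details directly.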
Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes.h | 69 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index f57e417cab95..f7937e1c46bf 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -156,4 +156,73 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); | ___asm_opcode_identity32(___asm_opcode_identity16(second)) \ ) +/* + * Opcode injection helpers + * + * In rare cases it is necessary to assemble an opcode which the + * assembler does not support directly, or which would normally be + * rejected because of the CFLAGS or AFLAGS used to build the affected + * file. + * + * Before using these macros, consider carefully whether it is feasible + * instead to change the build flags for your file, or whether it really + * makes sense to support old assembler versions when building that + * particular kernel feature. + * + * The macros defined here should only be used where there is no viable + * alternative. + * + * + * __inst_arm(x): emit the specified ARM opcode + * __inst_thumb16(x): emit the specified 16-bit Thumb opcode + * __inst_thumb32(x): emit the specified 32-bit Thumb opcode + * + * __inst_arm_thumb16(arm, thumb): emit either the specified arm or + * 16-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * __inst_arm_thumb32(arm, thumb): emit either the specified arm or + * 32-bit Thumb opcode, depending on whether an ARM or Thumb-2 + * kernel is being built + * + * + * Note that using these macros directly is poor practice. Instead, you + * should use them to define human-readable wrapper macros to encode the + * instructions that you care about. In code which might run on ARMv7 or + * above, you can usually use the __inst_arm_thumb{16,32} macros to + * specify the ARM and Thumb alternatives at the same time. This ensures + * that the correct opcode gets emitted depending on the instruction set + * used for the kernel build. + */ +#include + +#define __inst_arm(x) ___inst_arm(___asm_opcode_to_mem_arm(x)) +#define __inst_thumb32(x) ___inst_thumb32( \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_first(x)), \ + ___asm_opcode_to_mem_thumb16(___asm_opcode_thumb32_second(x)) \ +) +#define __inst_thumb16(x) ___inst_thumb16(___asm_opcode_to_mem_thumb16(x)) + +#ifdef CONFIG_THUMB2_KERNEL +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) \ + __inst_thumb16(thumb_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) \ + __inst_thumb32(thumb_opcode) +#else +#define __inst_arm_thumb16(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#define __inst_arm_thumb32(arm_opcode, thumb_opcode) __inst_arm(arm_opcode) +#endif + +/* Helpers for the helpers. Don't use these directly. 
*/ +#ifdef __ASSEMBLY__ +#define ___inst_arm(x) .long x +#define ___inst_thumb16(x) .short x +#define ___inst_thumb32(first, second) .short first, second +#else +#define ___inst_arm(x) ".long " __stringify(x) "\n\t" +#define ___inst_thumb16(x) ".short " __stringify(x) "\n\t" +#define ___inst_thumb32(first, second) \ + ".short " __stringify(first) ", " __stringify(second) "\n\t" +#endif + #endif /* __ASM_ARM_OPCODES_H */ -- cgit v1.2.3 From 508514ed25315dd28340000831b17535d6f772da Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Mon, 3 Sep 2012 13:49:25 +0100 Subject: ARM: 7511/1: opcodes: Opcode definitions for the Virtualization Extensions For now, this patch just adds a definition for the HVC instruction. More can be added here later, as needed. Now that we have a real example of how to use the opcode injection macros properly, this patch also adds a cross-reference from the explanation in opcodes.h (since without an example, figuring out how to use the macros is not that easy). Signed-off-by: Dave Martin Acked-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/opcodes-virt.h | 29 +++++++++++++++++++++++++++++ arch/arm/include/asm/opcodes.h | 2 ++ 2 files changed, 31 insertions(+) create mode 100644 arch/arm/include/asm/opcodes-virt.h diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h new file mode 100644 index 000000000000..b85665a96f8e --- /dev/null +++ b/arch/arm/include/asm/opcodes-virt.h @@ -0,0 +1,29 @@ +/* + * opcodes-virt.h: Opcode definitions for the ARM virtualization extensions + * Copyright (C) 2012 Linaro Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ +#ifndef __ASM_ARM_OPCODES_VIRT_H +#define __ASM_ARM_OPCODES_VIRT_H + +#include + +#define __HVC(imm16) __inst_arm_thumb32( \ + 0xE1400070 | (((imm16) & 0xFFF0) << 4) | ((imm16) & 0x000F), \ + 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ +) + +#endif /* ! __ASM_ARM_OPCODES_VIRT_H */ diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index f7937e1c46bf..74e211a6fb24 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -193,6 +193,8 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); * specify the ARM and Thumb alternatives at the same time. This ensures * that the correct opcode gets emitted depending on the instruction set * used for the kernel build. + * + * Look at opcodes-virt.h for an example of how to use these macros. */ #include -- cgit v1.2.3 From b9a348cb12f3925a27fcf0a38a146b40978588d0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 9 Aug 2012 10:43:29 +0100 Subject: ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding Enabling boot from HYP mode requires the use of some more virt-specific instructions ("eret" and "msr elr_hyp, reg"). 
Add the necessary encoding to asm/opcodes-virt.h. Signed-off-by: Marc Zyngier --- arch/arm/include/asm/opcodes-virt.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h index b85665a96f8e..efcfdf92d9d5 100644 --- a/arch/arm/include/asm/opcodes-virt.h +++ b/arch/arm/include/asm/opcodes-virt.h @@ -26,4 +26,14 @@ 0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF) \ ) +#define __ERET __inst_arm_thumb32( \ + 0xE160006E, \ + 0xF3DE8F00 \ +) + +#define __MSR_ELR_HYP(regnum) __inst_arm_thumb32( \ + 0xE12EF300 | regnum, \ + 0xF3808E30 | (regnum << 16) \ +) + #endif /* ! __ASM_ARM_OPCODES_VIRT_H */ -- cgit v1.2.3 From 80c59dafb1a9a86fa996e6e34d06b60567c925ca Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 9 Feb 2012 08:47:17 -0800 Subject: ARM: virt: allow the kernel to be entered in HYP mode This patch does two things: * Ensure that asynchronous aborts are masked at kernel entry. The bootloader should be masking these anyway, but this reduces the damage window just in case it doesn't. * Enter svc mode via exception return to ensure that CPU state is properly serialised. This does not matter when switching from an ordinary privileged mode ("PL1" modes in ARMv7-AR rev C parlance), but it potentially does matter when switching from another privileged mode such as hyp mode. This should allow the kernel to boot safely either from svc mode or hyp mode, even if no support for use of the ARM Virtualization Extensions is built into the kernel. Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/include/asm/assembler.h | 28 ++++++ arch/arm/include/asm/ptrace.h | 1 + arch/arm/include/asm/virt.h | 52 +++++++++++ arch/arm/kernel/Makefile | 2 + arch/arm/kernel/head.S | 14 ++- arch/arm/kernel/hyp-stub.S | 192 +++++++++++++++++++++++++++++++++++++++ 6 files changed, 286 insertions(+), 3 deletions(-) create mode 100644 arch/arm/include/asm/virt.h create mode 100644 arch/arm/kernel/hyp-stub.S diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 03fb93621d0d..658a15dbc87e 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -22,6 +22,7 @@ #include <asm/ptrace.h> #include <asm/domain.h> +#include <asm/opcodes-virt.h> #define IOMEM(x) (x) @@ -239,6 +240,33 @@ .endm #endif +/* + * Helper macro to enter SVC mode cleanly and mask interrupts. reg is + * a scratch register for the macro to overwrite. + * + * This macro is intended for forcing the CPU into SVC mode at boot time. + * You cannot return to the original mode. + * + * Beware, it also clobbers LR.
+ */ +.macro safe_svcmode_maskall reg:req + mrs \reg , cpsr + mov lr , \reg + and lr , lr , #MODE_MASK + cmp lr , #HYP_MODE + orr \reg , \reg , #PSR_A_BIT | PSR_I_BIT | PSR_F_BIT + bic \reg , \reg , #MODE_MASK + orr \reg , \reg , #SVC_MODE +THUMB( orr \reg , \reg , #PSR_T_BIT ) + msr spsr_cxsf, \reg + adr lr, BSYM(2f) + bne 1f + __MSR_ELR_HYP(14) + __ERET +1: movs pc, lr +2: +.endm + /* * STRT/LDRT access macros with ARM and Thumb-2 variants */ diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 355ece523f41..91ef6c231c47 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -44,6 +44,7 @@ #define IRQ_MODE 0x00000012 #define SVC_MODE 0x00000013 #define ABT_MODE 0x00000017 +#define HYP_MODE 0x0000001a #define UND_MODE 0x0000001b #define SYSTEM_MODE 0x0000001f #define MODE32_BIT 0x00000010 diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h new file mode 100644 index 000000000000..0a99723edbf2 --- /dev/null +++ b/arch/arm/include/asm/virt.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#ifndef VIRT_H +#define VIRT_H + +#include + +/* + * Flag indicating that the kernel was not entered in the same mode on every + * CPU. The zImage loader stashes this value in an SPSR, so we need an + * architecturally defined flag bit here (the N flag, as it happens) + */ +#define BOOT_CPU_MODE_MISMATCH (1<<31) + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_ARM_VIRT_EXT +/* + * __boot_cpu_mode records what mode the primary CPU was booted in. + * A correctly-implemented bootloader must start all CPUs in the same mode: + * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate + * that some CPU(s) were booted in a different mode. + * + * This allows the kernel to flag an error when the secondaries have come up. + */ +extern int __boot_cpu_mode; + +void __hyp_set_vectors(unsigned long phys_vector_base); +unsigned long __hyp_get_vectors(void); +#else +#define __boot_cpu_mode (SVC_MODE) +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* ! VIRT_H */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 7ad2d5cf7008..49b61a3f5b29 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -82,4 +82,6 @@ head-y := head$(MMUEXT).o obj-$(CONFIG_DEBUG_LL) += debug.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o + extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 3db960e20cb8..27093e4feef8 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -83,8 +83,12 @@ ENTRY(stext) THUMB( .thumb ) @ switch to Thumb now. 
THUMB(1: ) - setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode - @ and irqs disabled +#ifdef CONFIG_ARM_VIRT_EXT + bl __hyp_stub_install +#endif + @ ensure svc mode and all interrupts masked + safe_svcmode_maskall r9 + mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type @ r5=procinfo r9=cpuid movs r10, r5 @ invalid processor (r5=0)? @@ -326,7 +330,11 @@ ENTRY(secondary_startup) * the processor type - there is no need to check the machine type * as it has already been validated by the primary processor. */ - setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 +#ifdef CONFIG_ARM_VIRT_EXT + bl __hyp_stub_install +#endif + safe_svcmode_maskall r9 + mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type movs r10, r5 @ invalid processor? diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S new file mode 100644 index 000000000000..b03e9244e5ad --- /dev/null +++ b/arch/arm/kernel/hyp-stub.S @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2012 Linaro Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <asm/assembler.h> +#include <asm/virt.h> + +/* + * For the kernel proper, we need to find out the CPU boot mode long after + * boot, so we need to store it in a writable variable. + * + * This is not in .bss, because we set it sufficiently early that the boot-time + * zeroing of .bss would clobber it. + */ .data ENTRY(__boot_cpu_mode) .long 0 .text /* * Save the primary CPU boot mode. Requires 3 scratch registers. */ .macro store_primary_cpu_mode reg1, reg2, reg3 mrs \reg1, cpsr and \reg1, \reg1, #MODE_MASK adr \reg2, .L__boot_cpu_mode_offset ldr \reg3, [\reg2] str \reg1, [\reg2, \reg3] .endm /* * Compare the current mode with the one saved on the primary CPU. * If they don't match, record that fact. The Z bit indicates * if there's a match or not. * Requires 3 additional scratch registers. */ .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3 adr \reg2, .L__boot_cpu_mode_offset ldr \reg3, [\reg2] ldr \reg1, [\reg2, \reg3] cmp \mode, \reg1 @ matches primary CPU boot mode? orrne r7, r7, #BOOT_CPU_MODE_MISMATCH strne r7, [r5, r6] @ record what happened and give up .endm /* * Hypervisor stub installation functions. * * These must be called with the MMU and D-cache off. * They are not ABI compliant and are only intended to be called from the kernel * entry points in head.S. */ @ Call this from the primary CPU ENTRY(__hyp_stub_install) store_primary_cpu_mode r4, r5, r6 ENDPROC(__hyp_stub_install) @ fall through... @ Secondary CPUs should call here ENTRY(__hyp_stub_install_secondary) mrs r4, cpsr and r4, r4, #MODE_MASK /* * If the secondary has booted with a different mode, give up * immediately.
+ */ + compare_cpu_mode_with_primary r4, r5, r6, r7 + bxne lr + + /* + * Once we have given up on one CPU, we do not try to install the + * stub hypervisor on the remaining ones: because the saved boot mode + * is modified, it can't compare equal to the CPSR mode field any + * more. + * + * Otherwise... + */ + + cmp r4, #HYP_MODE + bxne lr @ give up if the CPU is not in HYP mode + +/* + * Configure HSCTLR to set correct exception endianness/instruction set + * state etc. + * Turn off all traps + * Eventually, CPU-specific code might be needed -- assume not for now + * + * This code relies on the "eret" instruction to synchronize the + * various coprocessor accesses. + */ + @ Now install the hypervisor stub: + adr r7, __hyp_stub_vectors + mcr p15, 4, r7, c12, c0, 0 @ set hypervisor vector base (HVBAR) + + @ Disable all traps, so we don't get any nasty surprise + mov r7, #0 + mcr p15, 4, r7, c1, c1, 0 @ HCR + mcr p15, 4, r7, c1, c1, 2 @ HCPTR + mcr p15, 4, r7, c1, c1, 3 @ HSTR + +THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE +#ifdef CONFIG_CPU_BIG_ENDIAN + orr r7, #(1 << 9) @ HSCTLR.EE +#endif + mcr p15, 4, r7, c1, c0, 0 @ HSCTLR + + mrc p15, 4, r7, c1, c1, 1 @ HDCR + and r7, #0x1f @ Preserve HPMN + mcr p15, 4, r7, c1, c1, 1 @ HDCR + + bic r7, r4, #MODE_MASK + orr r7, r7, #SVC_MODE +THUMB( orr r7, r7, #PSR_T_BIT ) + msr spsr_cxsf, r7 @ This is SPSR_hyp. + + __MSR_ELR_HYP(14) @ msr elr_hyp, lr + __ERET @ return, switching to SVC mode + @ The boot CPU mode is left in r4. +ENDPROC(__hyp_stub_install_secondary) + +__hyp_stub_do_trap: + cmp r0, #-1 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR + mcrne p15, 4, r0, c12, c0, 0 @ set HVBAR + __ERET +ENDPROC(__hyp_stub_do_trap) + +/* + * __hyp_set_vectors: Call this after boot to set the initial hypervisor + * vectors as part of hypervisor installation. On an SMP system, this should + * be called on each CPU. + * + * r0 must be the physical address of the new vector table (which must lie in + * the bottom 4GB of physical address space. + * + * r0 must be 32-byte aligned. + * + * Before calling this, you must check that the stub hypervisor is installed + * everywhere, by waiting for any secondary CPUs to be brought up and then + * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true. + * + * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or + * something else went wrong... in such cases, trying to install a new + * hypervisor is unlikely to work as desired. + * + * When you call into your shiny new hypervisor, sp_hyp will contain junk, + * so you will need to set that to something sensible at the new hypervisor's + * initialisation entry point. + */ +ENTRY(__hyp_get_vectors) + mov r0, #-1 +ENDPROC(__hyp_get_vectors) + @ fall through +ENTRY(__hyp_set_vectors) + __HVC(0) + bx lr +ENDPROC(__hyp_set_vectors) + +.align 2 +.L__boot_cpu_mode_offset: + .long __boot_cpu_mode - . + +.align 5 +__hyp_stub_vectors: +__hyp_stub_reset: W(b) . +__hyp_stub_und: W(b) . +__hyp_stub_svc: W(b) . +__hyp_stub_pabort: W(b) . +__hyp_stub_dabort: W(b) . +__hyp_stub_trap: W(b) __hyp_stub_do_trap +__hyp_stub_irq: W(b) . +__hyp_stub_fiq: W(b) . +ENDPROC(__hyp_stub_vectors) + -- cgit v1.2.3 From 424e5994e63326a42012f003f1174f3c363c7b62 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 10 Feb 2012 18:07:07 -0800 Subject: ARM: zImage/virt: hyp mode entry support for the zImage loader The zImage loader needs to turn on the MMU in order to take advantage of caching while decompressing the zImage. 
Running this in hyp mode would require the LPAE pagetable format to be supported; to avoid this complexity, this patch switches out of hyp mode, and returns back to hyp mode just before booting the kernel. This implementation assumes that the Hyp mode view of memory and the PL1 view of memory are coherent, providing that the MMU and caches are off in both, as required by the boot protocol. The zImage decompression code must drain the write buffer on completion anyway, and entry into Hyp mode should flush any prefetch buffer, avoiding hazards associated with local write buffers and the pipeline. Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/boot/compressed/.gitignore | 1 + arch/arm/boot/compressed/Makefile | 9 ++++- arch/arm/boot/compressed/head.S | 71 +++++++++++++++++++++++++++++++++---- arch/arm/kernel/hyp-stub.S | 18 ++++++++++ 4 files changed, 91 insertions(+), 8 deletions(-) diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index d0d441c429ae..f79a08efe000 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,6 +1,7 @@ ashldi3.S font.c lib1funcs.S +hyp-stub.S piggy.gzip piggy.lzo piggy.lzma diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index bb267562e7ed..a517153a13ea 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile @@ -30,6 +30,10 @@ FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c OBJS += string.o CFLAGS_string.o := -Os +ifeq ($(CONFIG_ARM_VIRT_EXT),y) +OBJS += hyp-stub.o +endif + # # Architecture dependencies # @@ -126,7 +130,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS)) endif ccflags-y := -fpic -fno-builtin -I$(obj) -asflags-y := -Wa,-march=all +asflags-y := -Wa,-march=all -DZIMAGE # Supply kernel BSS size to the decompressor via a linker symbol. KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \ @@ -198,3 +202,6 @@ $(obj)/font.c: $(FONTC) $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG) @sed "$(SEDFLAGS)" < $< > $@ + +$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S + $(call cmd,shipped) diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index b8c64b80bafc..0749a6184abf 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S @@ -9,6 +9,7 @@ * published by the Free Software Foundation. 
*/ #include <linux/linkage.h> +#include <asm/assembler.h> /* * Debugging stuff @@ -132,7 +133,12 @@ start: .word start @ absolute load/run zImage address .word _edata @ zImage end address THUMB( .thumb ) -1: mov r7, r1 @ save architecture ID +1: + mrs r9, cpsr #ifdef CONFIG_ARM_VIRT_EXT + bl __hyp_stub_install @ get into SVC mode, reversibly #endif + mov r7, r1 @ save architecture ID mov r8, r2 @ save atags pointer #ifndef __ARM_ARCH_2__ @@ -148,9 +154,9 @@ start: ARM( swi 0x123456 ) @ angel_SWI_ARM THUMB( svc 0xab ) @ angel_SWI_THUMB not_angel: - mrs r2, cpsr @ turn off interrupts to - orr r2, r2, #0xc0 @ prevent angel from running - msr cpsr_c, r2 + safe_svcmode_maskall r0 + msr spsr_cxsf, r9 @ Save the CPU boot mode in + @ SPSR #else teqp pc, #0x0c000003 @ turn off interrupts #endif @@ -350,6 +356,20 @@ dtb_check_done: adr r5, restart bic r5, r5, #31 +/* Relocate the hyp vector base if necessary */ +#ifdef CONFIG_ARM_VIRT_EXT + mrs r0, spsr + and r0, r0, #MODE_MASK + cmp r0, #HYP_MODE + bne 1f + + bl __hyp_get_vectors + sub r0, r0, r5 + add r0, r0, r10 + bl __hyp_set_vectors +1: +#endif + sub r9, r6, r5 @ size to copy add r9, r9, #31 @ rounded up to a multiple bic r9, r9, #31 @ ... of 32 bytes @@ -458,11 +478,29 @@ not_relocated: mov r0, #0 bl decompress_kernel bl cache_clean_flush bl cache_off - mov r0, #0 @ must be zero mov r1, r7 @ restore architecture number mov r2, r8 @ restore atags pointer - ARM( mov pc, r4 ) @ call kernel - THUMB( bx r4 ) @ entry point is always ARM + +#ifdef CONFIG_ARM_VIRT_EXT + mrs r0, spsr @ Get saved CPU boot mode + and r0, r0, #MODE_MASK + cmp r0, #HYP_MODE @ if not booted in HYP mode... + bne __enter_kernel @ boot kernel directly + + adr r12, .L__hyp_reentry_vectors_offset + ldr r0, [r12] + add r0, r0, r12 + + bl __hyp_set_vectors + __HVC(0) @ otherwise bounce to hyp mode + + b . @ should never be reached + + .align 2 +.L__hyp_reentry_vectors_offset: .long __hyp_reentry_vectors - . +#else + b __enter_kernel +#endif .align 2 .type LC0, #object @@ -1191,6 +1229,25 @@ memdump: mov r12, r0 #endif .ltorg + +#ifdef CONFIG_ARM_VIRT_EXT +.align 5 +__hyp_reentry_vectors: + W(b) . @ reset + W(b) . @ undef + W(b) . @ svc + W(b) . @ pabort + W(b) . @ dabort + W(b) __enter_kernel @ hyp + W(b) . @ irq + W(b) . @ fiq +#endif /* CONFIG_ARM_VIRT_EXT */ + +__enter_kernel: + mov r0, #0 @ must be 0 + ARM( mov pc, r4 ) @ call kernel + THUMB( bx r4 ) @ entry point is always ARM + reloc_code_end: .align diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index b03e9244e5ad..70609417deaf 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -21,6 +21,7 @@ #include <asm/assembler.h> #include <asm/virt.h> +#ifndef ZIMAGE /* * For the kernel proper, we need to find out the CPU boot mode long after * boot, so we need to store it in a writable variable. @@ -59,6 +60,21 @@ ENTRY(__boot_cpu_mode) strne r7, [r5, r6] @ record what happened and give up .endm +#else /* ZIMAGE */ + + .macro store_primary_cpu_mode reg1:req, reg2:req, reg3:req + .endm + +/* + * The zImage loader only runs on one CPU, so we don't bother with multi-CPU + * consistency checking: + */ + .macro compare_cpu_mode_with_primary mode, reg1, reg2, reg3 + cmp \mode, \mode + .endm + +#endif /* ZIMAGE */ + /* * Hypervisor stub installation functions. * @@ -174,9 +190,11 @@ ENTRY(__hyp_set_vectors) bx lr ENDPROC(__hyp_set_vectors) +#ifndef ZIMAGE .align 2 .L__boot_cpu_mode_offset: .long __boot_cpu_mode - .
+#endif .align 5 __hyp_stub_vectors: -- cgit v1.2.3 From 6a6d55c38c8b4ee77b50a33f03ea09e75b18bf82 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 10 Feb 2012 18:07:07 -0800 Subject: ARM: virt: Update documentation for hyp mode entry support Document the possibility of the kernel being entered in HYP mode. Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- Documentation/arm/Booting | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting index a341d87d276e..0c1f475fdf36 100644 --- a/Documentation/arm/Booting +++ b/Documentation/arm/Booting @@ -154,13 +154,33 @@ In either case, the following conditions must be met: - CPU mode All forms of interrupts must be disabled (IRQs and FIQs) - The CPU must be in SVC mode. (A special exception exists for Angel) + + For CPUs which do not include the ARM virtualization extensions, the + CPU must be in SVC mode. (A special exception exists for Angel) + + CPUs which include support for the virtualization extensions can be + entered in HYP mode in order to enable the kernel to make full use of + these extensions. This is the recommended boot method for such CPUs, + unless the virtualisation extensions are already in use by a pre-installed + hypervisor. + + If the kernel is not entered in HYP mode for any reason, it must be + entered in SVC mode. - Caches, MMUs The MMU must be off. Instruction cache may be on or off. Data cache must be off. + If the kernel is entered in HYP mode, the above requirements apply to + the HYP mode configuration in addition to the ordinary PL1 (privileged + kernel modes) configuration. In addition, all traps into the + hypervisor must be disabled, and PL1 access must be granted for all + peripherals and CPU resources for which this is architecturally + possible. Except for entering in HYP mode, the system configuration + should be such that a kernel which does not include support for the + virtualization extensions can boot correctly without extra help. + - The boot loader is expected to call the kernel image by jumping directly to the first instruction of the kernel image. -- cgit v1.2.3 From 4588c34daabb5aebee9cbe90f0ca6ab11412f207 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 17 Feb 2012 16:54:28 +0000 Subject: ARM: virt: Add boot-time diagnostics In order to easily detect pathological cases, print some diagnostics when the kernel boots. This also provides helpers to detect that HYP mode is actually available, which can be used by other subsystems to enable HYP-specific features.
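To illustrate how a subsystem might consume these helpers (a sketch for this commentary; example_hyp_feature_init() is hypothetical and not part of the patch):

#include <linux/errno.h>
#include <linux/init.h>
#include <asm/virt.h>

static int __init example_hyp_feature_init(void)
{
	/* Proceed only if every CPU was consistently booted in HYP mode. */
	if (!is_hyp_mode_available())
		return -ENODEV;

	/* ... safe to set up hypervisor-backed functionality here ... */
	return 0;
}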
Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/include/asm/virt.h | 17 +++++++++++++++++ arch/arm/kernel/setup.c | 20 ++++++++++++++++++++ arch/arm/kernel/smp.c | 3 +++ 3 files changed, 40 insertions(+) diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 0a99723edbf2..86164df86cb4 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -47,6 +47,23 @@ unsigned long __hyp_get_vectors(void); #define __boot_cpu_mode (SVC_MODE) #endif +#ifndef ZIMAGE +void hyp_mode_check(void); + +/* Reports the availability of HYP mode */ +static inline bool is_hyp_mode_available(void) +{ + return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE && + !(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH)); +} + +/* Check if the bootloader has booted CPUs in different modes */ +static inline bool is_hyp_mode_mismatched(void) +{ + return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); +} +#endif + #endif /* __ASSEMBLY__ */ #endif /* ! VIRT_H */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index a81dcecc7343..04fd01feea86 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -55,6 +55,7 @@ #include #include #include +#include <asm/virt.h> #if defined(CONFIG_DEPRECATED_PARAM_STRUCT) #include "compat.h" @@ -937,6 +938,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b) return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; } +void __init hyp_mode_check(void) +{ +#ifdef CONFIG_ARM_VIRT_EXT + if (is_hyp_mode_available()) { + pr_info("CPU: All CPU(s) started in HYP mode.\n"); + pr_info("CPU: Virtualization extensions available.\n"); + } else if (is_hyp_mode_mismatched()) { + pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n", + __boot_cpu_mode & MODE_MASK); + pr_warn("CPU: This may indicate a broken bootloader or firmware.\n"); + } else + pr_info("CPU: All CPU(s) started in SVC mode.\n"); +#endif +} + void __init setup_arch(char **cmdline_p) { struct machine_desc *mdesc; @@ -980,6 +996,10 @@ void __init setup_arch(char **cmdline_p) if (is_smp()) smp_init_cpus(); #endif + + if (!is_smp()) + hyp_mode_check(); + reserve_crashkernel(); tcm_init(); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ebd8ad274d76..adf226e718d2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -42,6 +42,7 @@ #include #include #include +#include <asm/virt.h> /* * as from 2.5, kernels no longer have an init_tasks structure @@ -287,6 +288,8 @@ void __init smp_cpus_done(unsigned int max_cpus) num_online_cpus(), bogosum / (500000/HZ), (bogosum / (5000/HZ)) % 100); + + hyp_mode_check(); } void __init smp_prepare_boot_cpu(void) -- cgit v1.2.3 From 5b6728d4189d14c19f384d5ec6087276e7c196d8 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Fri, 17 Feb 2012 16:54:28 +0000 Subject: ARM: virt: Add CONFIG_ARM_VIRT_EXT option It is now possible to enable the virtualization extension support. Signed-off-by: Dave Martin Signed-off-by: Marc Zyngier --- arch/arm/mm/Kconfig | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 101b9681c08c..c9a4963b5c3d 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -624,6 +624,23 @@ config ARM_THUMBEE Say Y here if you have a CPU with the ThumbEE extension and code to make use of it. Say N for code that can run on CPUs without ThumbEE.
+config ARM_VIRT_EXT + bool "Native support for the ARM Virtualization Extensions" + depends on MMU && CPU_V7 + help + Enable the kernel to make use of the ARM Virtualization + Extensions to install hypervisors without run-time firmware + assistance. + + A compliant bootloader is required in order to make maximum + use of this feature. Refer to Documentation/arm/Booting for + details. + + It is safe to enable this option even if the kernel may not be + booted in HYP mode, may not have support for the + virtualization extensions, or may be booted with a + non-compliant bootloader. + config SWP_EMULATE bool "Emulate SWP/SWPB instructions" depends on !CPU_USE_DOMAINS && CPU_V7 -- cgit v1.2.3 From 8ec58be9f3ff2ad4a4d7bde8f48b4a4c406768e7 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 1 Aug 2012 14:46:41 +0100 Subject: ARM: virt: arch_timers: enable access to physical timers If booting in HYP mode, it makes sense to enable the use of the physical timers, so the kernel can use them directly. Signed-off-by: Marc Zyngier --- arch/arm/kernel/hyp-stub.S | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S index 70609417deaf..65b2417aebce 100644 --- a/arch/arm/kernel/hyp-stub.S +++ b/arch/arm/kernel/hyp-stub.S @@ -142,6 +142,19 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE and r7, #0x1f @ Preserve HPMN mcr p15, 4, r7, c1, c1, 1 @ HDCR +#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER) + @ make CNTP_* and CNTPCT accessible from PL1 + mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1 + lsr r7, #16 + and r7, #0xf + cmp r7, #1 + bne 1f + mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL + orr r7, r7, #3 @ PL1PCEN | PL1PCTEN + mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL +1: +#endif + bic r7, r4, #MODE_MASK orr r7, r7, #SVC_MODE THUMB( orr r7, r7, #PSR_T_BIT ) -- cgit v1.2.3
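As a final illustration (a hypothetical helper, not code from the series): once the stub has set CNTHCTL.PL1PCEN/PL1PCTEN as in the hunk above, kernel (PL1) code can read the physical counter directly:

#include <linux/types.h>

static inline u64 example_read_cntpct(void)
{
	u32 lo, hi;

	/* CNTPCT is read with MRRC p15, 0, <Rt>, <Rt2>, c14
	 * (ARMv7 generic timers). */
	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (lo), "=r" (hi));
	return ((u64)hi << 32) | lo;
}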