From c4b9fd2ac035a55d1fd98322f4360c9d07530597 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:44 +0100
Subject: KVM: arm64: Drop is_kernel_in_hyp_mode() from __invalidate_icache_guest_page()

It is pretty obvious that is_kernel_in_hyp_mode() doesn't make much sense in the hypervisor part of KVM, and should be reserved to the kernel side.

However, mem_protect.c::invalidate_icache_guest_page() calls into __invalidate_icache_guest_page(), which uses is_kernel_in_hyp_mode(). Given that this is part of the pKVM side of the hypervisor, this helper can only return true.

Nothing goes really bad, but __invalidate_icache_guest_page() could spell out what the actual check is: we cannot invalidate the cache if the i-cache is VPIPT and we're running at EL1.

Drop the is_kernel_in_hyp_mode() check in favour of an explicit check against CurrentEL being EL1 or not.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230609162200.2024064-2-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/kvm_mmu.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 27e63c111f78..c8113b931263 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -227,7 +227,8 @@ static inline void __invalidate_icache_guest_page(void *va, size_t size)
 	if (icache_is_aliasing()) {
 		/* any kind of VIPT cache */
 		icache_inval_all_pou();
-	} else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) {
+	} else if (read_sysreg(CurrentEL) != CurrentEL_EL1 ||
+		   !icache_is_vpipt()) {
 		/* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */
 		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
 	}
-- 
cgit v1.2.3

From 35230be87ec6147c20e7433ab9d41e2fd2664631 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:45 +0100
Subject: arm64: Prevent the use of is_kernel_in_hyp_mode() in hypervisor code

Using is_kernel_in_hyp_mode() in hypervisor code is a pretty bad mistake. This helper only checks for CurrentEL being EL2, which is always true. Make the compilation fail if the helper is used in hypervisor context.

Whilst we're at it, flag the helper as __always_inline, which it really should be.

Signed-off-by: Marc Zyngier
Acked-by: Catalin Marinas
Link: https://lore.kernel.org/r/20230609162200.2024064-3-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/virt.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 4eb601e7de50..21e94068804d 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -110,8 +110,10 @@ static inline bool is_hyp_mode_mismatched(void)
 	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
-static inline bool is_kernel_in_hyp_mode(void)
+static __always_inline bool is_kernel_in_hyp_mode(void)
 {
+	BUILD_BUG_ON(__is_defined(__KVM_NVHE_HYPERVISOR__) ||
+		     __is_defined(__KVM_VHE_HYPERVISOR__));
 	return read_sysreg(CurrentEL) == CurrentEL_EL2;
 }
-- 
cgit v1.2.3

From 0ddc312b7c73049d982d173fee4c5dabf1727ebb Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:46 +0100
Subject: arm64: Turn kaslr_feature_override into a generic SW feature override

Disabling KASLR from the command line is implemented as a feature override. Repaint it slightly so that it can further be used as more generic infrastructure for SW override purposes.
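As a rough sketch of how such a SW override gets consumed (the helper name here is invented purely for illustration; the real consumer is the kaslr.c hunk further down, which open-codes the same pattern):

static bool arm64_sw_override_is_set(int field_shift)
{
	u64 val = arm64_sw_feature_override.val &
		  arm64_sw_feature_override.mask;

	/* Override fields are 4 bits wide; non-zero means "active" */
	return !!cpuid_feature_extract_unsigned_field(val, field_shift);
}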
Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20230609162200.2024064-4-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/cpufeature.h | 4 ++++ arch/arm64/kernel/cpufeature.c | 2 ++ arch/arm64/kernel/idreg-override.c | 16 ++++++---------- arch/arm64/kernel/kaslr.c | 6 +++--- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 6bf013fb110d..bc1009890180 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -15,6 +15,8 @@ #define MAX_CPU_FEATURES 128 #define cpu_feature(x) KERNEL_HWCAP_ ## x +#define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0 + #ifndef __ASSEMBLY__ #include @@ -925,6 +927,8 @@ extern struct arm64_ftr_override id_aa64smfr0_override; extern struct arm64_ftr_override id_aa64isar1_override; extern struct arm64_ftr_override id_aa64isar2_override; +extern struct arm64_ftr_override arm64_sw_feature_override; + u32 get_kvm_ipa_limit(void); void dump_cpu_features(void); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 7d7128c65161..2d2b7bb5fa0c 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -664,6 +664,8 @@ struct arm64_ftr_override __ro_after_init id_aa64smfr0_override; struct arm64_ftr_override __ro_after_init id_aa64isar1_override; struct arm64_ftr_override __ro_after_init id_aa64isar2_override; +struct arm64_ftr_override arm64_sw_feature_override; + static const struct __ftr_reg_entry { u32 sys_id; struct arm64_ftr_reg *reg; diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c index 370ab84fd06e..8c93b6198bf5 100644 --- a/arch/arm64/kernel/idreg-override.c +++ b/arch/arm64/kernel/idreg-override.c @@ -138,15 +138,11 @@ static const struct ftr_set_desc smfr0 __initconst = { }, }; -extern struct arm64_ftr_override kaslr_feature_override; - -static const struct ftr_set_desc kaslr __initconst = { - .name = "kaslr", -#ifdef CONFIG_RANDOMIZE_BASE - .override = &kaslr_feature_override, -#endif +static const struct ftr_set_desc sw_features __initconst = { + .name = "arm64_sw", + .override = &arm64_sw_feature_override, .fields = { - FIELD("disabled", 0, NULL), + FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL), {} }, }; @@ -158,7 +154,7 @@ static const struct ftr_set_desc * const regs[] __initconst = { &isar1, &isar2, &smfr0, - &kaslr, + &sw_features, }; static const struct { @@ -175,7 +171,7 @@ static const struct { "id_aa64isar1.api=0 id_aa64isar1.apa=0 " "id_aa64isar2.gpa3=0 id_aa64isar2.apa3=0" }, { "arm64.nomte", "id_aa64pfr1.mte=0" }, - { "nokaslr", "kaslr.disabled=1" }, + { "nokaslr", "arm64_sw.nokaslr=1" }, }; static int __init parse_nokaslr(char *unused) diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index e7477f21a4c9..5d4ce7f5f157 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -23,8 +23,6 @@ u64 __ro_after_init module_alloc_base; u16 __initdata memstart_offset_seed; -struct arm64_ftr_override kaslr_feature_override __initdata; - static int __init kaslr_init(void) { u64 module_range; @@ -36,7 +34,9 @@ static int __init kaslr_init(void) */ module_alloc_base = (u64)_etext - MODULES_VSIZE; - if (kaslr_feature_override.val & kaslr_feature_override.mask & 0xf) { + if (cpuid_feature_extract_unsigned_field(arm64_sw_feature_override.val & + arm64_sw_feature_override.mask, + ARM64_SW_FEATURE_OVERRIDE_NOKASLR)) { pr_info("KASLR disabled on command 
line\n"); return 0; } -- cgit v1.2.3 From e2d6c906f0ac69559da887c0a2c3c10070e746c5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:47 +0100 Subject: arm64: Add KVM_HVHE capability and has_hvhe() predicate Expose a capability keying the hVHE feature as well as a new predicate testing it. Nothing is so far using it, and nothing is enabling it yet. Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20230609162200.2024064-5-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/cpufeature.h | 1 + arch/arm64/include/asm/virt.h | 8 ++++++++ arch/arm64/kernel/cpufeature.c | 19 +++++++++++++++++++ arch/arm64/tools/cpucaps | 1 + 4 files changed, 29 insertions(+) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index bc1009890180..3d4b547ae312 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -16,6 +16,7 @@ #define cpu_feature(x) KERNEL_HWCAP_ ## x #define ARM64_SW_FEATURE_OVERRIDE_NOKASLR 0 +#define ARM64_SW_FEATURE_OVERRIDE_HVHE 4 #ifndef __ASSEMBLY__ diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h index 21e94068804d..5227db7640c8 100644 --- a/arch/arm64/include/asm/virt.h +++ b/arch/arm64/include/asm/virt.h @@ -142,6 +142,14 @@ static __always_inline bool is_protected_kvm_enabled(void) return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE); } +static __always_inline bool has_hvhe(void) +{ + if (is_vhe_hyp_code()) + return false; + + return cpus_have_final_cap(ARM64_KVM_HVHE); +} + static inline bool is_hyp_nvhe(void) { return is_hyp_mode_available() && !is_kernel_in_hyp_mode(); diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 2d2b7bb5fa0c..c743b1a5ae31 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1998,6 +1998,19 @@ static bool has_nested_virt_support(const struct arm64_cpu_capabilities *cap, return true; } +static bool hvhe_possible(const struct arm64_cpu_capabilities *entry, + int __unused) +{ + u64 val; + + val = read_sysreg(id_aa64mmfr1_el1); + if (!cpuid_feature_extract_unsigned_field(val, ID_AA64MMFR1_EL1_VH_SHIFT)) + return false; + + val = arm64_sw_feature_override.val & arm64_sw_feature_override.mask; + return cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE); +} + #ifdef CONFIG_ARM64_PAN static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused) { @@ -2643,6 +2656,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .cpu_enable = cpu_enable_dit, ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, DIT, IMP) }, + { + .desc = "VHE for hypervisor only", + .capability = ARM64_KVM_HVHE, + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .matches = hvhe_possible, + }, {}, }; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 40ba95472594..3c23a55d7c2f 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -47,6 +47,7 @@ HAS_TLB_RANGE HAS_VIRT_HOST_EXTN HAS_WFXT HW_DBM +KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE MTE -- cgit v1.2.3 From 7a26e1f51e3c26a1b4a1f087e2056f1554365682 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:48 +0100 Subject: arm64: Don't enable VHE for the kernel if OVERRIDE_HVHE is set If the OVERRIDE_HVHE SW override is set (as a precursor of the KVM_HVHE capability), do not enable VHE for the kernel and drop to EL1 as if VHE was either disabled or unavailable. 
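In C terms, the check added to __finalise_el2 below amounts to the following sketch (hypothetical helper, shown only to make the assembly easier to follow):

static bool hvhe_override_requested(void)
{
	u64 val = arm64_sw_feature_override.val &
		  arm64_sw_feature_override.mask;

	/* Mirrors the ldr/and/ubfx sequence in the hyp-stub hunk */
	return !!cpuid_feature_extract_unsigned_field(val, ARM64_SW_FEATURE_OVERRIDE_HVHE);
}

When this yields true, the stub refuses the VHE upgrade and the kernel keeps running at EL1.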
Further changes will enable VHE at EL2 only, with the kernel still running at EL1.

Signed-off-by: Marc Zyngier
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/20230609162200.2024064-6-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kernel/hyp-stub.S | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 9439240c3fcf..5c71e1019545 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -82,7 +82,15 @@ SYM_CODE_START_LOCAL(__finalise_el2)
 	tbnz	x1, #0, 1f
 
 	// Needs to be VHE capable, obviously
-	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 2f 1f x1 x2
+	check_override id_aa64mmfr1 ID_AA64MMFR1_EL1_VH_SHIFT 0f 1f x1 x2
+
+0:	// Check whether we only want the hypervisor to run VHE, not the kernel
+	adr_l	x1, arm64_sw_feature_override
+	ldr	x2, [x1, FTR_OVR_VAL_OFFSET]
+	ldr	x1, [x1, FTR_OVR_MASK_OFFSET]
+	and	x2, x2, x1
+	ubfx	x2, x2, #ARM64_SW_FEATURE_OVERRIDE_HVHE, #4
+	cbz	x2, 2f
 
 1:	mov_q	x0, HVC_STUB_ERR
 	eret
-- 
cgit v1.2.3

From 9e7462bbe00d8431694720804a50b8f48d8ed0b0 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:49 +0100
Subject: arm64: Allow EL1 physical timer access when running VHE

To initialise the timer access from EL2 when HCR_EL2.E2H is set, we must make sure that the CNTHCTL_EL2 format used is appropriate. This amounts to shifting the timer/counter enable bits by 10 to the left.

Signed-off-by: Marc Zyngier
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/20230609162200.2024064-7-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/el2_setup.h | 5 +++++
 arch/arm64/kvm/hyp/nvhe/hyp-init.S | 9 +++++++++
 2 files changed, 14 insertions(+)

diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 037724b19c5c..225bf1f2514d 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -34,6 +34,11 @@
  */
 .macro __init_el2_timers
 	mov	x0, #3				// Enable EL1 physical timers
+	mrs	x1, hcr_el2
+	and	x1, x1, #HCR_E2H
+	cbz	x1, .LnVHE_\@
+	lsl	x0, x0, #10
+.LnVHE_\@:
 	msr	cnthctl_el2, x0
 	msr	cntvoff_el2, xzr		// Clear virtual offset
 .endm

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index a6d67c2bb5ae..f9ee10e29497 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -95,6 +95,15 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
 	ldr	x1, [x0, #NVHE_INIT_HCR_EL2]
 	msr	hcr_el2, x1
 
+	mov	x2, #HCR_E2H
+	and	x2, x1, x2
+	cbz	x2, 1f
+
+	mrs	x1, cnthctl_el2
+	and	x1, x1, #~(BIT(0) | BIT(1))
+	orr	x1, x1, #(BIT(10) | BIT(11))
+	msr	cnthctl_el2, x1
+1:
 	ldr	x1, [x0, #NVHE_INIT_VTTBR]
 	msr	vttbr_el2, x1
-- 
cgit v1.2.3

From 659803aef48b1a43bf47c6b105f5e3cee6a1501c Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:50 +0100
Subject: arm64: Use CPACR_EL1 format to set CPTR_EL2 when E2H is set

When HCR_EL2.E2H is set, the CPTR_EL2 register takes the CPACR_EL1 format. Yes, this is good fun.

Hack the bits of startup code that assume E2H=0 while setting up CPTR_EL2 to make them grok the CPACR_EL1 format.
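To make the format difference concrete, here is a C rendition of what the reworked __init_el2_cptr macro below computes (the function is invented for illustration; the constants are the ones from the patch):

static u64 el2_cptr_init_val(bool e2h)
{
	if (e2h)	/* CPACR_EL1 layout: FP accesses are *enabled* */
		return CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN;

	/* Legacy CPTR_EL2 layout: RES1 bits, no optional traps engaged */
	return 0x33ff;
}

Note the polarity inversion: in the E2H=1 view a *clear* enable bit traps, while in the E2H=0 view a *set* trap bit traps.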
Signed-off-by: Marc Zyngier Reviewed-by: Catalin Marinas Link: https://lore.kernel.org/r/20230609162200.2024064-8-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/el2_setup.h | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index 225bf1f2514d..bba508ffa12d 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -129,8 +129,15 @@ .endm /* Coprocessor traps */ -.macro __init_el2_nvhe_cptr +.macro __init_el2_cptr + mrs x1, hcr_el2 + and x1, x1, #HCR_E2H + cbz x1, .LnVHE_\@ + mov x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN) + b .Lset_cptr_\@ +.LnVHE_\@: mov x0, #0x33ff +.Lset_cptr_\@: msr cptr_el2, x0 // Disable copro. traps to EL2 .endm @@ -196,7 +203,7 @@ __init_el2_gicv3 __init_el2_hstr __init_el2_nvhe_idregs - __init_el2_nvhe_cptr + __init_el2_cptr __init_el2_fgt __init_el2_nvhe_prepare_eret .endm @@ -244,7 +251,17 @@ .Linit_sve_\@: /* SVE register access */ mrs x0, cptr_el2 // Disable SVE traps + mrs x1, hcr_el2 + and x1, x1, #HCR_E2H + cbz x1, .Lcptr_nvhe_\@ + + // VHE case + orr x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) + b .Lset_cptr_\@ + +.Lcptr_nvhe_\@: // nVHE case bic x0, x0, #CPTR_EL2_TZ +.Lset_cptr_\@: msr cptr_el2, x0 isb mov x1, #ZCR_ELx_LEN_MASK // SVE: Enable full vector -- cgit v1.2.3 From 57e784b4079e9499ea1bafd56a0d422252aa2401 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:51 +0100 Subject: KVM: arm64: Remove alternatives from sysreg accessors in VHE hypervisor context In the VHE hypervisor code, we should be using the remapped VHE accessors, no ifs, no buts. No need to generate any alternative. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-9-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_hyp.h | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index bdd9cf546d95..fea04eb25cb4 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -16,6 +16,23 @@ DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt); DECLARE_PER_CPU(unsigned long, kvm_hyp_vector); DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); +/* + * Unified accessors for registers that have a different encoding + * between VHE and non-VHE. They must be specified without their "ELx" + * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h. + */ + +#if defined(__KVM_VHE_HYPERVISOR__) + +#define read_sysreg_el0(r) read_sysreg_s(r##_EL02) +#define write_sysreg_el0(v,r) write_sysreg_s(v, r##_EL02) +#define read_sysreg_el1(r) read_sysreg_s(r##_EL12) +#define write_sysreg_el1(v,r) write_sysreg_s(v, r##_EL12) +#define read_sysreg_el2(r) read_sysreg_s(r##_EL1) +#define write_sysreg_el2(v,r) write_sysreg_s(v, r##_EL1) + +#else // !__KVM_VHE_HYPERVISOR__ + #define read_sysreg_elx(r,nvh,vh) \ ({ \ u64 reg; \ @@ -35,12 +52,6 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params); : : "rZ" (__val)); \ } while (0) -/* - * Unified accessors for registers that have a different encoding - * between VHE and non-VHE. They must be specified without their "ELx" - * encoding, but with the SYS_ prefix, as defined in asm/sysreg.h. 
- */
-
 #define read_sysreg_el0(r)	read_sysreg_elx(r, _EL0, _EL02)
 #define write_sysreg_el0(v,r)	write_sysreg_elx(v, r, _EL0, _EL02)
 #define read_sysreg_el1(r)	read_sysreg_elx(r, _EL1, _EL12)
@@ -48,6 +59,8 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 #define read_sysreg_el2(r)	read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)	write_sysreg_elx(v, r, _EL2, _EL1)
 
+#endif	// __KVM_VHE_HYPERVISOR__
+
 /*
  * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
  * static inline can allow the compiler to out-of-line this. KVM always wants
-- 
cgit v1.2.3

From 6f617d3aa643e4ed6929f5b582759f4a73804034 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:52 +0100
Subject: KVM: arm64: Key use of VHE instructions in nVHE code off ARM64_KVM_HVHE

We can now start with the fun stuff: if we enable VHE *only* for the hypervisor, we need to generate the VHE instructions when accessing the system registers. For this, repurpose the alternative sequence to be keyed off ARM64_KVM_HVHE in the nVHE hypervisor code, and only there.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230609162200.2024064-10-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/include/asm/kvm_hyp.h | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index fea04eb25cb4..b7238c72a04c 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -33,12 +33,18 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 #else	// !__KVM_VHE_HYPERVISOR__
 
+#if defined(__KVM_NVHE_HYPERVISOR__)
+#define VHE_ALT_KEY	ARM64_KVM_HVHE
+#else
+#define VHE_ALT_KEY	ARM64_HAS_VIRT_HOST_EXTN
+#endif
+
 #define read_sysreg_elx(r,nvh,vh)					\
 	({								\
 		u64 reg;						\
-		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),	\
+		asm volatile(ALTERNATIVE(__mrs_s("%0", r##nvh),		\
 					 __mrs_s("%0", r##vh),		\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 VHE_ALT_KEY)			\
 			     : "=r" (reg));				\
 		reg;							\
 	})
@@ -48,7 +54,7 @@ DECLARE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 		u64 __val = (u64)(v);					\
 		asm volatile(ALTERNATIVE(__msr_s(r##nvh, "%x0"),	\
 					 __msr_s(r##vh, "%x0"),		\
-					 ARM64_HAS_VIRT_HOST_EXTN)	\
+					 VHE_ALT_KEY)			\
 					 : : "rZ" (__val));		\
 	} while (0)
-- 
cgit v1.2.3

From d0daf5a21e635057e87cc05d6bca012157aa3ab7 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:53 +0100
Subject: KVM: arm64: Force HCR_EL2.E2H when ARM64_KVM_HVHE is set

Obviously, in order to be able to use VHE whilst at EL2, we need to set HCR_EL2.E2H. Do so when ARM64_KVM_HVHE is set.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230609162200.2024064-11-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/arm.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14391826241c..c12276dd2cf4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1676,6 +1676,8 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 		params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS;
 	else
 		params->hcr_el2 = HCR_HOST_NVHE_FLAGS;
+	if (cpus_have_final_cap(ARM64_KVM_HVHE))
+		params->hcr_el2 |= HCR_E2H;
 	params->vttbr = params->vtcr = 0;
 
 	/*
-- 
cgit v1.2.3

From cff3b5cf96edbb9e7448bca6261871272922a0df Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:54 +0100
Subject: KVM: arm64: Disable TTBR1_EL2 when using ARM64_KVM_HVHE

When using hVHE, we end up with two TTBRs at EL2.
That's great, but we're not quite ready for this just yet.

Disable TTBR1_EL2 by setting TCR_EL2.EPD1 so that we only translate via TTBR0_EL2.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230609162200.2024064-12-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/arm.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c12276dd2cf4..35b32cb6faa5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1666,7 +1666,13 @@ static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits)
 
 	params->mair_el2 = read_sysreg(mair_el1);
 
-	tcr = (read_sysreg(tcr_el1) & TCR_EL2_MASK) | TCR_EL2_RES1;
+	tcr = read_sysreg(tcr_el1);
+	if (cpus_have_final_cap(ARM64_KVM_HVHE)) {
+		tcr |= TCR_EPD1_MASK;
+	} else {
+		tcr &= TCR_EL2_MASK;
+		tcr |= TCR_EL2_RES1;
+	}
 	tcr &= ~TCR_T0SZ_MASK;
 	tcr |= TCR_T0SZ(hyp_va_bits);
 	params->tcr_el2 = tcr;
-- 
cgit v1.2.3

From 6537565fd9b7f169cda025482f6de2646cf7b60a Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:55 +0100
Subject: KVM: arm64: Adjust EL2 stage-1 leaf AP bits when ARM64_KVM_HVHE is set

EL2 stage-1 page-table format is subtly (and annoyingly) different when HCR_EL2.E2H is set.

Take the ARM64_KVM_HVHE configuration into account when setting the AP bits.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20230609162200.2024064-13-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/hyp/pgtable.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index 5282cb9ca4cf..21d0d0b1cf15 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -21,8 +21,10 @@
 
 #define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
 #define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
-#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO		\
+	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 2 : 3; })
+#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW		\
+	({ cpus_have_final_cap(ARM64_KVM_HVHE) ? 0 : 1; })
 #define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
 #define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
 #define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)
-- 
cgit v1.2.3

From 75c76ab5a641b64e872b6e136b5edf8a72009cc6 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 9 Jun 2023 17:21:56 +0100
Subject: KVM: arm64: Rework CPTR_EL2 programming for HVHE configuration

Just like we repainted the early arm64 code, we need to update the CPTR_EL2 accesses that are taking place in the nVHE code when hVHE is used, making them look as if they were CPACR_EL1 accesses. Just like the VHE code.
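The recurring pattern in the hunks below can be sketched as follows (simplified, with an invented helper name):

static u64 cptr_trap_guest_fp(u64 cptr)
{
	if (has_hvhe())		/* CPACR_EL1 view: clear the enables */
		return cptr & ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);

	return cptr | CPTR_EL2_TFP;	/* nVHE view: set the trap bit */
}

The same flip applies to SVE (ZEN enables vs. the TZ trap), SME (SMEN vs. TSM) and trace (CPACR_EL1_TTA vs. CPTR_EL2_TTA).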
Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-14-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_arm.h | 4 +--- arch/arm64/include/asm/kvm_emulate.h | 31 +++++++++++++++++++++++++++++++ arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/fpsimd.c | 4 ++-- arch/arm64/kvm/hyp/include/hyp/switch.h | 2 +- arch/arm64/kvm/hyp/nvhe/hyp-main.c | 6 +++++- arch/arm64/kvm/hyp/nvhe/pkvm.c | 24 ++++++++++++++++++------ arch/arm64/kvm/hyp/nvhe/switch.c | 28 ++++++++++++++++------------ arch/arm64/kvm/hyp/vhe/switch.c | 2 +- arch/arm64/kvm/sys_regs.c | 2 +- 10 files changed, 77 insertions(+), 28 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index baef29fcbeee..e448f8f7fd7e 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -285,7 +285,6 @@ #define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT) #define CPTR_EL2_TZ (1 << 8) #define CPTR_NVHE_EL2_RES1 0x000032ff /* known RES1 bits in CPTR_EL2 (nVHE) */ -#define CPTR_EL2_DEFAULT CPTR_NVHE_EL2_RES1 #define CPTR_NVHE_EL2_RES0 (GENMASK(63, 32) | \ GENMASK(29, 21) | \ GENMASK(19, 14) | \ @@ -347,8 +346,7 @@ ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \ ECN(BKPT32), ECN(VECTOR32), ECN(BRK64), ECN(ERET) -#define CPACR_EL1_DEFAULT (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |\ - CPACR_EL1_ZEN_EL1EN) +#define CPACR_EL1_TTA (1 << 28) #define kvm_mode_names \ { PSR_MODE_EL0t, "EL0t" }, \ diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index b31b32ecbe2d..4d82e622240d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -570,4 +570,35 @@ static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) return test_bit(feature, vcpu->arch.features); } +static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) +{ + u64 val; + + if (has_vhe()) { + val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | + CPACR_EL1_ZEN_EL1EN); + } else if (has_hvhe()) { + val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN); + } else { + val = CPTR_NVHE_EL2_RES1; + + if (vcpu_has_sve(vcpu) && + (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)) + val |= CPTR_EL2_TZ; + if (cpus_have_final_cap(ARM64_SME)) + val &= ~CPTR_EL2_TSM; + } + + return val; +} + +static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) +{ + u64 val = kvm_get_reset_cptr_el2(vcpu); + + if (has_vhe() || has_hvhe()) + write_sysreg(val, cpacr_el1); + else + write_sysreg(val, cptr_el2); +} #endif /* __ARM64_KVM_EMULATE_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 35b32cb6faa5..01c975a0acc9 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1240,7 +1240,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, } vcpu_reset_hcr(vcpu); - vcpu->arch.cptr_el2 = CPTR_EL2_DEFAULT; + vcpu->arch.cptr_el2 = kvm_get_reset_cptr_el2(vcpu); /* * Handle the "start in power-off" case. diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c index 4c9dcd8fc939..8c1d0d4853df 100644 --- a/arch/arm64/kvm/fpsimd.c +++ b/arch/arm64/kvm/fpsimd.c @@ -180,7 +180,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) /* * If we have VHE then the Hyp code will reset CPACR_EL1 to - * CPACR_EL1_DEFAULT and we need to reenable SME. + * the default value and we need to reenable SME. 
*/ if (has_vhe() && system_supports_sme()) { /* Also restore EL0 state seen on entry */ @@ -210,7 +210,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) /* * The FPSIMD/SVE state in the CPU has not been touched, and we * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been - * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE + * reset by kvm_reset_cptr_el2() in the Hyp code, disabling SVE * for EL0. To avoid spurious traps, restore the trap state * seen by kvm_arch_vcpu_load_fp(): */ diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index e78a08a72a3c..b5f53d3fc14c 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -192,7 +192,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code) /* Valid trap. Switch the context: */ /* First disable enough traps to allow us to update the registers */ - if (has_vhe()) { + if (has_vhe() || has_hvhe()) { reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN; if (sve_guest) reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c index 728e01d4536b..ce602f9e93eb 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c @@ -392,7 +392,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt) handle_host_smc(host_ctxt); break; case ESR_ELx_EC_SVE: - sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); + if (has_hvhe()) + sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN | + CPACR_EL1_ZEN_EL0EN)); + else + sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0); isb(); sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2); break; diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index a06ece14a6d8..5d5ee735a7d9 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -27,6 +27,7 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) u64 hcr_set = HCR_RW; u64 hcr_clear = 0; u64 cptr_set = 0; + u64 cptr_clear = 0; /* Protected KVM does not support AArch32 guests. */ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), @@ -57,12 +58,17 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) } /* Trap SVE */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) - cptr_set |= CPTR_EL2_TZ; + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) { + if (has_hvhe()) + cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN; + else + cptr_set |= CPTR_EL2_TZ; + } vcpu->arch.hcr_el2 |= hcr_set; vcpu->arch.hcr_el2 &= ~hcr_clear; vcpu->arch.cptr_el2 |= cptr_set; + vcpu->arch.cptr_el2 &= ~cptr_clear; } /* @@ -120,8 +126,12 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu) mdcr_set |= MDCR_EL2_TTRF; /* Trap Trace */ - if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) - cptr_set |= CPTR_EL2_TTA; + if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) { + if (has_hvhe()) + cptr_set |= CPACR_EL1_TTA; + else + cptr_set |= CPTR_EL2_TTA; + } vcpu->arch.mdcr_el2 |= mdcr_set; vcpu->arch.mdcr_el2 &= ~mdcr_clear; @@ -176,8 +186,10 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu) /* Clear res0 and set res1 bits to trap potential new features. 
*/ vcpu->arch.hcr_el2 &= ~(HCR_RES0); vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0); - vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; - vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); + if (!has_hvhe()) { + vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1; + vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0); + } } /* diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index 71fa16a0dc77..5fa0b1c9ee8d 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -44,13 +44,24 @@ static void __activate_traps(struct kvm_vcpu *vcpu) __activate_traps_common(vcpu); val = vcpu->arch.cptr_el2; - val |= CPTR_EL2_TTA | CPTR_EL2_TAM; + val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */ + val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA; + if (cpus_have_final_cap(ARM64_SME)) { + if (has_hvhe()) + val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN); + else + val |= CPTR_EL2_TSM; + } + if (!guest_owns_fp_regs(vcpu)) { - val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + if (has_hvhe()) + val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN | + CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN); + else + val |= CPTR_EL2_TFP | CPTR_EL2_TZ; + __activate_traps_fpsimd32(vcpu); } - if (cpus_have_final_cap(ARM64_SME)) - val |= CPTR_EL2_TSM; write_sysreg(val, cptr_el2); write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2); @@ -73,7 +84,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu) static void __deactivate_traps(struct kvm_vcpu *vcpu) { extern char __kvm_hyp_host_vector[]; - u64 cptr; ___deactivate_traps(vcpu); @@ -98,13 +108,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); - cptr = CPTR_EL2_DEFAULT; - if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)) - cptr |= CPTR_EL2_TZ; - if (cpus_have_final_cap(ARM64_SME)) - cptr &= ~CPTR_EL2_TSM; - - write_sysreg(cptr, cptr_el2); + kvm_reset_cptr_el2(vcpu); write_sysreg(__kvm_hyp_host_vector, vbar_el2); } diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 3d868e84c7a0..034777a23b74 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -84,7 +84,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) */ asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT)); - write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); + kvm_reset_cptr_el2(vcpu); if (!arm64_kernel_unmapped_at_el0()) host_vectors = __this_cpu_read(this_cpu_vector); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 71b12094d613..11ededa80737 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2180,7 +2180,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(ACTLR_EL2, access_rw, reset_val, 0), EL2_REG(HCR_EL2, access_rw, reset_val, 0), EL2_REG(MDCR_EL2, access_rw, reset_val, 0), - EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_EL2_DEFAULT ), + EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1), EL2_REG(HSTR_EL2, access_rw, reset_val, 0), EL2_REG(HACR_EL2, access_rw, reset_val, 0), -- cgit v1.2.3 From aca18585db4fd0ed0bd7420eddcdc39a535194fe Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:57 +0100 Subject: KVM: arm64: Program the timer traps with VHE layout in hVHE mode Just like the rest of the timer code, we need to shift the enable bits around when HCR_EL2.E2H is set, which is the case in hVHE mode. 
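Schematically, and reusing the CNTHCTL_* definitions from the hunk below: with E2H=0 the EL1PCTEN/EL1PCEN enables sit at bits [1:0] of CNTHCTL_EL2, while with E2H=1 they live at bits [11:10], hence the shift by 10. An illustrative helper (the patch open-codes this in __timer_disable_traps() and __timer_enable_traps()):

static u64 cnthctl_host_timer_access(void)
{
	u64 bits = CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;	/* bits 0 and 1 */

	/* hVHE runs EL2 with E2H=1, so use the shifted layout */
	return has_hvhe() ? bits << 10 : bits;
}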
Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-15-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/nvhe/timer-sr.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c index b185ac0dbd47..3aaab20ae5b4 100644 --- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c +++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c @@ -17,21 +17,24 @@ void __kvm_timer_set_cntvoff(u64 cntvoff) } /* - * Should only be called on non-VHE systems. + * Should only be called on non-VHE or hVHE setups. * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). */ void __timer_disable_traps(struct kvm_vcpu *vcpu) { - u64 val; + u64 val, shift = 0; + + if (has_hvhe()) + shift = 10; /* Allow physical timer/counter access for the host */ val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift; write_sysreg(val, cnthctl_el2); } /* - * Should only be called on non-VHE systems. + * Should only be called on non-VHE or hVHE setups. * VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe(). */ void __timer_enable_traps(struct kvm_vcpu *vcpu) @@ -50,5 +53,10 @@ void __timer_enable_traps(struct kvm_vcpu *vcpu) else clr |= CNTHCTL_EL1PCTEN; + if (has_hvhe()) { + clr <<= 10; + set <<= 10; + } + sysreg_clear_set(cnthctl_el2, clr, set); } -- cgit v1.2.3 From 38cba55008e5fab9181302ea5daf79e2070c9998 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:58 +0100 Subject: KVM: arm64: Force HCR_E2H in guest context when ARM64_KVM_HVHE is set Also make sure HCR_EL2.E2H is set when switching HCR_EL2 in guest context. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230609162200.2024064-16-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 2 +- arch/arm64/kvm/hyp/nvhe/pkvm.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 4d82e622240d..cf40d19a72f8 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -74,7 +74,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) { vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; - if (is_kernel_in_hyp_mode()) + if (has_vhe() || has_hvhe()) vcpu->arch.hcr_el2 |= HCR_E2H; if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) { /* route synchronous external abort exceptions to EL2 */ diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 5d5ee735a7d9..8033ef353a5d 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -44,6 +44,9 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu) BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD), PVM_ID_AA64PFR0_ALLOW)); + if (has_hvhe()) + hcr_set |= HCR_E2H; + /* Trap RAS unless all current versions are supported */ if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) < ID_AA64PFR0_EL1_RAS_V1P1) { -- cgit v1.2.3 From ad744e8cb346743dd76425942910c7b75a782ed0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 9 Jun 2023 17:21:59 +0100 Subject: arm64: Allow arm64_sw.hvhe on command line Add the arm64_sw.hvhe=1 option to force the use of the hVHE mode in the hypervisor code only. This enables the hVHE mode of operation when using KVM on VHE hardware. 
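For example, booting with:

    arm64_sw.hvhe=1

on the kernel command line selects hVHE: the kernel runs at EL1 while EL2 runs with E2H=1. The filter below only honours the option when the value is 1, the boot CPU entered the kernel at EL2, and FEAT_VHE is present.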
Signed-off-by: Marc Zyngier
Reviewed-by: Catalin Marinas
Link: https://lore.kernel.org/r/20230609162200.2024064-17-maz@kernel.org
Signed-off-by: Oliver Upton
---
 arch/arm64/kernel/idreg-override.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/arm64/kernel/idreg-override.c b/arch/arm64/kernel/idreg-override.c
index 8c93b6198bf5..c553d30089e5 100644
--- a/arch/arm64/kernel/idreg-override.c
+++ b/arch/arm64/kernel/idreg-override.c
@@ -138,11 +138,22 @@ static const struct ftr_set_desc smfr0 __initconst = {
 	},
 };
 
+static bool __init hvhe_filter(u64 val)
+{
+	u64 mmfr1 = read_sysreg(id_aa64mmfr1_el1);
+
+	return (val == 1 &&
+		lower_32_bits(__boot_status) == BOOT_CPU_MODE_EL2 &&
+		cpuid_feature_extract_unsigned_field(mmfr1,
+						     ID_AA64MMFR1_EL1_VH_SHIFT));
+}
+
 static const struct ftr_set_desc sw_features __initconst = {
 	.name		= "arm64_sw",
 	.override	= &arm64_sw_feature_override,
 	.fields		= {
 		FIELD("nokaslr", ARM64_SW_FEATURE_OVERRIDE_NOKASLR, NULL),
+		FIELD("hvhe", ARM64_SW_FEATURE_OVERRIDE_HVHE, hvhe_filter),
 		{}
 	},
 };
-- 
cgit v1.2.3

From 1700f89cb99aae19e4f8ec38b1b59f3b7ae71b91 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 14 Jun 2023 16:51:29 +0100
Subject: KVM: arm64: Fix hVHE init on CPUs where HCR_EL2.E2H is not RES1

On CPUs where E2H is RES1, we very quickly set the scene for running EL2 with a VHE configuration, as we do not have any other choice.

However, CPUs that conform to the current writing of the architecture start with E2H=0, and only later upgrade with E2H=1. This is all good, but nothing there is actually reconfiguring EL2 to be able to correctly run the kernel at EL1. Huhuh...

The "obvious" solution is not to just reinitialise the timer controls like we do, but to really initialise *everything* unconditionally.

This requires a bit of surgery, and is a good opportunity to remove the macro that messes with SPSR_EL2 in init_el2_state.

With that, hVHE now works correctly on my trusted A55 machine!
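"E2H is RES1" means the bit is stuck at 1: writing 0 simply reads back as 1. The distinction the boot code has to cope with can be rendered in C as follows (purely illustrative, with an invented helper name - the real check happens in early assembly, long before any C runs):

static bool e2h_is_res1(void)
{
	u64 hcr = read_sysreg(hcr_el2);

	write_sysreg(hcr & ~HCR_E2H, hcr_el2);
	isb();

	/* On VHE-only CPUs the bit remains set regardless */
	return read_sysreg(hcr_el2) & HCR_E2H;
}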
Reported-by: Oliver Upton Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230614155129.2697388-1-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/el2_setup.h | 1 - arch/arm64/kernel/head.S | 2 ++ arch/arm64/kvm/hyp/nvhe/hyp-init.S | 19 ++++++++++++------- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index bba508ffa12d..5a353f94e9cd 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -205,7 +205,6 @@ __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt - __init_el2_nvhe_prepare_eret .endm #ifndef __KVM_NVHE_HYPERVISOR__ diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index e92caebff46a..23955050da73 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -603,6 +603,8 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL) msr sctlr_el1, x1 mov x2, xzr 2: + __init_el2_nvhe_prepare_eret + mov w0, #BOOT_CPU_MODE_EL2 orr x0, x0, x2 eret diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S index f9ee10e29497..74ee77d9cfd0 100644 --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S @@ -83,9 +83,6 @@ SYM_CODE_END(__kvm_hyp_init) * x0: struct kvm_nvhe_init_params PA */ SYM_CODE_START_LOCAL(___kvm_hyp_init) - ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] - msr tpidr_el2, x1 - ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA] mov sp, x1 @@ -99,11 +96,18 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init) and x2, x1, x2 cbz x2, 1f - mrs x1, cnthctl_el2 - and x1, x1, #~(BIT(0) | BIT(1)) - orr x1, x1, #(BIT(10) | BIT(11)) - msr cnthctl_el2, x1 + // hVHE: Replay the EL2 setup to account for the E2H bit + // TPIDR_EL2 is used to preserve x0 across the macro maze... + isb + msr tpidr_el2, x0 + init_el2_state + finalise_el2_state + mrs x0, tpidr_el2 + 1: + ldr x1, [x0, #NVHE_INIT_TPIDR_EL2] + msr tpidr_el2, x1 + ldr x1, [x0, #NVHE_INIT_VTTBR] msr vttbr_el2, x1 @@ -193,6 +197,7 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu) /* Initialize EL2 CPU state to sane values. */ init_el2_state // Clobbers x0..x2 finalise_el2_state + __init_el2_nvhe_prepare_eret /* Enable MMU, set vectors and stack. */ mov x0, x28 -- cgit v1.2.3