author    Marc Zyngier <marc.zyngier@arm.com>  2016-09-06 09:28:45 +0100
committer Christoffer Dall <christoffer.dall@linaro.org>  2016-09-08 12:53:00 +0200
commit    fb5ee369ccd3986b28adc20d43d73a2b2c141977 (patch)
tree      34722e194ff7610827d4950bd4f06aab6da3c96e /arch/arm64/kvm/hyp
parent    8cebe750c4d9acdfdce41ee0a83a58be973ebc5e (diff)
arm64: KVM: vgic-v2: Add the GICV emulation infrastructure
In order to efficiently perform the GICV access on behalf of the guest,
we need to be able to avoid going back all the way to the host kernel.

For this, we introduce a new hook in the world switch code, conveniently
placed just after populating the fault info. At that point, we only have
saved/restored the GP registers, and we can quickly perform all the
required checks (data abort, translation fault, valid faulting syndrome,
not an external abort, not a PTW).

Coming back from the emulation code, we need to skip the emulated
instruction. This involves an additional bit of save/restore in order to
be able to access the guest's PC (and possibly CPSR if this is a 32bit
guest).

At this stage, no emulation code is provided.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
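[Editor's note] The checks listed above are performed through the
kvm_vcpu_trap_*/kvm_vcpu_dabt_* accessors in the hunk below, which all
decode the ESR_EL2 syndrome register. As an illustration of which
syndrome bits are being tested, here is a stand-alone predicate over the
raw ESR_EL2 value; the ESR_ELx_* constants come from asm/esr.h, but the
is_emulatable_gicv_fault() helper itself is hypothetical and not part of
this patch:

	#include <linux/types.h>

	/* From asm/esr.h (shown for reference, not added by this patch) */
	#define ESR_ELx_EC_SHIFT	26
	#define ESR_ELx_EC_DABT_LOW	0x24		/* data abort from a lower EL */
	#define ESR_ELx_ISV		(1UL << 24)	/* instruction syndrome valid */
	#define ESR_ELx_EA		(1UL << 9)	/* external abort */
	#define ESR_ELx_S1PTW		(1UL << 7)	/* fault on a stage-1 table walk */
	#define ESR_ELx_FSC_TYPE	0x3c		/* fault status, level bits masked */
	#define ESR_ELx_FSC_FAULT	0x04		/* translation fault */

	/* Hypothetical helper mirroring the world-switch checks below */
	static bool is_emulatable_gicv_fault(u64 esr)
	{
		return (esr >> ESR_ELx_EC_SHIFT) == ESR_ELx_EC_DABT_LOW &&
		       (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT &&
		       (esr & ESR_ELx_ISV) &&
		       !(esr & ESR_ELx_EA) &&
		       !(esr & ESR_ELx_S1PTW);
	}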
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--  arch/arm64/kvm/hyp/switch.c  | 32
1 file changed, 32 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 35d2e09ac695..b3a66c50d12b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -17,6 +17,7 @@
 
 #include <linux/types.h>
 #include <asm/kvm_asm.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
 static bool __hyp_text __fpsimd_enabled_nvhe(void)
@@ -232,6 +233,21 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
 	return true;
 }
 
+static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
+{
+	*vcpu_pc(vcpu) = read_sysreg_el2(elr);
+
+	if (vcpu_mode_is_32bit(vcpu)) {
+		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
+		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
+	} else {
+		*vcpu_pc(vcpu) += 4;
+	}
+
+	write_sysreg_el2(*vcpu_pc(vcpu), elr);
+}
+
 int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -270,6 +286,22 @@ again:
 	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
 		goto again;
 
+	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
+	    exit_code == ARM_EXCEPTION_TRAP) {
+		bool valid;
+
+		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
+			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+			kvm_vcpu_dabt_isvalid(vcpu) &&
+			!kvm_vcpu_dabt_isextabt(vcpu) &&
+			!kvm_vcpu_dabt_iss1tw(vcpu);
+
+		if (valid && __vgic_v2_perform_cpuif_access(vcpu)) {
+			__skip_instr(vcpu);
+			goto again;
+		}
+	}
+
 	fp_enabled = __fpsimd_enabled();
 
 	__sysreg_save_guest_state(guest_ctxt);
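
[Editor's note] As the commit message says, the emulation hook itself is
not provided at this stage: __vgic_v2_perform_cpuif_access() is only
referenced here, and its return value tells the caller whether the
access was handled at EL2. As a rough sketch of what such a handler
could look like, under stated assumptions: the range-check and mapping
helpers gicv_owns() and gicv_hyp_va() are hypothetical, and the real
implementation arrived in a follow-up patch and may differ.

	/*
	 * Hypothetical sketch only: proxy a trapped 32-bit guest access
	 * to the GICV region from EL2, so we never go back to the host
	 * for it. gicv_owns() and gicv_hyp_va() are assumptions, not a
	 * real kernel API.
	 */
	static bool __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
	{
		phys_addr_t fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
		void __iomem *addr;
		int rd;

		/* Only handle faults that hit the virtual CPU interface */
		if (!gicv_owns(fault_ipa))
			return false;

		/* The syndrome is valid, so the register number is trustworthy */
		rd = kvm_vcpu_dabt_get_rd(vcpu);
		addr = gicv_hyp_va(fault_ipa);	/* EL2 mapping of the region */

		if (kvm_vcpu_dabt_iswrite(vcpu))
			writel_relaxed(lower_32_bits(vcpu_get_reg(vcpu, rd)), addr);
		else
			vcpu_set_reg(vcpu, rd, readl_relaxed(addr));

		return true;	/* handled; the caller will skip the instruction */
	}

Returning false falls through to the normal exit path, where the host
kernel handles the fault as an ordinary MMIO exit; returning true makes
the world-switch code call __skip_instr() and re-enter the guest
directly via the "again" label.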