author     Marc Zyngier <marc.zyngier@arm.com>  2016-09-06 14:02:06 +0100
committer  Christoffer Dall <christoffer.dall@linaro.org>  2016-09-08 12:53:00 +0200
commit     ddb3d07cfe90ce58c342cf97ce6ce53ba7d10973 (patch)
tree       10178630a3b5c24a880ce7efd2a3e9e9337a3233 /arch/arm64
parent     20163403a1f2fe3f00f59df2b45ea5cb74b85d97 (diff)
arm64: KVM: Inject a Virtual SError if it was pending
If we have caught an SError whilst exiting, we've tagged the exit
code with the pending information. In that case, let's re-inject
the error into the guest, after having adjusted the PC if required.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
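The "pending information" is carried as a flag bit in the exit code itself: the hyp exit path sets a high bit in exception_index, and handle_exit() tests and strips it with ARM_SERROR_PENDING() / ARM_EXCEPTION_CODE(), as seen in the hunk below. A minimal sketch of what those helpers could look like follows; the ARM_EXIT_WITH_SERROR_BIT name and its value (bit 31) come from the companion tagging patch and are an assumption here, not part of this diff.

/* Sketch only: ARM_EXIT_WITH_SERROR_BIT and its bit position are assumed. */
#define ARM_EXIT_WITH_SERROR_BIT	31
#define ARM_SERROR_PENDING(x)		!!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_CODE(x)		((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))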
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kvm/handle_exit.c  20
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 08afc69ab157..a204adf29f0a 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -170,6 +170,26 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	exit_handle_fn exit_handler;
 
+	if (ARM_SERROR_PENDING(exception_index)) {
+		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+
+		/*
+		 * HVC/SMC already have an adjusted PC, which we need
+		 * to correct in order to return to after having
+		 * injected the SError.
+		 */
+		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
+		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+			*vcpu_pc(vcpu) -= adj;
+		}
+
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
+	exception_index = ARM_EXCEPTION_CODE(exception_index);
+
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
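The PC adjustment in the added block exists because HVC/SMC exits leave the guest PC already advanced past the trapping instruction; for the guest to re-issue the call after taking the injected SError, the PC must be rewound by the instruction width. kvm_vcpu_trap_il_is32bit() reflects the ESR_ELx.IL bit: a 32-bit encoding rewinds by 4 bytes, a 16-bit Thumb encoding by 2. A standalone sketch of that arithmetic, using a hypothetical il_is_32bit flag in place of the vcpu accessor:

/* Sketch only: il_is_32bit stands in for kvm_vcpu_trap_il_is32bit(vcpu). */
static inline unsigned long rewind_pc(unsigned long pc, bool il_is_32bit)
{
	/* 32-bit (A64/A32) instruction: step back 4 bytes; Thumb: 2 bytes. */
	return pc - (il_is_32bit ? 4 : 2);
}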