| author | Eric Auger <eric.auger@linaro.org> | 2015-09-25 23:41:17 +0200 |
|---|---|---|
| committer | Christoffer Dall <christoffer.dall@linaro.org> | 2015-10-22 23:01:46 +0200 |
| commit | 3b92830ad41b2fe377e0765322e8aefd0ab8388d (patch) | |
| tree | 5a69f4ea8df65339fee73f748bef97e85ee9fb14 /arch/arm/kvm/arm.c | |
| parent | 101d3da09c953b08c814cd9a0b8605623d640ba0 (diff) | |
KVM: arm/arm64: implement kvm_arm_[halt,resume]_guest
We introduce the kvm_arm_halt_guest and kvm_arm_resume_guest functions.
They will be used for IRQ forward state changes (see the usage sketch
after the sign-off tags below).

Halting is synchronous and prevents the guest from being re-entered.

We reuse the mechanism put in place for the former PSCI pause, now
renamed power_off. A new flag, pause, is introduced in the arch vcpu
state and is only meant to be used by these functions.
Signed-off-by: Eric Auger <eric.auger@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
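
For illustration, here is a hedged sketch of the call pattern these helpers are meant for. It is not part of the patch: in this commit the functions are still static and marked __maybe_unused (the in-file IRQ-forwarding caller arrives in a later series), and set_irq_forwarding() below is an invented placeholder, not a kernel API.

```c
/*
 * Hypothetical in-file caller, for illustration only: code changing an
 * IRQ forward state would quiesce the whole VM around the update.
 * set_irq_forwarding() is an invented placeholder, not a kernel API.
 */
static void change_irq_forward_state(struct kvm *kvm, unsigned int intid,
				     bool forward)
{
	kvm_arm_halt_guest(kvm);	/* synchronous: no vcpu re-enters the guest */
	set_irq_forwarding(kvm, intid, forward); /* flip state while quiesced */
	kvm_arm_resume_guest(kvm);	/* clear pause, wake any sleeping vcpus */
}
```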
Diffstat (limited to 'arch/arm/kvm/arm.c')
-rw-r--r-- | arch/arm/kvm/arm.c | 35
1 file changed, 31 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 3b3384c37e24..ed1574724caf 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -353,7 +353,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
 	return ((!!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v))
-		&& !v->arch.power_off);
+		&& !v->arch.power_off && !v->arch.pause);
 }
 
 /* Just ensure a guest exit from a particular CPU */
@@ -479,11 +479,38 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 	return vgic_initialized(kvm);
 }
 
+static void kvm_arm_halt_guest(struct kvm *kvm) __maybe_unused;
+static void kvm_arm_resume_guest(struct kvm *kvm) __maybe_unused;
+
+static void kvm_arm_halt_guest(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		vcpu->arch.pause = true;
+	force_vm_exit(cpu_all_mask);
+}
+
+static void kvm_arm_resume_guest(struct kvm *kvm)
+{
+	int i;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+		vcpu->arch.pause = false;
+		wake_up_interruptible(wq);
+	}
+}
+
 static void vcpu_sleep(struct kvm_vcpu *vcpu)
 {
 	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
 
-	wait_event_interruptible(*wq, !vcpu->arch.power_off);
+	wait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+				       (!vcpu->arch.pause)));
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
@@ -533,7 +560,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
 		update_vttbr(vcpu->kvm);
 
-		if (vcpu->arch.power_off)
+		if (vcpu->arch.power_off || vcpu->arch.pause)
 			vcpu_sleep(vcpu);
 
 		/*
@@ -561,7 +588,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		}
 
 		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
-			vcpu->arch.power_off) {
+			vcpu->arch.power_off || vcpu->arch.pause) {
 			local_irq_enable();
 			kvm_timer_sync_hwstate(vcpu);
 			kvm_vgic_sync_hwstate(vcpu);
```
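
For readers less familiar with the waitqueue pattern used above, the following is a minimal user-space model of the pause/wake protocol, written with pthreads. Every name in it (vcpu_loop, halt_guest, resume_guest, pause_req) is invented for illustration; only the shape of the protocol mirrors the patch, and the force_vm_exit() IPI kick that makes halting synchronous for vcpus already in guest mode is noted in a comment rather than modeled.

```c
/* sketch.c - illustrative model only, not kernel code.
 *
 * Models the pause-flag protocol this patch adds:
 *  - halt sets a pause flag (one global here, per-vcpu in the kernel),
 *  - the vcpu loop sleeps on a wait queue while the flag is set,
 *  - resume clears the flag and broadcasts a wakeup.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool pause_req;                   /* models vcpu->arch.pause */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER; /* models the vcpu wq */

static void *vcpu_loop(void *arg)
{
	for (int i = 0; i < 6; i++) {
		/* vcpu_sleep(): block while a halt is requested */
		pthread_mutex_lock(&lock);
		while (atomic_load(&pause_req))
			pthread_cond_wait(&wq, &lock);
		pthread_mutex_unlock(&lock);

		printf("vcpu: entering guest (iteration %d)\n", i);
		usleep(100 * 1000);             /* "guest runs" */
	}
	return NULL;
}

static void halt_guest(void)                    /* models kvm_arm_halt_guest */
{
	atomic_store(&pause_req, true);
	/* kernel: force_vm_exit(cpu_all_mask) would also IPI-kick vcpus
	 * already in guest mode, making the halt synchronous */
}

static void resume_guest(void)                  /* models kvm_arm_resume_guest */
{
	pthread_mutex_lock(&lock);
	atomic_store(&pause_req, false);
	pthread_cond_broadcast(&wq);            /* wake_up_interruptible() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, vcpu_loop, NULL);
	usleep(250 * 1000);
	halt_guest();
	puts("halt requested");
	usleep(400 * 1000);
	resume_guest();
	puts("resumed");
	pthread_join(t, NULL);
	return 0;
}
```

Build with `cc -pthread sketch.c`; the vcpu iterations stop while the pause flag is set and continue after "resumed" is printed.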