author     Marc Zyngier <maz@kernel.org>  2024-12-17 14:23:11 +0000
committer  Marc Zyngier <maz@kernel.org>  2025-01-02 19:19:09 +0000
commit     cc45963cbf6334d2b9078f06efef9864639cddd0
tree       2905d41544aeac7d7be54c379229437466d671cc
parent     4bad3068cfa9fc38dd767441871e0edab821105b
KVM: arm64: nv: Publish emulated timer interrupt state in the in-memory state
With FEAT_NV2, the EL0 timer state is entirely stored in memory,
meaning that the hypervisor can only provide a very poor emulation.
The only thing we can really do is to publish the interrupt state
in the guest view of CNT{P,V}_CTL_EL0, and defer everything else
to the next exit.
Only FEAT_ECV will allow us to fix it, at the cost of extra trapping.
Suggested-by: Chase Conklin <chase.conklin@arm.com>
Suggested-by: Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Acked-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241217142321.763801-4-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
-rw-r--r--  arch/arm64/kvm/arch_timer.c | 21 +++++++++++++++++++++
-rw-r--r--  arch/arm64/kvm/arm.c        |  2 +-
2 files changed, 22 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index ee5f732fbbec..8bff913ed126 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -441,11 +441,30 @@ void kvm_timer_update_run(struct kvm_vcpu *vcpu)
 		regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
 }
 
+static void kvm_timer_update_status(struct arch_timer_context *ctx, bool level)
+{
+	/*
+	 * Paper over NV2 brokenness by publishing the interrupt status
+	 * bit. This still results in a poor quality of emulation (guest
+	 * writes will have no effect until the next exit).
+	 *
+	 * But hey, it's fast, right?
+	 */
+	if (is_hyp_ctxt(ctx->vcpu) &&
+	    (ctx == vcpu_vtimer(ctx->vcpu) || ctx == vcpu_ptimer(ctx->vcpu))) {
+		unsigned long val = timer_get_ctl(ctx);
+		__assign_bit(__ffs(ARCH_TIMER_CTRL_IT_STAT), &val, level);
+		timer_set_ctl(ctx, val);
+	}
+}
+
 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
 				 struct arch_timer_context *timer_ctx)
 {
 	int ret;
 
+	kvm_timer_update_status(timer_ctx, new_level);
+
 	timer_ctx->irq.level = new_level;
 	trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_irq(timer_ctx),
 				   timer_ctx->irq.level);
@@ -471,6 +490,8 @@ static void timer_emulate(struct arch_timer_context *ctx)
 		return;
 	}
 
+	kvm_timer_update_status(ctx, should_fire);
+
 	/*
 	 * If the timer can fire now, we don't need to have a soft timer
 	 * scheduled for the future. If the timer cannot fire at all,
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index fa3089822f9f..bda905022df4 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1228,7 +1228,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
 		kvm_timer_sync_user(vcpu);
 
-	if (vcpu_has_nv(vcpu))
+	if (is_hyp_ctxt(vcpu))
 		kvm_timer_sync_nested(vcpu);
 
 	kvm_arch_vcpu_ctxsync_fp(vcpu);
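
Editor's note, for illustration only (not part of the patch): the core of kvm_timer_update_status() is simply reflecting the emulated interrupt line in the ISTATUS bit of the in-memory CNT{P,V}_CTL_EL0 view, so that a guest hypervisor reading the register sees a plausible status even though its own writes only take effect at the next exit. The standalone user-space sketch below mirrors that bit-publishing pattern; the helper name and CTL_* constants are made up for the example, with bit positions following the architected layout (ENABLE bit 0, IMASK bit 1, ISTATUS bit 2, i.e. ARCH_TIMER_CTRL_IT_STAT in the kernel).

/* Illustrative sketch only; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define CTL_ENABLE	(1UL << 0)
#define CTL_IMASK	(1UL << 1)
#define CTL_ISTATUS	(1UL << 2)	/* ARCH_TIMER_CTRL_IT_STAT in the kernel */

/* Mirror the emulated interrupt line into the in-memory control value */
static unsigned long publish_istatus(unsigned long ctl, bool level)
{
	if (level)
		ctl |= CTL_ISTATUS;
	else
		ctl &= ~CTL_ISTATUS;
	return ctl;
}

int main(void)
{
	unsigned long ctl = CTL_ENABLE;	/* timer enabled, interrupt not masked */

	/* A guest read of the in-memory CNTV_CTL_EL0 now sees ISTATUS track 'level' */
	printf("level=1 -> ctl=%#lx\n", publish_istatus(ctl, true));	/* 0x5 */
	printf("level=0 -> ctl=%#lx\n", publish_istatus(ctl, false));	/* 0x1 */
	return 0;
}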