author	Sean Christopherson <seanjc@google.com>	2022-10-13 21:12:29 +0000
committer	David Woodhouse <dwmw@amazon.co.uk>	2022-11-30 19:25:24 +0000
commit	9f87791d686d85614584438d4f249eb32ef7964c
tree	c2c4a01fd11883c843242ad0a3035fd7754809fa
parent	0318f207d1c2e297d1ec1c6e145bb8bd053236f9
KVM: Drop KVM's API to allow temporarily unmapping gfn=>pfn cache
Drop kvm_gpc_unmap() as it has no users and unclear requirements. The
API was added as part of the original gfn_to_pfn_cache support, but its
sole usage[*] was never merged. Fold the guts of kvm_gpc_unmap() into
the deactivate path and drop the API. Omit acquiring refresh_lock, as
concurrent calls to kvm_gpc_deactivate() are not allowed (this is not
enforced, e.g. via lockdep, due to it being called during vCPU
destruction).
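As an illustration only (nothing below is in this patch, and the check is
hypothetical): if enforcement were wanted despite the vCPU-destruction
caller, a lockdep-based assertion keyed to the owning vCPU's mutex might
look like:

	/*
	 * Hypothetical sketch, not part of this commit: enforce "no
	 * concurrent deactivate" for vCPU-exclusive caches.  This is
	 * impractical upstream because kvm_gpc_deactivate() runs during
	 * vCPU destruction, where vcpu->mutex is not held.
	 */
	void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
	{
		if (gpc->vcpu)
			lockdep_assert_held(&gpc->vcpu->mutex);
		/* ... existing teardown ... */
	}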
If/when temporary unmapping makes a comeback, the desirable behavior is
likely to restrict temporary unmapping to vCPU-exclusive mappings and
require the vcpu->mutex be held to serialize unmap. Use of the
refresh_lock to protect unmapping was somewhat speculatively added by
commit 93984f19e7bc ("KVM: Fully serialize gfn=>pfn cache refresh via
mutex") to guard against concurrent unmaps, but the primary use case of
the temporary unmap, nested virtualization[*], doesn't actually need or
want concurrent unmaps.
[*] https://lore.kernel.org/all/20211210163625.2886-7-dwmw2@infradead.org
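To make that concrete, here is a sketch (purely illustrative; nothing below
exists in the tree) of what a re-introduced, vCPU-exclusive temporary unmap
could look like, with vcpu->mutex supplying the serialization that
refresh_lock provided:

	/*
	 * Hypothetical sketch of a future temporary unmap, restricted to
	 * vCPU-exclusive caches and serialized by vcpu->mutex.  The name
	 * and shape are illustrative, not upstream API.
	 */
	static void kvm_gpc_unmap_vcpu(struct kvm_vcpu *vcpu,
				       struct gfn_to_pfn_cache *gpc)
	{
		void *old_khva;
		kvm_pfn_t old_pfn;

		lockdep_assert_held(&vcpu->mutex);

		write_lock_irq(&gpc->lock);
		gpc->valid = false;

		/* Keep the GPA => uHVA cache; redo only the PFN lookup. */
		old_khva = gpc->khva - offset_in_page(gpc->khva);
		old_pfn = gpc->pfn;
		gpc->khva = NULL;
		gpc->pfn = KVM_PFN_ERR_FAULT;
		write_unlock_irq(&gpc->lock);

		gpc_unmap_khva(old_pfn, old_khva);
	}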
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Diffstat (limited to 'virt')
 virt/kvm/pfncache.c | 44 ++++++++++++++++----------------------------
 1 file changed, 16 insertions(+), 28 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 015c5d16948a..5b2512793691 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -333,33 +333,6 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_refresh);
 
-void kvm_gpc_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
-{
-	void *old_khva;
-	kvm_pfn_t old_pfn;
-
-	mutex_lock(&gpc->refresh_lock);
-	write_lock_irq(&gpc->lock);
-
-	gpc->valid = false;
-
-	old_khva = gpc->khva - offset_in_page(gpc->khva);
-	old_pfn = gpc->pfn;
-
-	/*
-	 * We can leave the GPA → uHVA map cache intact but the PFN
-	 * lookup will need to be redone even for the same page.
-	 */
-	gpc->khva = NULL;
-	gpc->pfn = KVM_PFN_ERR_FAULT;
-
-	write_unlock_irq(&gpc->lock);
-	mutex_unlock(&gpc->refresh_lock);
-
-	gpc_unmap_khva(old_pfn, old_khva);
-}
-EXPORT_SYMBOL_GPL(kvm_gpc_unmap);
-
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
 		  struct kvm_vcpu *vcpu, enum pfn_cache_usage usage)
 {
@@ -405,6 +378,8 @@ EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 {
 	struct kvm *kvm = gpc->kvm;
+	kvm_pfn_t old_pfn;
+	void *old_khva;
 
 	if (gpc->active) {
 		/*
@@ -414,13 +389,26 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
 		 */
 		write_lock_irq(&gpc->lock);
 		gpc->active = false;
+		gpc->valid = false;
+
+		/*
+		 * Leave the GPA => uHVA cache intact, it's protected by the
+		 * memslot generation. The PFN lookup needs to be redone every
+		 * time as mmu_notifier protection is lost when the cache is
+		 * removed from the VM's gpc_list.
+		 */
+		old_khva = gpc->khva - offset_in_page(gpc->khva);
+		gpc->khva = NULL;
+
+		old_pfn = gpc->pfn;
+		gpc->pfn = KVM_PFN_ERR_FAULT;
 		write_unlock_irq(&gpc->lock);
 
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
-		kvm_gpc_unmap(kvm, gpc);
+		gpc_unmap_khva(old_pfn, old_khva);
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
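For orientation, a minimal usage sketch of the surviving API after this
patch (signatures as of this series; the caller shape and error handling
are illustrative, not taken from the tree):

	/*
	 * Minimal sketch of the gfn_to_pfn_cache lifecycle after this
	 * patch: kvm_gpc_unmap() is gone, so teardown happens wholly
	 * inside kvm_gpc_deactivate().
	 */
	static int example_use(struct kvm *kvm, struct kvm_vcpu *vcpu,
			       gpa_t gpa)
	{
		struct gfn_to_pfn_cache gpc;
		int ret;

		kvm_gpc_init(&gpc, kvm, vcpu, KVM_HOST_USES_PFN);

		ret = kvm_gpc_activate(&gpc, gpa, PAGE_SIZE);
		if (ret)
			return ret;

		/* gpc.khva now maps @gpa; revalidate via kvm_gpc_refresh(). */

		kvm_gpc_deactivate(&gpc);
		return 0;
	}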