author    Sean Christopherson <seanjc@google.com>  2024-10-10 11:23:46 -0700
committer Paolo Bonzini <pbonzini@redhat.com>      2024-10-25 13:00:47 -0400
commit    54ba8c98a2589f816c52545c9cd3db6665ee0c67 (patch)
tree      3f5308a55f3c2e517b2698ce35989bc0026fcb65 /arch
parent    1c7b627e930624dd64ee906df554c8f2bad628ff (diff)
KVM: x86/mmu: Convert page fault paths to kvm_faultin_pfn()
Convert KVM x86 to use the recently introduced __kvm_faultin_pfn().
Opportunistically capture the refcounted_page grabbed by KVM for use in
future changes.

No functional change intended.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-45-seanjc@google.com>
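As a reading aid (not part of the patch), here is a minimal consolidated sketch of the non-private fault-in flow after this conversion, using only the call shape visible in the diff below; FOLL_WRITE, FOLL_NOWAIT and FOLL_INTERRUPTIBLE are the standard gup flags, and the error / async-page-fault handling that sits between the two hunks is elided:

	unsigned int foll = fault->write ? FOLL_WRITE : 0;

	/* First attempt: do not sleep waiting for the backing page (FOLL_NOWAIT). */
	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll | FOLL_NOWAIT,
				       &fault->map_writable, &fault->refcounted_page);

	/*
	 * Only reached when the first attempt indicates I/O is needed (the
	 * checks deciding that are elided here): retry without FOLL_NOWAIT so
	 * gup may sleep, but allow a fatal signal, i.e. SIGKILL, to interrupt
	 * the wait via FOLL_INTERRUPTIBLE.
	 */
	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll | FOLL_INTERRUPTIBLE,
				       &fault->map_writable, &fault->refcounted_page);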
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/mmu/mmu.c           14
-rw-r--r--  arch/x86/kvm/mmu/mmu_internal.h   1
2 files changed, 11 insertions, 4 deletions
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 6058e7dbcd2b..2bea2d20c571 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4423,11 +4423,14 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
 static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 				 struct kvm_page_fault *fault)
 {
+	unsigned int foll = fault->write ? FOLL_WRITE : 0;
+
 	if (fault->is_private)
 		return kvm_mmu_faultin_pfn_private(vcpu, fault);
 
-	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, false, true,
-					  fault->write, &fault->map_writable);
+	foll |= FOLL_NOWAIT;
+	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
+				       &fault->map_writable, &fault->refcounted_page);
 
 	/*
 	 * If resolving the page failed because I/O is needed to fault-in the
@@ -4454,8 +4457,11 @@ static int __kvm_mmu_faultin_pfn(struct kvm_vcpu *vcpu,
 	 * to wait for IO.  Note, gup always bails if it is unable to quickly
 	 * get a page and a fatal signal, i.e. SIGKILL, is pending.
 	 */
-	fault->pfn = __gfn_to_pfn_memslot(fault->slot, fault->gfn, true, true,
-					  fault->write, &fault->map_writable);
+	foll |= FOLL_INTERRUPTIBLE;
+	foll &= ~FOLL_NOWAIT;
+	fault->pfn = __kvm_faultin_pfn(fault->slot, fault->gfn, foll,
+				       &fault->map_writable, &fault->refcounted_page);
+
 	return RET_PF_CONTINUE;
 }
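The refcounted_page captured above is only stored by this patch ("for use in future changes"); as a purely hypothetical illustration of what an eventual consumer could look like, sketched with the long-standing kvm_release_page_clean()/kvm_release_page_dirty() helpers rather than the actual follow-up patches in this series:

	/*
	 * Hypothetical sketch, not this patch: once the fault has been handled,
	 * drop the reference __kvm_faultin_pfn() took on KVM's behalf.  The
	 * clean vs. dirty choice below is illustrative only.
	 */
	if (fault->refcounted_page) {
		if (fault->map_writable)
			kvm_release_page_dirty(fault->refcounted_page);
		else
			kvm_release_page_clean(fault->refcounted_page);
	}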
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 59e600f6ff9d..fabbea504a69 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -238,6 +238,7 @@ struct kvm_page_fault {
 	/* Outputs of kvm_mmu_faultin_pfn(). */
 	unsigned long mmu_seq;
 	kvm_pfn_t pfn;
+	struct page *refcounted_page;
 	bool map_writable;
 
 	/*