summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJérôme Glisse <jglisse@redhat.com>2019-02-20 12:32:45 -0500
committerJérôme Glisse <jglisse@redhat.com>2019-02-20 20:14:40 -0500
commit8c2215ab6b914f3ddfcde32f2f2b5cdef58de385 (patch)
tree425c31c4559104fbdcffd4b8377bed47fb9f4d3a
parent95d0d8c0f8b8c4712320afe0804dc0f116ca210a (diff)
kvm/x86: leverage clear versus unmap distinction and avoid freeing rmap (branch: wip-kvm-mmu-notifier-opti)
Do not free the rmap when clearing a range of virtual addresses, i.e. when we get called through kvm_clear_hva_range() rather than through kvm_unmap_hva_range(). The range is still valid after a clear and is likely to be accessed again, which would require re-allocating the structures if we were to free them. Signed-off-by: Jérôme Glisse <jglisse@redhat.com> Cc: Paolo Bonzini <pbonzini@redhat.com> Cc: Radim Krčmář <rkrcmar@redhat.com> Cc: kvm@vger.kernel.org Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Peter Xu <peterx@redhat.com> Cc: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--arch/x86/kvm/mmu.c43
1 file changed, 32 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aa337ef5914f..8adfac01e2d2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1274,7 +1274,8 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
static void
pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
struct pte_list_desc *desc, int i,
- struct pte_list_desc *prev_desc)
+ struct pte_list_desc *prev_desc,
+ bool free)
{
int j;
@@ -1282,7 +1283,7 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
;
desc->sptes[i] = desc->sptes[j];
desc->sptes[j] = NULL;
- if (j != 0)
+ if (!free || j != 0)
return;
if (!prev_desc && !desc->more)
rmap_head->val = (unsigned long)desc->sptes[0];
@@ -1294,7 +1295,8 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
mmu_free_pte_list_desc(desc);
}
-static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head,
+ bool free)
{
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
@@ -1318,7 +1320,7 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
if (desc->sptes[i] == spte) {
pte_list_desc_remove_entry(rmap_head,
- desc, i, prev_desc);
+ desc, i, prev_desc, free);
return;
}
}
@@ -1330,10 +1332,11 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
}
}
-static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
+static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep,
+ bool free)
{
mmu_spte_clear_track_bits(sptep);
- __pte_list_remove(sptep, rmap_head);
+ __pte_list_remove(sptep, rmap_head, free);
}
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
@@ -1384,7 +1387,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
sp = page_header(__pa(spte));
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
rmap_head = gfn_to_rmap(kvm, gfn, sp);
- __pte_list_remove(spte, rmap_head);
+ __pte_list_remove(spte, rmap_head, true);
}
/*
@@ -1724,7 +1727,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
while ((sptep = rmap_get_first(rmap_head, &iter))) {
rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(rmap_head, sptep, true);
flush = true;
}
@@ -1738,6 +1741,24 @@ static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
return kvm_zap_rmapp(kvm, rmap_head);
}
+static int kvm_clear_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ struct kvm_memory_slot *slot, gfn_t gfn, int level,
+ unsigned long data)
+{
+ u64 *sptep;
+ struct rmap_iterator iter;
+ bool flush = false;
+
+ while ((sptep = rmap_get_first(rmap_head, &iter))) {
+ rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
+
+ pte_list_remove(rmap_head, sptep, false);
+ flush = true;
+ }
+
+ return flush;
+}
+
static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
struct kvm_memory_slot *slot, gfn_t gfn, int level,
unsigned long data)
@@ -1913,7 +1934,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
int kvm_clear_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
- return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
+ return kvm_handle_hva_range(kvm, start, end, 0, kvm_clear_rmapp);
}
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
@@ -2031,7 +2052,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- __pte_list_remove(parent_pte, &sp->parent_ptes);
+ __pte_list_remove(parent_pte, &sp->parent_ptes, true);
}
static void drop_parent_pte(struct kvm_mmu_page *sp,
@@ -5721,7 +5742,7 @@ restart:
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(rmap_head, sptep, true);
if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,