 arch/x86/kvm/mmu/mmu.c     |  3 +
 arch/x86/kvm/mmu/tdp_mmu.c | 58 ++++++++++++++++++++++++++++++++++++++++++++
 arch/x86/kvm/mmu/tdp_mmu.h |  2 ++
 3 files changed, 63 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 0c64643819b9..cd1be200e2a3 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -5544,6 +5544,9 @@ void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm,
 	spin_lock(&kvm->mmu_lock);
 	slot_handle_leaf(kvm, (struct kvm_memory_slot *)memslot,
 			 kvm_mmu_zap_collapsible_spte, true);
+
+	if (kvm->arch.tdp_mmu_enabled)
+		kvm_tdp_mmu_zap_collapsible_sptes(kvm, memslot);
 	spin_unlock(&kvm->mmu_lock);
 }
 
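
For context: this hunk chains the TDP MMU pass onto the existing shadow-MMU pass under the same mmu_lock. kvm_mmu_zap_collapsible_sptes() itself is reached from the memslot commit path when dirty logging is disabled on a slot. A condensed sketch of that caller, approximating the kvm_arch_commit_memory_region() logic in this era of the tree (not part of this patch; the surrounding function is elided):

	/*
	 * Sketch: dirty logging splits large pages into 4K SPTEs; once
	 * logging stops, the splintered SPTEs are zapped so later faults
	 * can recreate large mappings. Condition approximated from
	 * arch/x86/kvm/x86.c.
	 */
	if ((change != KVM_MR_DELETE) &&
	    (old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	    !(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
		kvm_mmu_zap_collapsible_sptes(kvm, new);
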
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 7181b4ab54d0..0f181f324455 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1020,3 +1020,61 @@ bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
 
 	return spte_set;
 }
+/*
+ * Clear non-leaf entries (and free associated page tables) which could
+ * be replaced by large mappings, for GFNs within the slot.
+ */
+static void zap_collapsible_spte_range(struct kvm *kvm,
+				       struct kvm_mmu_page *root,
+				       gfn_t start, gfn_t end)
+{
+	struct tdp_iter iter;
+	kvm_pfn_t pfn;
+	bool spte_set = false;
+
+	tdp_root_for_each_pte(iter, root, start, end) {
+		if (!is_shadow_present_pte(iter.old_spte) ||
+		    is_last_spte(iter.old_spte, iter.level))
+			continue;
+
+		pfn = spte_to_pfn(iter.old_spte);
+		if (kvm_is_reserved_pfn(pfn) ||
+		    !PageTransCompoundMap(pfn_to_page(pfn)))
+			continue;
+
+		tdp_mmu_set_spte(kvm, &iter, 0);
+
+		spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
+	}
+
+	if (spte_set)
+		kvm_flush_remote_tlbs(kvm);
+}
+
+/*
+ * Clear non-leaf entries (and free associated page tables) which could
+ * be replaced by large mappings, for GFNs within the slot.
+ */
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot)
+{
+	struct kvm_mmu_page *root;
+	int root_as_id;
+
+	for_each_tdp_mmu_root(kvm, root) {
+		root_as_id = kvm_mmu_page_as_id(root);
+		if (root_as_id != slot->as_id)
+			continue;
+
+		/*
+		 * Take a reference on the root so that it cannot be freed if
+		 * this thread releases the MMU lock and yields in this loop.
+		 */
+		kvm_mmu_get_root(kvm, root);
+
+		zap_collapsible_spte_range(kvm, root, slot->base_gfn,
+					   slot->base_gfn + slot->npages);
+
+		kvm_mmu_put_root(kvm, root);
+	}
+}
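
The spte_set bookkeeping above depends on tdp_mmu_iter_flush_cond_resched(), introduced earlier in this series: it yields the MMU lock when a reschedule is pending, flushing remote TLBs first so no vCPU keeps running on a stale translation for a zapped SPTE, and returns whether it yielded. An approximate sketch of its shape (reconstructed from the surrounding series, not from this patch; treat the details as assumptions):

static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm,
					    struct tdp_iter *iter)
{
	if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
		/* Flush before dropping mmu_lock. */
		kvm_flush_remote_tlbs(kvm);
		cond_resched_lock(&kvm->mmu_lock);
		/*
		 * Re-walk from the root; the paging structure may have
		 * changed while the lock was dropped.
		 */
		tdp_iter_refresh_walk(iter);
		return true;
	}

	return false;
}

zap_collapsible_spte_range() feeds the most recent return value into spte_set, which decides whether a final kvm_flush_remote_tlbs() is issued after the loop.
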
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
index ece66f10d85f..8cc902b8b9f8 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.h
+++ b/arch/x86/kvm/mmu/tdp_mmu.h
@@ -38,4 +38,6 @@ void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
 				       gfn_t gfn, unsigned long mask,
 				       bool wrprot);
 bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot);
+void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
+				       const struct kvm_memory_slot *slot);
 #endif /* __KVM_X86_MMU_TDP_MMU_H */
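
End to end, this machinery runs when userspace drops KVM_MEM_LOG_DIRTY_PAGES from an existing memslot, e.g. after a live migration completes or is aborted. A minimal userspace illustration (hypothetical wrapper; vm_fd, gpa, size, and hva are the caller's):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Re-register the slot without the dirty-logging flag; in the kernel
 * this reaches kvm_mmu_zap_collapsible_sptes(), letting subsequent
 * faults map the range with large pages again.
 */
static int stop_dirty_logging(int vm_fd, __u64 gpa, __u64 size, void *hva)
{
	struct kvm_userspace_memory_region region = {
		.slot = 0,		/* hypothetical slot id */
		.flags = 0,		/* KVM_MEM_LOG_DIRTY_PAGES cleared */
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (__u64)hva,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
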