Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.c | 10 +++++++---
-rw-r--r--  arch/x86/kvm/x86.c |  4 ++++
2 files changed, 11 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index fd6012eef9c9..2ce9da58611e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -51,7 +51,12 @@
 extern bool itlb_multihit_kvm_mitigation;
 
 static int __read_mostly nx_huge_pages = -1;
+#ifdef CONFIG_PREEMPT_RT
+/* Recovery can cause latency spikes, disable it for PREEMPT_RT. */
+static uint __read_mostly nx_huge_pages_recovery_ratio = 0;
+#else
 static uint __read_mostly nx_huge_pages_recovery_ratio = 60;
+#endif
 
 static int set_nx_huge_pages(const char *val, const struct kernel_param *kp);
 static int set_nx_huge_pages_recovery_ratio(const char *val, const struct kernel_param *kp);
@@ -6280,14 +6285,13 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 
 	if (new_val != old_val) {
 		struct kvm *kvm;
-		int idx;
 
 		mutex_lock(&kvm_lock);
 
 		list_for_each_entry(kvm, &vm_list, vm_list) {
-			idx = srcu_read_lock(&kvm->srcu);
+			mutex_lock(&kvm->slots_lock);
 			kvm_mmu_zap_all_fast(kvm);
-			srcu_read_unlock(&kvm->srcu, idx);
+			mutex_unlock(&kvm->slots_lock);
 
 			wake_up_process(kvm->arch.nx_lpage_recovery_thread);
 		}
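
The first hunk makes the default recovery ratio 0 on PREEMPT_RT, which disables the NX huge page recovery worker outright: each recovery period the worker reclaims 1/nx_huge_pages_recovery_ratio of the huge pages that were split for the iTLB multihit mitigation, so a ratio of 0 leaves it nothing to do and avoids the latency spike. The second hunk takes kvm->slots_lock instead of an SRCU read lock around kvm_mmu_zap_all_fast(), whose fast-zap bookkeeping expects the slots lock to be held for exclusivity. A minimal sketch of the ratio semantics follows; the helper name and scaffolding are illustrative, not the upstream worker:

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/*
 * Illustrative sketch, not the upstream recovery worker: how a
 * recovery ratio of 0 (the PREEMPT_RT default above) turns the
 * periodic zap into a no-op.
 */
static unsigned long nx_pages_to_zap(unsigned long nx_lpage_splits,
				     unsigned int ratio)
{
	/* ratio == 0: recovery is disabled, zap nothing. */
	if (!ratio)
		return 0;

	/* Otherwise reclaim 1/ratio of the split huge pages per period. */
	return DIV_ROUND_UP(nx_lpage_splits, ratio);
}
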
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7db5c8ef35dd..5d530521f11d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5130,6 +5130,10 @@ static void kvm_init_msr_list(void)
 
 	perf_get_x86_pmu_capability(&x86_pmu);
 
+	num_msrs_to_save = 0;
+	num_emulated_msrs = 0;
+	num_msr_based_features = 0;
+
 	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
 		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
 			continue;
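
The x86.c hunk zeroes the three list counters at the top of kvm_init_msr_list() so the lists are rebuilt from scratch every time the function runs: the loops below append with a post-increment, so a second run (for instance after the kvm-intel or kvm-amd vendor module is reloaded) would otherwise operate on the already-filtered lists and append duplicate or stale entries. A minimal self-contained sketch of that filter-and-append pattern, shown for one of the three lists only; the stand-in table is truncated and the function name here is hypothetical:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>
#include <asm/msr.h>		/* rdmsr_safe() */
#include <asm/msr-index.h>

/* Truncated stand-in for the real msrs_to_save_all[] table. */
static const u32 msrs_to_save_all[] = {
	MSR_STAR, MSR_IA32_TSC, MSR_EFER,
};

static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
static unsigned int num_msrs_to_save;

static void rebuild_msrs_to_save(void)
{
	u32 dummy[2];
	unsigned int i;

	num_msrs_to_save = 0;	/* the fix: rebuild from scratch on every call */

	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
		/* Skip MSRs the host CPU does not implement. */
		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
			continue;

		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
	}
}
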