From 4ae3cb3a2551b41f22284f713e7d5e2b61a85c1d Mon Sep 17 00:00:00 2001
From: Lan Tianyu
Date: Sun, 13 Mar 2016 11:10:28 +0800
Subject: KVM: Replace smp_mb() with smp_load_acquire() in kvm_flush_remote_tlbs()

smp_load_acquire() is enough here and it is cheaper than smp_mb().
Add a comment about reusing the memory barrier of
kvm_make_all_cpus_request() here to keep the ordering between
modifications to the page tables and the read of vcpu->mode.

Signed-off-by: Lan Tianyu
Signed-off-by: Paolo Bonzini
---
 virt/kvm/kvm_main.c | 18 ++++++++++++++++--
 1 file changed, 16 insertions(+), 2 deletions(-)

diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 402590dfcf9b..4fd482fb9260 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -191,9 +191,23 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
-	long dirty_count = kvm->tlbs_dirty;
+	/*
+	 * Read tlbs_dirty before setting KVM_REQ_TLB_FLUSH in
+	 * kvm_make_all_cpus_request.
+	 */
+	long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
 
-	smp_mb();
+	/*
+	 * We want to publish modifications to the page tables before reading
+	 * mode. Pairs with a memory barrier in arch-specific code.
+	 * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
+	 *   and smp_mb in walk_shadow_page_lockless_begin/end.
+	 * - powerpc: smp_mb in kvmppc_prepare_to_enter.
+	 *
+	 * There is already an smp_mb__after_atomic() before
+	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
+	 * barrier here.
+	 */
 	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
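
A note for readers on the barrier pairing above: smp_load_acquire() returns
the loaded value and forbids later memory accesses in the same thread from
being reordered before the load; when it pairs with a release (or full
barrier) on the writer's side, everything the writer stored before
publishing is guaranteed visible to the reader. What follows is a minimal,
self-contained C11 sketch of that acquire/release idiom, not kernel code:
the names page_table_data and tlbs_dirty are illustrative only, and C11's
memory_order_acquire/memory_order_release stand in for the kernel's
smp_load_acquire()/smp_store_release().

	/*
	 * Standalone C11 illustration of acquire/release pairing;
	 * build with e.g. "cc -pthread demo.c". All names are
	 * illustrative, not KVM's.
	 */
	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static int page_table_data;	/* plain data published by the writer */
	static atomic_long tlbs_dirty;	/* plays the role of kvm->tlbs_dirty */

	static void *writer(void *arg)
	{
		(void)arg;
		page_table_data = 42;	/* modify the "page tables" first */
		/* Release store: publishes page_table_data to acquire readers. */
		atomic_store_explicit(&tlbs_dirty, 1, memory_order_release);
		return NULL;
	}

	static void *reader(void *arg)
	{
		(void)arg;
		/*
		 * Acquire load, like smp_load_acquire(&kvm->tlbs_dirty):
		 * no later access in this thread is reordered before it.
		 */
		while (atomic_load_explicit(&tlbs_dirty, memory_order_acquire) == 0)
			;	/* spin until the writer publishes */
		/* Guaranteed by the pairing: the pre-release store is visible. */
		assert(page_table_data == 42);
		printf("reader saw page_table_data = %d\n", page_table_data);
		return NULL;
	}

	int main(void)
	{
		pthread_t w, r;

		pthread_create(&r, NULL, reader, NULL);
		pthread_create(&w, NULL, writer, NULL);
		pthread_join(w, NULL);
		pthread_join(r, NULL);
		return 0;
	}

On the flush side no explicit release is needed: as the new comment in the
patch notes, kvm_make_all_cpus_request() already executes an
smp_mb__after_atomic() before it reads vcpu->mode, so the patch reuses that
existing full barrier instead of keeping a second smp_mb().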
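
The function's last line also deserves a remark: tlbs_dirty is cleared with
cmpxchg() against the earlier snapshot rather than with a plain store, so a
concurrent increment that lands between the snapshot and the flush is never
wiped out. Below is a small standalone C11 sketch of that
snapshot-and-conditional-reset idiom, again with illustrative names rather
than kernel code:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_long tlbs_dirty;	/* plays the role of kvm->tlbs_dirty */

	static void flush_and_maybe_reset(void)
	{
		/* Snapshot first, as in kvm_flush_remote_tlbs(). */
		long dirty_count = atomic_load_explicit(&tlbs_dirty,
							memory_order_acquire);

		/* ... the remote TLB flush itself would happen here ... */

		/*
		 * Clear the counter only if it still equals the snapshot;
		 * if another thread incremented it meanwhile this is a
		 * no-op, so the new dirtying is not lost. Mirrors
		 * cmpxchg(&kvm->tlbs_dirty, dirty_count, 0).
		 */
		atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
	}

	int main(void)
	{
		atomic_fetch_add(&tlbs_dirty, 1);	/* a vCPU dirties the TLBs */
		flush_and_maybe_reset();
		printf("tlbs_dirty after flush = %ld\n",
		       (long)atomic_load(&tlbs_dirty));	/* prints 0 */
		return 0;
	}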