author | Marcelo Tosatti <mtosatti@redhat.com> | 2008-07-03 18:33:02 -0300
---|---|---
committer | Avi Kivity <avi@qumranet.com> | 2008-07-20 12:42:38 +0300
commit | 5a4c92880493945678315a6df810f7a21f55b985 (patch)
tree | 2a03286d5f9ea2e6f9426bbf153d3044122105e6 /arch/x86
parent | 7e37c2998a5a0b00134f6227167694b710f57ac0 (diff)
KVM: mmu_shrink: kvm_mmu_zap_page requires slots_lock to be held
kvm_mmu_zap_page() needs the slots_lock to be held (rmap_remove->gfn_to_memslot,
for example).
Since the kvm_lock spinlock is already held in mmu_shrink(), we must not block
on slots_lock; acquire it with a non-blocking down_read_trylock() and skip the
VM when the lock cannot be taken.
Untested.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- | arch/x86/kvm/mmu.c | 3
1 file changed, 3 insertions, 0 deletions
```diff
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1fd8e3b58cc0..ff7cf632175b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1987,6 +1987,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		int npages;
 
+		if (!down_read_trylock(&kvm->slots_lock))
+			continue;
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
 			 kvm->arch.n_free_mmu_pages;
@@ -1999,6 +2001,7 @@
 		nr_to_scan--;
 
 		spin_unlock(&kvm->mmu_lock);
+		up_read(&kvm->slots_lock);
 	}
 	if (kvm_freed)
 		list_move_tail(&kvm_freed->vm_list, &vm_list);
```
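To make the constraint behind the patch concrete, here is a minimal, self-contained userspace sketch of the same pattern. This is not KVM code: the pthreads primitives, the `item` structure, and the names `list_lock` and `shrink_scan()` are stand-ins invented for illustration. The idea mirrors mmu_shrink(): once a spinlock is held, blocking is not allowed, so the reader-writer lock is taken with a trylock and the entry is simply skipped when it is contended.

```c
/*
 * Illustrative userspace analogue of the mmu_shrink() locking pattern
 * (hypothetical names, not KVM code).  Build with: gcc -pthread demo.c
 */
#include <pthread.h>
#include <stdio.h>

#define NITEMS 4

struct item {
	pthread_rwlock_t slots_lock;	/* plays the role of kvm->slots_lock */
	int pages;			/* plays the role of n_alloc_mmu_pages */
};

static struct item items[NITEMS];
static pthread_spinlock_t list_lock;	/* plays the role of kvm_lock */

static void shrink_scan(void)
{
	pthread_spin_lock(&list_lock);	/* from here on we must not block */

	for (int i = 0; i < NITEMS; i++) {
		struct item *it = &items[i];

		/*
		 * A blocking read lock could sleep while the spinlock is
		 * held, so try to take it and skip the item on contention,
		 * as the patch does with down_read_trylock().
		 */
		if (pthread_rwlock_tryrdlock(&it->slots_lock) != 0)
			continue;

		printf("item %d: %d pages\n", i, it->pages);

		pthread_rwlock_unlock(&it->slots_lock);
	}

	pthread_spin_unlock(&list_lock);
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < NITEMS; i++) {
		pthread_rwlock_init(&items[i].slots_lock, NULL);
		items[i].pages = 10 * i;
	}
	shrink_scan();
	return 0;
}
```

The trade-off is the same as in the patch: a contended slots_lock means the shrinker skips that VM for the current scan instead of blocking while kvm_lock is held.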