Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_rm_mmu.c | 150
1 file changed, 32 insertions, 118 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 084ad54c73cd..510bdfbc4073 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -45,16 +45,12 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
-	 * If we're not using MMU notifiers, we never take pages away
-	 * from the guest, so we can use tlbiel if requested.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
-	else if (kvm->arch.using_mmu_notifiers)
-		global = 1;
	else
-		global = !(flags & H_LOCAL);
+		global = 1;
 
	if (!global) {
		/* any other core might now have stale TLB entries... */
@@ -170,7 +166,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
-	unsigned long *physp, pte_size;
+	unsigned long pte_size;
	unsigned long is_io;
	unsigned long *rmap;
	pte_t pte;
@@ -198,9 +194,6 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	is_io = ~0ul;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
-		/* PPC970 can't do emulated MMIO */
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			return H_PARAMETER;
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
@@ -213,37 +206,20 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];
 
-	if (!kvm->arch.using_mmu_notifiers) {
-		physp = memslot->arch.slot_phys;
-		if (!physp)
-			return H_PARAMETER;
-		physp += slot_fn;
-		if (realmode)
-			physp = real_vmalloc_addr(physp);
-		pa = *physp;
-		if (!pa)
-			return H_TOO_HARD;
-		is_io = pa & (HPTE_R_I | HPTE_R_W);
-		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
-		pa &= PAGE_MASK;
+	/* Translate to host virtual address */
+	hva = __gfn_to_hva_memslot(memslot, gfn);
+
+	/* Look up the Linux PTE for the backing page */
+	pte_size = psize;
+	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
+	if (pte_present(pte) && !pte_numa(pte)) {
+		if (writing && !pte_write(pte))
+			/* make the actual HPTE be read-only */
+			ptel = hpte_make_readonly(ptel);
+		is_io = hpte_cache_bits(pte_val(pte));
+		pa = pte_pfn(pte) << PAGE_SHIFT;
+		pa |= hva & (pte_size - 1);
		pa |= gpa & ~PAGE_MASK;
-	} else {
-		/* Translate to host virtual address */
-		hva = __gfn_to_hva_memslot(memslot, gfn);
-
-		/* Look up the Linux PTE for the backing page */
-		pte_size = psize;
-		pte = lookup_linux_pte_and_update(pgdir, hva, writing,
-						  &pte_size);
-		if (pte_present(pte) && !pte_numa(pte)) {
-			if (writing && !pte_write(pte))
-				/* make the actual HPTE be read-only */
-				ptel = hpte_make_readonly(ptel);
-			is_io = hpte_cache_bits(pte_val(pte));
-			pa = pte_pfn(pte) << PAGE_SHIFT;
-			pa |= hva & (pte_size - 1);
-			pa |= gpa & ~PAGE_MASK;
-		}
	}
 
	if (pte_size < psize)
@@ -337,8 +313,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
	rmap = real_vmalloc_addr(rmap);
	lock_rmap(rmap);
	/* Check for pending invalidations under the rmap chain lock */
-	if (kvm->arch.using_mmu_notifiers &&
-	    mmu_notifier_retry(kvm, mmu_seq)) {
+	if (mmu_notifier_retry(kvm, mmu_seq)) {
		/* inval in progress, write a non-present HPTE */
		pteh |= HPTE_V_ABSENT;
		pteh &= ~HPTE_V_VALID;
@@ -395,61 +370,11 @@ static inline int try_lock_tlbie(unsigned int *lock)
	return old == 0;
 }
 
-/*
- * tlbie/tlbiel is a bit different on the PPC970 compared to later
- * processors such as POWER7; the large page bit is in the instruction
- * not RB, and the top 16 bits and the bottom 12 bits of the VA
- * in RB must be 0.
- */
-static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
-			  long npages, int global, bool need_sync)
-{
-	long i;
-
-	if (global) {
-		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
-			cpu_relax();
-		if (need_sync)
-			asm volatile("ptesync" : : : "memory");
-		for (i = 0; i < npages; ++i) {
-			unsigned long rb = rbvalues[i];
-
-			if (rb & 1)		/* large page */
-				asm volatile("tlbie %0,1" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-			else
-				asm volatile("tlbie %0,0" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-		}
-		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
-		kvm->arch.tlbie_lock = 0;
-	} else {
-		if (need_sync)
-			asm volatile("ptesync" : : : "memory");
-		for (i = 0; i < npages; ++i) {
-			unsigned long rb = rbvalues[i];
-
-			if (rb & 1)		/* large page */
-				asm volatile("tlbiel %0,1" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-			else
-				asm volatile("tlbiel %0,0" : :
-					     "r" (rb & 0x0000fffffffff000ul));
-		}
-		asm volatile("ptesync" : : : "memory");
-	}
-}
-
 static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
 {
	long i;
 
-	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-		/* PPC970 tlbie instruction is a bit different */
-		do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
-		return;
-	}
	if (global) {
		while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
			cpu_relax();
@@ -667,40 +592,29 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}
-	r = (be64_to_cpu(hpte[1]) & ~mask) | bits;
 
	/* Update HPTE */
	if (v & HPTE_V_VALID) {
-		rb = compute_tlbie_rb(v, r, pte_index);
-		hpte[0] = cpu_to_be64(v & ~HPTE_V_VALID);
-		do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
		/*
-		 * If the host has this page as readonly but the guest
-		 * wants to make it read/write, reduce the permissions.
-		 * Checking the host permissions involves finding the
-		 * memslot and then the Linux PTE for the page.
+		 * If the page is valid, don't let it transition from
+		 * readonly to writable.  If it should be writable, we'll
+		 * take a trap and let the page fault code sort it out.
		 */
-		if (hpte_is_writable(r) && kvm->arch.using_mmu_notifiers) {
-			unsigned long psize, gfn, hva;
-			struct kvm_memory_slot *memslot;
-			pgd_t *pgdir = vcpu->arch.pgdir;
-			pte_t pte;
-
-			psize = hpte_page_size(v, r);
-			gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
-			memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
-			if (memslot) {
-				hva = __gfn_to_hva_memslot(memslot, gfn);
-				pte = lookup_linux_pte_and_update(pgdir, hva,
-								  1, &psize);
-				if (pte_present(pte) && !pte_write(pte))
-					r = hpte_make_readonly(r);
-			}
+		pte = be64_to_cpu(hpte[1]);
+		r = (pte & ~mask) | bits;
+		if (hpte_is_writable(r) && !hpte_is_writable(pte))
+			r = hpte_make_readonly(r);
+		/* If the PTE is changing, invalidate it first */
+		if (r != pte) {
+			rb = compute_tlbie_rb(v, r, pte_index);
+			hpte[0] = cpu_to_be64((v & ~HPTE_V_VALID) |
+					      HPTE_V_ABSENT);
+			do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags),
+				  true);
+			hpte[1] = cpu_to_be64(r);
		}
	}
-	hpte[1] = cpu_to_be64(r);
-	eieio();
-	hpte[0] = cpu_to_be64(v & ~HPTE_V_HVLOCK);
+	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	return H_SUCCESS;
 }
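
The most subtle part of the change is the new update protocol in the kvmppc_h_protect() hunk above: a valid HPTE is never modified in place. If the second doubleword would change, the entry is first marked absent, the old translation is flushed, and only then is the new value written; the valid bit comes back when the entry is unlocked. Below is a minimal user-space sketch of that ordering. It is an illustration only, not kernel code: the bit positions are placeholders, tlb_invalidate() stands in for compute_tlbie_rb()/do_tlbies(), and the locking, be64 handling, and memory barriers of the real code are omitted.

/*
 * Sketch of the invalidate-before-update rule followed by the new
 * kvmppc_h_protect() path.  All names and bit values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define HPTE_V_VALID	(1ull << 0)	/* placeholder bit positions */
#define HPTE_V_ABSENT	(1ull << 1)

/* stand-in for do_tlbies(); the kernel issues tlbie/tlbiel here */
static void tlb_invalidate(uint64_t rb)
{
	printf("invalidate rb=%#llx\n", (unsigned long long)rb);
}

/* Apply (r & ~mask) | bits to the second doubleword of a valid HPTE. */
static void hpte_update(uint64_t hpte[2], uint64_t mask, uint64_t bits)
{
	uint64_t v = hpte[0];
	uint64_t old_r = hpte[1];
	uint64_t new_r = (old_r & ~mask) | bits;

	if (!(v & HPTE_V_VALID) || new_r == old_r)
		return;		/* nothing the TLB could have cached changes */

	/* 1. mark the entry absent so hardware stops using it */
	hpte[0] = (v & ~HPTE_V_VALID) | HPTE_V_ABSENT;
	/* 2. flush any cached translation for the old entry */
	tlb_invalidate(old_r);
	/* 3. only now install the new permissions */
	hpte[1] = new_r;
	/* 4. restore the valid bit, as unlock_hpte() does in the kernel */
	hpte[0] = v;
}

int main(void)
{
	uint64_t hpte[2] = { HPTE_V_VALID, 0xf0f0 };

	hpte_update(hpte, 0xff, 0x0f);	/* change the low permission bits */
	printf("hpte = {%#llx, %#llx}\n",
	       (unsigned long long)hpte[0], (unsigned long long)hpte[1]);
	return 0;
}

The point of steps 1 and 2 is that no other CPU can ever observe a valid entry whose two doublewords are mismatched; a guest racing with the update takes a fault on the absent entry and retries, which is exactly the behavior the new comment in the diff ("take a trap and let the page fault code sort it out") relies on.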