author     Avi Kivity <avi@redhat.com>        2010-11-22 17:53:26 +0200
committer  Avi Kivity <avi@redhat.com>        2011-01-12 11:29:59 +0200
commit     ab9ae3138789afacd133a9c4b3d7a3f1578e25c7
tree       27c81b48c395dffd2049b37bafef940573ae6841
parent     35d3d4a1dd2c1ffd6f2481f6d8ad6c358bb22f07
KVM: Push struct x86_exception into the various gva_to_gpa variants
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
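With this change the gva_to_gpa translators fill in the exception themselves, so callers no longer receive a bare u32 error code and then build a page fault from it. Below is a minimal caller-side sketch of the new convention, written against the kernel-internal declarations touched by this patch; example_read_gva() is a hypothetical helper and not part of the commit, while kvm_mmu_gva_to_gpa_read(), struct x86_exception, UNMAPPED_GVA and the X86EMUL_* return codes are taken from the diff below.

/*
 * Hypothetical caller (illustration only, not part of this patch).
 * On a failed walk the translator has already filled exception->vector,
 * ->error_code_valid and ->error_code, so the caller simply propagates
 * the fault instead of assembling it from a raw u32 error code.
 */
static int example_read_gva(struct kvm_vcpu *vcpu, gva_t gva, gpa_t *gpa)
{
	struct x86_exception exception;

	*gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, &exception);
	if (*gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;	/* exception describes the #PF */

	return X86EMUL_CONTINUE;
}

This is what lets the x86.c hunks drop make_page_fault(): by the time a translation comes back as UNMAPPED_GVA, the exception already describes the fault to inject, and the emulator helpers only need to return X86EMUL_PROPAGATE_FAULT.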
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 14
-rw-r--r--  arch/x86/kvm/mmu.c              | 13
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      | 19
-rw-r--r--  arch/x86/kvm/x86.c              | 52
4 files changed, 51 insertions(+), 47 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 14524781de13..9980a2484624 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -245,7 +245,7 @@ struct kvm_mmu {
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
-			    u32 *error);
+			    struct x86_exception *exception);
 	gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
@@ -708,10 +708,14 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 53ff31f3dc6f..9ce041469a8e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2567,18 +2567,19 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 }
 
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
-				  u32 access, u32 *error)
+				  u32 access, struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vaddr;
 }
 
 static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
-					 u32 access, u32 *error)
+					 u32 access,
+					 struct x86_exception *exception)
 {
-	if (error)
-		*error = 0;
+	if (exception)
+		exception->error_code = 0;
 	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 2b3d66c7b68d..3ac39de444ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -677,7 +677,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
-			       u32 *error)
+			       struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -688,14 +688,18 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception) {
+		exception->vector = PF_VECTOR;
+		exception->error_code_valid = true;
+		exception->error_code = walker.error_code;
+	}
 
 	return gpa;
 }
 
 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
-				      u32 access, u32 *error)
+				      u32 access,
+				      struct x86_exception *exception)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
@@ -706,8 +710,11 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	} else if (error)
-		*error = walker.error_code;
+	} else if (exception) {
+		exception->vector = PF_VECTOR;
+		exception->error_code_valid = true;
+		exception->error_code = walker.error_code;
+	}
 
 	return gpa;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8311ed909c49..a7a7decba43f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3603,51 +3603,47 @@ static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 static gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
 {
 	gpa_t t_gpa;
-	u32 error;
+	struct x86_exception exception;
 
 	BUG_ON(!mmu_is_nested(vcpu));
 
 	/* NPT walks are always user-walks */
 	access |= PFERR_USER_MASK;
-	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &error);
+	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
 	if (t_gpa == UNMAPPED_GVA)
 		vcpu->arch.fault.nested = true;
 
 	return t_gpa;
 }
 
-gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
+			      struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_FETCH_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
-gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
+			       struct x86_exception *exception)
 {
 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
 	access |= PFERR_WRITE_MASK;
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, error);
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
 }
 
 /* uses this to access any guest's mapped memory without checking CPL */
-gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
-{
-	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
-}
-
-static int make_page_fault(struct x86_exception *exception, u32 error)
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
+				struct x86_exception *exception)
 {
-	exception->vector = PF_VECTOR;
-	exception->error_code_valid = true;
-	exception->error_code = error;
-	return X86EMUL_PROPAGATE_FAULT;
+	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
 }
 
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
@@ -3656,17 +3652,16 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
-	u32 error;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-							    &error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
 		if (gpa == UNMAPPED_GVA)
-			return make_page_fault(exception, error);
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3715,18 +3710,17 @@ static int kvm_write_guest_virt_system(gva_t addr, void *val,
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
-	u32 error;
 
 	while (bytes) {
 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
 							    PFERR_WRITE_MASK,
-							    &error);
+							    exception);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
 
 		if (gpa == UNMAPPED_GVA)
-			return make_page_fault(exception, error);
+			return X86EMUL_PROPAGATE_FAULT;
 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
 		if (ret < 0) {
 			r = X86EMUL_IO_NEEDED;
@@ -3748,7 +3742,6 @@ static int emulator_read_emulated(unsigned long addr,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
-	u32 error_code;
 
 	if (vcpu->mmio_read_completed) {
 		memcpy(val, vcpu->mmio_data, bytes);
@@ -3758,10 +3751,10 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
-		return make_page_fault(exception, error_code);
+		return X86EMUL_PROPAGATE_FAULT;
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3810,12 +3803,11 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
-	u32 error_code;
 
-	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, exception);
 
 	if (gpa == UNMAPPED_GVA)
-		return make_page_fault(exception, error_code);
+		return X86EMUL_PROPAGATE_FAULT;
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)