Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c                 1
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c          1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c          1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c       1
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c   15
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S          3
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c         17
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio_hv.c      20
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             60
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c      2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_interrupts.S   2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c       1
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  18
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S      1
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c              3
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S      1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S         3
-rw-r--r--  arch/powerpc/kvm/book3s_xive.c           21
-rw-r--r--  arch/powerpc/kvm/book3s_xive_template.c  39
-rw-r--r--  arch/powerpc/kvm/e500.c                   1
-rw-r--r--  arch/powerpc/kvm/e500mc.c                 1
-rw-r--r--  arch/powerpc/kvm/emulate_loadstore.c      7
-rw-r--r--  arch/powerpc/kvm/powerpc.c               31
23 files changed, 108 insertions, 142 deletions
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index edaf4720d156..87348e498c89 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -28,7 +28,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 45c8ea4a0487..612169988a3d 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index cf9d686e8162..c92dd25bed23 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -23,7 +23,6 @@
#include <linux/kvm_host.h>
#include <linux/highmem.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 7f3a8cf5d66f..3c0e8fb2b773 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -29,7 +29,6 @@
#include <linux/file.h>
#include <linux/debugfs.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 176f911ee983..0af1c0aea1fe 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -66,10 +66,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
bits = root & RPDS_MASK;
root = root & RPDB_MASK;
- /* P9 DD1 interprets RTS (radix tree size) differently */
offset = rts + 31;
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- offset -= 3;
/* current implementations only support 52-bit space */
if (offset != 52)
@@ -160,17 +157,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
unsigned long clr, unsigned long set,
unsigned long addr, unsigned int shift)
{
- unsigned long old = 0;
-
- if (!(clr & _PAGE_PRESENT) && cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- pte_present(*ptep)) {
- /* have to invalidate it first */
- old = __radix_pte_update(ptep, _PAGE_PRESENT, 0);
- kvmppc_radix_tlbie_page(kvm, addr, shift);
- set |= _PAGE_PRESENT;
- old &= _PAGE_PRESENT;
- }
- return __radix_pte_update(ptep, clr, set) | old;
+ return __radix_pte_update(ptep, clr, set);
}
void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
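Context for the hunks above: the RTS (radix tree size) field encodes the guest's virtual address space as rts + 31 bits, and with the POWER9 DD1 special case gone every supported CPU decodes it identically, so kvmppc_radix_update_pte() collapses to a single __radix_pte_update() call. A minimal stand-alone sketch of the size check that remains (plain C, kernel context stripped):

    #include <stdio.h>

    /* The radix tree size field gives an address-space width of
     * rts + 31 bits; current implementations only accept 52. */
    static int radix_space_supported(unsigned long rts)
    {
        return rts + 31 == 52;
    }

    int main(void)
    {
        printf("rts=21: %s\n", radix_space_supported(21) ? "ok" : "unsupported");
        return 0;
    }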
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 688722acd692..066c665dc86f 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,6 +17,9 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#define SHADOW_SLB_ENTRY_LEN 0x10
#define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8c456fa691a5..9a3f2646ecc7 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -31,7 +31,6 @@
#include <linux/iommu.h>
#include <linux/file.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -180,7 +179,7 @@ extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
if ((tbltmp->it_page_shift <= stt->page_shift) &&
(tbltmp->it_offset << tbltmp->it_page_shift ==
stt->offset << stt->page_shift) &&
- (tbltmp->it_size << tbltmp->it_page_shift ==
+ (tbltmp->it_size << tbltmp->it_page_shift >=
stt->size << stt->page_shift)) {
/*
* Reference the table to avoid races with
@@ -296,7 +295,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
{
struct kvmppc_spapr_tce_table *stt = NULL;
struct kvmppc_spapr_tce_table *siter;
- unsigned long npages, size;
+ unsigned long npages, size = args->size;
int ret = -ENOMEM;
int i;
@@ -304,7 +303,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
return -EINVAL;
- size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
npages = kvmppc_tce_pages(size);
ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
if (ret)
@@ -378,19 +376,19 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -437,7 +435,8 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
enum dma_data_direction dir)
{
long ret;
- unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ unsigned long hpa;
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -464,7 +463,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
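Two things change in this file: the window-size comparison relaxes from == to >= (a host IOMMU table merely needs to be at least as large as the guest's TCE window), and the it_userspace entries become explicitly big-endian, so every access now goes through cpu_to_be64()/be64_to_cpu(). A hedged user-space sketch of that endian-safe store/load pattern (the kernel helpers are approximated with a byte swap, which is only correct on a little-endian host):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for cpu_to_be64()/be64_to_cpu(); assumes an LE host. */
    static uint64_t to_be64(uint64_t v)   { return __builtin_bswap64(v); }
    static uint64_t from_be64(uint64_t v) { return __builtin_bswap64(v); }

    int main(void)
    {
        uint64_t it_userspace[4] = { 0 };        /* stand-in table     */
        uint64_t ua = 0x00007fffdead0000ULL;     /* guest userspace VA */

        it_userspace[1] = to_be64(ua);           /* *pua = cpu_to_be64(ua) */
        uint64_t back = from_be64(it_userspace[1]);

        printf("round-trip ok: %d\n", back == ua);
        return 0;
    }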
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 5b298f5a1a14..506a4d400458 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -26,8 +26,8 @@
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
+#include <linux/stringify.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
@@ -200,23 +200,19 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
return H_TOO_HARD;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
- mem = mm_iommu_lookup_rm(kvm->mm, *pua, pgsize);
+ mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
return H_TOO_HARD;
mm_iommu_mapped_dec(mem);
- *pua = 0;
+ *pua = cpu_to_be64(0);
return H_SUCCESS;
}
@@ -268,7 +264,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
{
long ret;
unsigned long hpa = 0;
- unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -283,10 +279,6 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
&hpa)))
return H_HARDWARE;
- pua = (void *) vmalloc_to_phys(pua);
- if (WARN_ON_ONCE_RM(!pua))
- return H_HARDWARE;
-
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
return H_CLOSED;
@@ -303,7 +295,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (dir != DMA_NONE)
kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
- *pua = ua;
+ *pua = cpu_to_be64(ua);
return 0;
}
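The real-mode variant previously kept the plain IOMMU_TABLE_USERSPACE_ENTRY() pointer and translated it with vmalloc_to_phys() at each use; that translation now appears to be folded into the dedicated IOMMU_TABLE_USERSPACE_ENTRY_RM() accessor, which is why the WARN_ON_ONCE_RM(!pua) blocks disappear. A loose illustration of the idea (this macro body is an assumption for illustration, not the kernel's definition):

    /* Real-mode code cannot dereference vmalloc addresses directly, so the
     * _RM accessor is assumed to translate the vmalloc'd it_userspace slot
     * to a directly usable physical/linear address up front: */
    #define USERSPACE_ENTRY_RM_SKETCH(tbl, entry) \
        ((__be64 *)(unsigned long)vmalloc_to_phys(&(tbl)->it_userspace[(entry)]))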
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index de686b340f4a..3e3a71594e63 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -46,6 +46,7 @@
#include <linux/compiler.h>
#include <linux/of.h>
+#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
@@ -53,7 +54,6 @@
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
@@ -128,14 +128,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
* and SPURR count and should be set according to the number of
* online threads in the vcore being run.
*/
-#define RWMR_RPA_P8_1THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9
-#define RWMR_RPA_P8_3THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9
-#define RWMR_RPA_P8_5THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_6THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_7THREAD 0x164520C62609AECA
-#define RWMR_RPA_P8_8THREAD 0x164520C62609AECA
+#define RWMR_RPA_P8_1THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_2THREAD 0x7FFF2908450D8DA9UL
+#define RWMR_RPA_P8_3THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_4THREAD 0x199A421245058DA9UL
+#define RWMR_RPA_P8_5THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_6THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_7THREAD 0x164520C62609AECAUL
+#define RWMR_RPA_P8_8THREAD 0x164520C62609AECAUL
static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
RWMR_RPA_P8_1THREAD,
@@ -216,7 +216,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
wqp = kvm_arch_vcpu_wq(vcpu);
if (swq_has_sleeper(wqp)) {
- swake_up(wqp);
+ swake_up_one(wqp);
++vcpu->stat.halt_wakeup;
}
@@ -1693,14 +1693,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
break;
case KVM_REG_PPC_TB_OFFSET:
- /*
- * POWER9 DD1 has an erratum where writing TBU40 causes
- * the timebase to lose ticks. So we don't let the
- * timebase offset be changed on P9 DD1. (It is
- * initialized to zero.)
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1))
- break;
/* round up to multiple of 2^24 */
vcpu->arch.vcore->tb_offset =
ALIGN(set_reg_val(id, *val), 1UL << 24);
@@ -1816,7 +1808,7 @@ static int threads_per_vcore(struct kvm *kvm)
return threads_per_subcore;
}
-static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
+static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id)
{
struct kvmppc_vcore *vcore;
@@ -1830,7 +1822,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
init_swait_queue_head(&vcore->wq);
vcore->preempt_tb = TB_NIL;
vcore->lpcr = kvm->arch.lpcr;
- vcore->first_vcpuid = core * kvm->arch.smt_mode;
+ vcore->first_vcpuid = id;
vcore->kvm = kvm;
INIT_LIST_HEAD(&vcore->preempt_list);
@@ -2026,8 +2018,6 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
/*
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9.
- * On POWER9 DD1, TM doesn't work, so we make sure to
- * prevent the guest from using it.
* On POWER9, we want to virtualize the doorbell facility, so we
* turn off the HFSCR bit, which causes those instructions to trap.
*/
@@ -2048,12 +2038,26 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
mutex_lock(&kvm->lock);
vcore = NULL;
err = -EINVAL;
- core = id / kvm->arch.smt_mode;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) {
+ pr_devel("KVM: VCPU ID too high\n");
+ core = KVM_MAX_VCORES;
+ } else {
+ BUG_ON(kvm->arch.smt_mode != 1);
+ core = kvmppc_pack_vcpu_id(kvm, id);
+ }
+ } else {
+ core = id / kvm->arch.smt_mode;
+ }
if (core < KVM_MAX_VCORES) {
vcore = kvm->arch.vcores[core];
- if (!vcore) {
+ if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
+ pr_devel("KVM: collision on id %u", id);
+ vcore = NULL;
+ } else if (!vcore) {
err = -ENOMEM;
- vcore = kvmppc_vcore_create(kvm, core);
+ vcore = kvmppc_vcore_create(kvm,
+ id & ~(kvm->arch.smt_mode - 1));
kvm->arch.vcores[core] = vcore;
kvm->arch.online_vcores++;
}
@@ -3188,7 +3192,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
}
}
- prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
if (kvmppc_vcore_check_block(vc)) {
finish_swait(&vc->wq, &wait);
@@ -3311,7 +3315,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_start_thread(vcpu, vc);
trace_kvm_guest_enter(vcpu);
} else if (vc->vcore_state == VCORE_SLEEPING) {
- swake_up(&vc->wq);
+ swake_up_one(&vc->wq);
}
}
@@ -4561,6 +4565,8 @@ static int kvmppc_book3s_init_hv(void)
pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
return -ENODEV;
}
+ /* presence of intc confirmed - node can be dropped again */
+ of_node_put(np);
}
#endif
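The core of the book3s_hv.c change: on POWER9 (CPU_FTR_ARCH_300) a VCPU ID may exceed KVM_MAX_VCPUS by up to a factor of emul_smt_mode, so the vcore index is no longer id / smt_mode but comes from kvmppc_pack_vcpu_id(), with an explicit collision check (two IDs that pack to the same vcore leave vcore == NULL and fail vcpu creation). The packing function itself is not part of this hunk; a deliberately simplified sketch of the idea (the kernel's real formula distributes folded IDs into unused thread slots and is more careful than this):

    /* Simplified assumption: fold IDs above max_vcpus back into the
     * vcore array; the caller detects collisions and rejects them. */
    static unsigned int pack_vcpu_id_sketch(unsigned int id,
                                            unsigned int max_vcpus)
    {
        return id % max_vcpus;
    }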
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index d4a3f4da409b..fc6bb9630a9c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -77,7 +77,7 @@ struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
- GFP_KERNEL);
+ false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);
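cma_alloc()'s final parameter changed from a gfp mask to a bool no_warn, so this call site swaps GFP_KERNEL for false (i.e. keep warning on allocation failure). The signature being targeted, for reference:

    struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
                           bool no_warn);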
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 82f2ff9410b6..666b91c79eb4 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -27,6 +27,8 @@
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/ppc-opcode.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/*****************************************************************************
* *
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 1f22d9e977d4..a67cf1cdeda4 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/log2.h>
-#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 153988d878e8..1d14046124a0 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -32,6 +32,8 @@
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg) \
@@ -917,9 +919,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
mtspr SPRN_PID, r7
mtspr SPRN_WORT, r8
BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-BEGIN_FTR_SECTION
/* POWER8-only registers */
ld r5, VCPU_TCSCR(r4)
ld r6, VCPU_ACOP(r4)
@@ -1912,7 +1911,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
cmpwi cr2, r0, 0
- beq cr2, 4f
+ beq cr2, 2f
/*
* Radix: do eieio; tlbsync; ptesync sequence in case we
@@ -1952,11 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
bdnz 1b
ptesync
-2: /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-4:
+2:
#endif /* CONFIG_PPC_RADIX_MMU */
/*
@@ -3367,11 +3362,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
mtspr SPRN_CIABR, r0
mtspr SPRN_DAWRX, r0
- /* Flush the ERAT on radix P9 DD1 guest exit */
-BEGIN_FTR_SECTION
- PPC_INVALIDATE_ERAT
-END_FTR_SECTION_IFSET(CPU_FTR_POWER9_DD1)
-
BEGIN_MMU_FTR_SECTION
b 4f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index c18e845019ec..d71dab16dc6f 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -23,6 +23,7 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index c3b8006f0eac..614ebb4261f7 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -27,7 +27,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
@@ -515,7 +514,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
/*
* When switching from 32 to 64-bit, we may have a stale 32-bit
* magic page around, we need to flush it. Typically 32-bit magic
- * page will be instanciated when calling into RTAS. Note: We
+ * page will be instantiated when calling into RTAS. Note: We
* assume that such transition only happens while in kernel mode,
* ie, we never transition from user 32-bit to kernel 64-bit with
* a 32-bit magic page around.
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34a5adeff084..b0089e04c8c8 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -23,6 +23,7 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
+#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/exception-64s.h>
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 98ccc7ec5d48..e5c542a7c5ac 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -19,6 +19,9 @@
/* Real mode helpers */
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define GET_SHADOW_VCPU(reg) \
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index f9818d7d3381..30c2eb766954 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -38,7 +38,7 @@
* Virtual mode variants of the hcalls for use on radix/radix
* with AIL. They require the VCPU's VP to be "pushed"
*
- * We still instanciate them here because we use some of the
+ * We still instantiate them here because we use some of the
* generated utility functions as well in this file.
*/
#define XIVE_RUNTIME_CHECKS
@@ -317,6 +317,11 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
return -EBUSY;
}
+static u32 xive_vp(struct kvmppc_xive *xive, u32 server)
+{
+ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
+}
+
static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
struct kvmppc_xive_src_block *sb,
struct kvmppc_xive_irq_state *state)
@@ -362,7 +367,7 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
MASKED, state->number);
/* set old_p so we can track if an H_EOI was done */
state->old_p = true;
@@ -418,7 +423,7 @@ static void xive_finish_unmask(struct kvmppc_xive *xive,
*/
if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
xive_native_configure_irq(hw_num,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/* If an EOI is needed, do it here */
if (!state->old_p)
@@ -495,7 +500,7 @@ static int xive_target_interrupt(struct kvm *kvm,
kvmppc_xive_select_irq(state, &hw_num, NULL);
return xive_native_configure_irq(hw_num,
- xive->vp_base + server,
+ xive_vp(xive, server),
prio, state->number);
}
@@ -883,7 +888,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
* which is fine for a never started interrupt.
*/
xive_native_configure_irq(hw_irq,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -959,7 +964,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
/* Reconfigure the IPI */
xive_native_configure_irq(state->ipi_number,
- xive->vp_base + state->act_server,
+ xive_vp(xive, state->act_server),
state->act_priority, state->number);
/*
@@ -1084,7 +1089,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
pr_devel("Duplicate !\n");
return -EEXIST;
}
- if (cpu >= KVM_MAX_VCPUS) {
+ if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
pr_devel("Out of bounds !\n");
return -EINVAL;
}
@@ -1098,7 +1103,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
xc->xive = xive;
xc->vcpu = vcpu;
xc->server_num = cpu;
- xc->vp_id = xive->vp_base + cpu;
+ xc->vp_id = xive_vp(xive, cpu);
xc->mfrr = 0xff;
xc->valid = true;
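All of the vp_base + server computations in this file now funnel through the new xive_vp() helper, so the same ID packing used for vcores keeps XIVE virtual processor numbers inside the block allocated at vp_base, even when a server number exceeds KVM_MAX_VCPUS. Reusing the illustrative packer sketched after the book3s_hv.c diff:

    /* Sketch only: mirrors xive_vp() with the simplified packer. */
    static unsigned int xive_vp_sketch(unsigned int vp_base,
                                       unsigned int server,
                                       unsigned int max_vcpus)
    {
        return vp_base + pack_vcpu_id_sketch(server, max_vcpus);
    }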
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 6e41ba7ec8f4..4171ede8722b 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -25,18 +25,6 @@ static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
*/
eieio();
- /*
- * DD1 bug workaround: If PIPR is less favored than CPPR
- * ignore the interrupt or we might incorrectly lose an IPB
- * bit.
- */
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- __be64 qw1 = __x_readq(__x_tima + TM_QW1_OS);
- u8 pipr = be64_to_cpu(qw1) & 0xff;
- if (pipr >= xc->hw_cppr)
- return;
- }
-
/* Perform the acknowledge OS to register cycle. */
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
@@ -89,8 +77,15 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
/* If the XIVE supports the new "store EOI" facility, use it */
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
- else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
+ else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW)
opal_int_eoi(hw_irq);
+ else if (xd->flags & XIVE_IRQ_FLAG_LSI) {
+ /*
+ * For LSIs the HW EOI cycle is used rather than PQ bits,
+ * as they are automatically re-triggered in HW when still
+ * pending.
+ */
+ __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
} else {
uint64_t eoi_val;
@@ -102,20 +97,12 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
*
* This allows us to then do a re-trigger if Q was set
* rather than synthesizing an interrupt in software
- *
- * For LSIs, using the HW EOI cycle works around a problem
- * on P9 DD1 PHBs where the other ESB accesses don't work
- * properly.
*/
- if (xd->flags & XIVE_IRQ_FLAG_LSI)
- __x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
- else {
- eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
-
- /* Re-trigger if needed */
- if ((eoi_val & 1) && __x_trig_page(xd))
- __x_writeq(0, __x_trig_page(xd));
- }
+ eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
+
+ /* Re-trigger if needed */
+ if ((eoi_val & 1) && __x_trig_page(xd))
+ __x_writeq(0, __x_trig_page(xd));
}
}
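After the restructuring, source_eoi() tries the EOI methods in a fixed order: store EOI if the ESB supports it, firmware (OPAL) EOI, a load from the LSI EOI page for level-triggered sources, and otherwise the generic path of loading the "set PQ to 00" ESB offset and re-triggering if Q was set. A toy model of that last PQ step (the bit layout is an assumption for illustration):

    #include <stdio.h>

    /* Loading the SET_PQ_00 ESB offset returns the previous PQ value and
     * re-enables the source; if Q (bit 0 here) was set, an interrupt
     * arrived while pending and must be re-triggered by software. */
    static int eoi_needs_retrigger(unsigned int *pq)
    {
        unsigned int prev = *pq;
        *pq = 0;
        return prev & 1;
    }

    int main(void)
    {
        unsigned int pq = 0x3;   /* P=1, Q=1 */
        printf("re-trigger: %d\n", eoi_needs_retrigger(&pq));
        return 0;
    }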
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index f9f6468f4171..afd3c255a427 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include "../mm/mmu_decl.h"
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index d0b6b5788afc..d31645491a93 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -21,7 +21,6 @@
#include <asm/reg.h>
#include <asm/cputable.h>
-#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index afde788be141..75dce1ef3bc8 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -106,7 +106,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
* if mmio_vsx_tx_sx_enabled == 1, copy data between
* VSR[32..63] and memory
*/
- vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
vcpu->arch.mmio_vsx_copy_nums = 0;
vcpu->arch.mmio_vsx_offset = 0;
vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
@@ -242,8 +241,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
emulated = kvmppc_handle_vsx_load(run, vcpu,
- KVM_MMIO_REG_VSX | (op.reg & 0x1f),
- io_size_each, 1, op.type & SIGNEXT);
+ KVM_MMIO_REG_VSX|op.reg, io_size_each,
+ 1, op.type & SIGNEXT);
break;
}
#endif
@@ -363,7 +362,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
emulated = kvmppc_handle_vsx_store(run, vcpu,
- op.reg & 0x1f, io_size_each, 1);
+ op.reg, io_size_each, 1);
break;
}
#endif
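These hunks stop tracking the TX/SX bit separately: the instruction analyser's op.reg is treated as the full 0..63 VSX register number, where TX/SX is the high bit of the 6-bit field, so masking with 0x1f would lose it. A one-line sketch of that encoding:

    /* VSX encodes XT as TX<<5 | T: a 5-bit field plus the TX bit. */
    static unsigned int vsx_regno(unsigned int t_field, unsigned int tx)
    {
        return (tx << 5) | (t_field & 0x1f);   /* 0..63 */
    }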
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e8c20c5eaac..eba5756d5b41 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,7 +33,6 @@
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
-#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
@@ -880,10 +879,10 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsxval[offset] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, offset) = gpr;
}
@@ -895,11 +894,11 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
union kvmppc_one_reg val;
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsxval[0] = gpr;
val.vsxval[1] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
VCPU_VSX_FPR(vcpu, index, 0) = gpr;
VCPU_VSX_FPR(vcpu, index, 1) = gpr;
@@ -912,12 +911,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
union kvmppc_one_reg val;
int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (index >= 32) {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
val.vsx32val[2] = gpr;
val.vsx32val[3] = gpr;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
val.vsx32val[0] = gpr;
val.vsx32val[1] = gpr;
@@ -937,10 +936,10 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
if (offset == -1)
return;
- if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
- val.vval = VCPU_VSX_VR(vcpu, index);
+ if (index >= 32) {
+ val.vval = VCPU_VSX_VR(vcpu, index - 32);
val.vsx32val[offset] = gpr32;
- VCPU_VSX_VR(vcpu, index) = val.vval;
+ VCPU_VSX_VR(vcpu, index - 32) = val.vval;
} else {
dword_offset = offset / 2;
word_offset = offset % 2;
@@ -1361,10 +1360,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
break;
}
- if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (rs < 32) {
*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs);
+ reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
*val = reg.vsxval[vsx_offset];
}
break;
@@ -1378,13 +1377,13 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
break;
}
- if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ if (rs < 32) {
dword_offset = vsx_offset / 2;
word_offset = vsx_offset % 2;
reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
*val = reg.vsx32val[word_offset];
} else {
- reg.vval = VCPU_VSX_VR(vcpu, rs);
+ reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
*val = reg.vsx32val[vsx_offset];
}
break;
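The common thread in all of these powerpc.c hunks: VSX registers 0..31 overlay the FP registers while 32..63 overlay the vector (VMX) registers, so the test index >= 32 replaces the old mmio_vsx_tx_sx_enabled flag and index - 32 selects the right VR. A minimal sketch of the dispatch:

    #include <stdio.h>

    /* VSR 0..31 live in the FPR file, VSR 32..63 in the VR file. */
    static const char *vsx_backing(unsigned int index)
    {
        return index < 32 ? "FPR" : "VR";
    }

    int main(void)
    {
        printf("VSR 5  -> %s[5]\n",  vsx_backing(5));
        printf("VSR 40 -> %s[%u]\n", vsx_backing(40), 40 - 32);
        return 0;
    }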