author    Linus Torvalds <torvalds@linux-foundation.org>    2014-06-08 11:31:16 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-06-08 11:31:16 -0700
commit    3f17ea6dea8ba5668873afa54628a91aaa3fb1c0 (patch)
tree      afbeb2accd4c2199ddd705ae943995b143a0af02 /arch/powerpc
parent    1860e379875dfe7271c649058aeddffe5afd9d0d (diff)
parent    1a5700bc2d10cd379a795fd2bb377a190af5acd4 (diff)
Merge branch 'next' (accumulated 3.16 merge window patches) into master
Now that 3.15 is released, this merges the 'next' branch into 'master',
bringing us to the normal situation where my 'master' branch is the
merge window.

* accumulated work in next: (6809 commits)
  ufs: sb mutex merge + mutex_destroy
  powerpc: update comments for generic idle conversion
  cris: update comments for generic idle conversion
  idle: remove cpu_idle() forward declarations
  nbd: zero from and len fields in NBD_CMD_DISCONNECT.
  mm: convert some level-less printks to pr_*
  MAINTAINERS: adi-buildroot-devel is moderated
  MAINTAINERS: add linux-api for review of API/ABI changes
  mm/kmemleak-test.c: use pr_fmt for logging
  fs/dlm/debug_fs.c: replace seq_printf by seq_puts
  fs/dlm/lockspace.c: convert simple_str to kstr
  fs/dlm/config.c: convert simple_str to kstr
  mm: mark remap_file_pages() syscall as deprecated
  mm: memcontrol: remove unnecessary memcg argument from soft limit functions
  mm: memcontrol: clean up memcg zoneinfo lookup
  mm/memblock.c: call kmemleak directly from memblock_(alloc|free)
  mm/mempool.c: update the kmemleak stack trace for mempool allocations
  lib/radix-tree.c: update the kmemleak stack trace for radix tree allocations
  mm: introduce kmemleak_update_trace()
  mm/kmemleak.c: use %u to print ->checksum
  ...
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/configs/40x/ep405_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/canyonlands_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/currituck_defconfig | 1
-rw-r--r--  arch/powerpc/configs/44x/sam440ep_defconfig | 1
-rw-r--r--  arch/powerpc/configs/52xx/cm5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/52xx/pcm030_defconfig | 1
-rw-r--r--  arch/powerpc/configs/52xx/tqm5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc8315_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc832x_rdb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/mpc834x_itx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/83xx/sbc834x_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/ge_imp3a_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/socrates_defconfig | 1
-rw-r--r--  arch/powerpc/configs/85xx/xes_mpc85xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig | 1
-rw-r--r--  arch/powerpc/configs/amigaone_defconfig | 1
-rw-r--r--  arch/powerpc/configs/c2k_defconfig | 1
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 1
-rw-r--r--  arch/powerpc/configs/celleb_defconfig | 1
-rw-r--r--  arch/powerpc/configs/chrp32_defconfig | 1
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/linkstation_defconfig | 1
-rw-r--r--  arch/powerpc/configs/maple_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc5200_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc86xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pmac32_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc6xx_defconfig | 2
-rw-r--r--  arch/powerpc/configs/storcenter_defconfig | 1
-rw-r--r--  arch/powerpc/include/asm/atomic.h | 6
-rw-r--r--  arch/powerpc/include/asm/barrier.h | 3
-rw-r--r--  arch/powerpc/include/asm/bitops.h | 6
-rw-r--r--  arch/powerpc/include/asm/dcr-mmio.h | 4
-rw-r--r--  arch/powerpc/include/asm/disassemble.h | 34
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h | 18
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 3
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 146
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_booke.h | 5
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 9
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h | 80
-rw-r--r--  arch/powerpc/include/asm/pci.h | 5
-rw-r--r--  arch/powerpc/include/asm/pgtable.h | 6
-rw-r--r--  arch/powerpc/include/asm/prom.h | 39
-rw-r--r--  arch/powerpc/include/asm/reg.h | 12
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 1
-rw-r--r--  arch/powerpc/include/asm/topology.h | 8
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/kvm_para.h | 6
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/align.c | 34
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 11
-rw-r--r--  arch/powerpc/kernel/crash.c | 2
-rw-r--r--  arch/powerpc/kernel/epapr_paravirt.c | 7
-rw-r--r--  arch/powerpc/kernel/fadump.c | 4
-rw-r--r--  arch/powerpc/kernel/irq.c | 2
-rw-r--r--  arch/powerpc/kernel/kvm.c | 2
-rw-r--r--  arch/powerpc/kernel/paca.c | 3
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 20
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c | 3
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 3
-rw-r--r--  arch/powerpc/kernel/prom.c | 82
-rw-r--r--  arch/powerpc/kernel/rtas.c | 2
-rw-r--r--  arch/powerpc/kernel/smp.c | 31
-rw-r--r--  arch/powerpc/kvm/Kconfig | 2
-rw-r--r--  arch/powerpc/kvm/book3s.c | 106
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu.c | 41
-rw-r--r--  arch/powerpc/kvm/book3s_32_mmu_host.c | 4
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c | 39
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 15
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 116
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S | 87
-rw-r--r--  arch/powerpc/kvm/book3s_emulate.c | 156
-rw-r--r--  arch/powerpc/kvm/book3s_exports.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 48
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 3
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 60
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S | 23
-rw-r--r--  arch/powerpc/kvm/book3s_paired_singles.c | 16
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c | 238
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c | 16
-rw-r--r--  arch/powerpc/kvm/book3s_rtas.c | 29
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 25
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c | 15
-rw-r--r--  arch/powerpc/kvm/emulate.c | 24
-rw-r--r--  arch/powerpc/kvm/mpic.c | 5
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 64
-rw-r--r--  arch/powerpc/kvm/trace_pr.h | 2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c | 22
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 10
-rw-r--r--  arch/powerpc/mm/slb.c | 2
-rw-r--r--  arch/powerpc/platforms/52xx/efika.c | 4
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 4
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 12
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 4
-rw-r--r--  arch/powerpc/sysdev/dcr.c | 6
97 files changed, 1226 insertions, 606 deletions
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig
index cf06d42f2c03..e9d84b5d0ab6 100644
--- a/arch/powerpc/configs/40x/ep405_defconfig
+++ b/arch/powerpc/configs/40x/ep405_defconfig
@@ -57,7 +57,6 @@ CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_THERMAL=y
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig
index 7b8abd1b88b0..9919a91add12 100644
--- a/arch/powerpc/configs/44x/canyonlands_defconfig
+++ b/arch/powerpc/configs/44x/canyonlands_defconfig
@@ -71,7 +71,6 @@ CONFIG_I2C_IBM_IIC=y
CONFIG_SENSORS_AD7414=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/44x/currituck_defconfig b/arch/powerpc/configs/44x/currituck_defconfig
index 4192322f8a7f..47de68261443 100644
--- a/arch/powerpc/configs/44x/currituck_defconfig
+++ b/arch/powerpc/configs/44x/currituck_defconfig
@@ -71,7 +71,6 @@ CONFIG_I2C_IBM_IIC=y
# CONFIG_HWMON is not set
CONFIG_THERMAL=y
CONFIG_USB=y
-CONFIG_USB_DEBUG=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_RTC_CLASS=y
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig
index ca088cd581af..9622eb2a3e37 100644
--- a/arch/powerpc/configs/44x/sam440ep_defconfig
+++ b/arch/powerpc/configs/44x/sam440ep_defconfig
@@ -83,7 +83,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_EHCI_HCD=m
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 4f84a0b2fbf3..0dc99e141035 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -64,7 +64,6 @@ CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 2401e2554329..1d03c35540c7 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -76,7 +76,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_HCD_PPC_SOC is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 21c841e0f482..ca83ec88b114 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -75,7 +75,6 @@ CONFIG_FB_FOREIGN_ENDIAN=y
CONFIG_FB_SM501=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
index 0b73b7f9d112..4b4a2a9133a5 100644
--- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig
@@ -74,7 +74,6 @@ CONFIG_WATCHDOG=y
CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
index 97ac3b993cb6..5871395573c5 100644
--- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig
@@ -71,7 +71,6 @@ CONFIG_WATCHDOG=y
CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
index b4da1a7e6449..5adc4cea42d3 100644
--- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
+++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig
@@ -61,7 +61,6 @@ CONFIG_WATCHDOG=y
CONFIG_VIDEO_OUTPUT_CONTROL=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
index 291f8221d5a6..82b6b6c88d6a 100644
--- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
+++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig
@@ -71,7 +71,6 @@ CONFIG_SPI_BITBANG=y
CONFIG_WATCHDOG=y
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig
index a3bcda67d2d9..4ae385894c64 100644
--- a/arch/powerpc/configs/83xx/sbc834x_defconfig
+++ b/arch/powerpc/configs/83xx/sbc834x_defconfig
@@ -70,7 +70,6 @@ CONFIG_I2C_MPC=y
CONFIG_WATCHDOG=y
# CONFIG_USB_HID is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_FSL=y
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
index c9765b54dd1a..dc939de9b5b0 100644
--- a/arch/powerpc/configs/85xx/ge_imp3a_defconfig
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -158,7 +158,6 @@ CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_EHCI_FSL=y
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig
index e5147488c000..435fd408eef1 100644
--- a/arch/powerpc/configs/85xx/socrates_defconfig
+++ b/arch/powerpc/configs/85xx/socrates_defconfig
@@ -86,7 +86,6 @@ CONFIG_FONTS=y
CONFIG_FONT_8x16=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
index 07bb81df27e0..72df8ab8449e 100644
--- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
+++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig
@@ -125,7 +125,6 @@ CONFIG_SENSORS_LM90=y
CONFIG_WATCHDOG=y
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_ISP1760_HCD=y
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
index f51c7ebc181e..76f43df3dec7 100644
--- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig
@@ -123,7 +123,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/amigaone_defconfig b/arch/powerpc/configs/amigaone_defconfig
index b6d49da9c82c..8c66b13e59fc 100644
--- a/arch/powerpc/configs/amigaone_defconfig
+++ b/arch/powerpc/configs/amigaone_defconfig
@@ -108,7 +108,6 @@ CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_UHCI_HCD=y
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig
index c69f61620908..5e2aa43562b5 100644
--- a/arch/powerpc/configs/c2k_defconfig
+++ b/arch/powerpc/configs/c2k_defconfig
@@ -261,7 +261,6 @@ CONFIG_USBPCWATCHDOG=m
# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID_SUPPORT is not set
CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 22a403d78d34..4bee1a6d41d0 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -179,7 +179,6 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID=m
# CONFIG_USB_HID is not set
CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=m
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
index 895449ed971e..6d7b22f41b50 100644
--- a/arch/powerpc/configs/celleb_defconfig
+++ b/arch/powerpc/configs/celleb_defconfig
@@ -87,7 +87,6 @@ CONFIG_WATCHDOG=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
diff --git a/arch/powerpc/configs/chrp32_defconfig b/arch/powerpc/configs/chrp32_defconfig
index b20554efddcc..db5b30857e1c 100644
--- a/arch/powerpc/configs/chrp32_defconfig
+++ b/arch/powerpc/configs/chrp32_defconfig
@@ -111,7 +111,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1ea22fc24ea8..3c72fa615bd9 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -175,7 +175,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig
index 353435256f4c..b5e684640fdf 100644
--- a/arch/powerpc/configs/linkstation_defconfig
+++ b/arch/powerpc/configs/linkstation_defconfig
@@ -111,7 +111,6 @@ CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_HID=m
# CONFIG_USB_HID is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 2a5afac29861..95e545d9f25c 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -79,7 +79,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_ROOT_HUB_TT=y
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 8b682d1cf4d6..530601e8ccfe 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -113,7 +113,6 @@ CONFIG_HID_TOPSEED=y
CONFIG_HID_THRUSTMASTER=y
CONFIG_HID_ZEROPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig
index a1cc8179e9fd..35595ea74ff4 100644
--- a/arch/powerpc/configs/mpc86xx_defconfig
+++ b/arch/powerpc/configs/mpc86xx_defconfig
@@ -126,7 +126,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
index a73626b09051..553e66278010 100644
--- a/arch/powerpc/configs/pmac32_defconfig
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -279,7 +279,6 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_HID_TOPSEED=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_DYNAMIC_MINORS=y
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 175a8b99c196..c91066944842 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -964,9 +964,7 @@ CONFIG_HID_SAMSUNG=y
CONFIG_HID_SONY=y
CONFIG_HID_SUNPLUS=y
CONFIG_USB=y
-CONFIG_USB_DEBUG=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_DEVICEFS=y
# CONFIG_USB_DEVICE_CLASS is not set
CONFIG_USB_MON=y
CONFIG_USB_EHCI_HCD=m
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig
index ba39c785445d..60ad2c08caa6 100644
--- a/arch/powerpc/configs/storcenter_defconfig
+++ b/arch/powerpc/configs/storcenter_defconfig
@@ -72,7 +72,6 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_STORAGE=y
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index e3b1d41c89be..28992d012926 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -8,6 +8,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
@@ -270,11 +271,6 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
}
#define atomic_dec_if_positive atomic_dec_if_positive
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
-
#ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) }
diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
index f89da808ce31..bab79a110c7b 100644
--- a/arch/powerpc/include/asm/barrier.h
+++ b/arch/powerpc/include/asm/barrier.h
@@ -84,4 +84,7 @@ do { \
___p1; \
})
+#define smp_mb__before_atomic() smp_mb()
+#define smp_mb__after_atomic() smp_mb()
+
#endif /* _ASM_POWERPC_BARRIER_H */
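
[Note: the two defines added above replace the old per-operation barriers (smp_mb__before/after_atomic_dec/inc in atomic.h and smp_mb__before/after_clear_bit in bitops.h, both removed elsewhere in this diff) with a single pair covering all atomics and bitops. A typical call site, mirroring the crash.c hunk later in this diff:

	atomic_inc(&cpus_in_crash);
	smp_mb__after_atomic();		/* was smp_mb__after_atomic_inc() */
]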
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index a5e9a7d494d8..bd3bd573d0ae 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -51,11 +51,7 @@
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
-/*
- * clear_bit doesn't imply a memory barrier
- */
-#define smp_mb__before_clear_bit() smp_mb()
-#define smp_mb__after_clear_bit() smp_mb()
+#include <asm/barrier.h>
/* Macro for generating the ***_bits() functions */
#define DEFINE_BITOP(fn, op, prefix) \
diff --git a/arch/powerpc/include/asm/dcr-mmio.h b/arch/powerpc/include/asm/dcr-mmio.h
index acd491dbd45a..93a68b28e695 100644
--- a/arch/powerpc/include/asm/dcr-mmio.h
+++ b/arch/powerpc/include/asm/dcr-mmio.h
@@ -51,10 +51,6 @@ static inline void dcr_write_mmio(dcr_host_mmio_t host,
out_be32(host.token + ((host.base + dcr_n) * host.stride), value);
}
-extern u64 of_translate_dcr_address(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int *stride);
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_DCR_MMIO_H */
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 856f8deb557a..6330a61b875a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -81,4 +81,38 @@ static inline unsigned int get_oc(u32 inst)
{
return (inst >> 11) & 0x7fff;
}
+
+#define IS_XFORM(inst) (get_op(inst) == 31)
+#define IS_DSFORM(inst) (get_op(inst) >= 56)
+
+/*
+ * Create a DSISR value from the instruction
+ */
+static inline unsigned make_dsisr(unsigned instr)
+{
+ unsigned dsisr;
+
+
+ /* bits 6:15 --> 22:31 */
+ dsisr = (instr & 0x03ff0000) >> 16;
+
+ if (IS_XFORM(instr)) {
+ /* bits 29:30 --> 15:16 */
+ dsisr |= (instr & 0x00000006) << 14;
+ /* bit 25 --> 17 */
+ dsisr |= (instr & 0x00000040) << 8;
+ /* bits 21:24 --> 18:21 */
+ dsisr |= (instr & 0x00000780) << 3;
+ } else {
+ /* bit 5 --> 17 */
+ dsisr |= (instr & 0x04000000) >> 12;
+ /* bits 1: 4 --> 18:21 */
+ dsisr |= (instr & 0x78000000) >> 17;
+ /* bits 30:31 --> 12:13 */
+ if (IS_DSFORM(instr))
+ dsisr |= (instr & 0x00000003) << 18;
+ }
+
+ return dsisr;
+}
#endif /* __ASM_PPC_DISASSEMBLE_H__ */
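
[Note: make_dsisr() moves here from align.c so KVM can share it; the align.c hunk later in this diff deletes the original copy. A minimal user-space sketch of the same bit shuffle, with the kernel logic copied verbatim and a sample instruction chosen for illustration (0x7c6418ae is lbzx r3,r4,r3, an X-form load):

#include <stdio.h>

#define IS_XFORM(inst)	(((inst) >> 26) == 31)
#define IS_DSFORM(inst)	(((inst) >> 26) >= 56)

static unsigned make_dsisr(unsigned instr)
{
	unsigned dsisr;

	/* bits 6:15 --> 22:31 */
	dsisr = (instr & 0x03ff0000) >> 16;
	if (IS_XFORM(instr)) {
		dsisr |= (instr & 0x00000006) << 14;	/* bits 29:30 --> 15:16 */
		dsisr |= (instr & 0x00000040) << 8;	/* bit 25 --> 17 */
		dsisr |= (instr & 0x00000780) << 3;	/* bits 21:24 --> 18:21 */
	} else {
		dsisr |= (instr & 0x04000000) >> 12;	/* bit 5 --> 17 */
		dsisr |= (instr & 0x78000000) >> 17;	/* bits 1:4 --> 18:21 */
		if (IS_DSFORM(instr))
			dsisr |= (instr & 0x00000003) << 18; /* bits 30:31 --> 12:13 */
	}
	return dsisr;
}

int main(void)
{
	printf("dsisr = 0x%08x\n", make_dsisr(0x7c6418ae));
	return 0;
}
]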
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 19eb74a95b59..9601741080e5 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -102,6 +102,7 @@
#define BOOK3S_INTERRUPT_PERFMON 0xf00
#define BOOK3S_INTERRUPT_ALTIVEC 0xf20
#define BOOK3S_INTERRUPT_VSX 0xf40
+#define BOOK3S_INTERRUPT_FAC_UNAVAIL 0xf60
#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80
#define BOOK3S_IRQPRIO_SYSTEM_RESET 0
@@ -114,14 +115,15 @@
#define BOOK3S_IRQPRIO_FP_UNAVAIL 7
#define BOOK3S_IRQPRIO_ALTIVEC 8
#define BOOK3S_IRQPRIO_VSX 9
-#define BOOK3S_IRQPRIO_SYSCALL 10
-#define BOOK3S_IRQPRIO_MACHINE_CHECK 11
-#define BOOK3S_IRQPRIO_DEBUG 12
-#define BOOK3S_IRQPRIO_EXTERNAL 13
-#define BOOK3S_IRQPRIO_DECREMENTER 14
-#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 16
-#define BOOK3S_IRQPRIO_MAX 17
+#define BOOK3S_IRQPRIO_FAC_UNAVAIL 10
+#define BOOK3S_IRQPRIO_SYSCALL 11
+#define BOOK3S_IRQPRIO_MACHINE_CHECK 12
+#define BOOK3S_IRQPRIO_DEBUG 13
+#define BOOK3S_IRQPRIO_EXTERNAL 14
+#define BOOK3S_IRQPRIO_DECREMENTER 15
+#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16
+#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17
+#define BOOK3S_IRQPRIO_MAX 18
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index bb1e38a23ac7..f52f65694527 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -268,9 +268,10 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
return vcpu->arch.pc;
}
+static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
- return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
+ return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}
static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 51388befeddb..fddb72b48ce9 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -77,34 +77,122 @@ static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
return old == 0;
}
+static inline int __hpte_actual_psize(unsigned int lp, int psize)
+{
+ int i, shift;
+ unsigned int mask;
+
+ /* start from 1 ignoring MMU_PAGE_4K */
+ for (i = 1; i < MMU_PAGE_COUNT; i++) {
+
+ /* invalid penc */
+ if (mmu_psize_defs[psize].penc[i] == -1)
+ continue;
+ /*
+ * encoding bits per actual page size
+ * PTE LP actual page size
+ * rrrr rrrz >=8KB
+ * rrrr rrzz >=16KB
+ * rrrr rzzz >=32KB
+ * rrrr zzzz >=64KB
+ * .......
+ */
+ shift = mmu_psize_defs[i].shift - LP_SHIFT;
+ if (shift > LP_BITS)
+ shift = LP_BITS;
+ mask = (1 << shift) - 1;
+ if ((lp & mask) == mmu_psize_defs[psize].penc[i])
+ return i;
+ }
+ return -1;
+}
+
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
unsigned long pte_index)
{
- unsigned long rb, va_low;
+ int b_psize, a_psize;
+ unsigned int penc;
+ unsigned long rb = 0, va_low, sllp;
+ unsigned int lp = (r >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
+ if (!(v & HPTE_V_LARGE)) {
+ /* both base and actual psize is 4k */
+ b_psize = MMU_PAGE_4K;
+ a_psize = MMU_PAGE_4K;
+ } else {
+ for (b_psize = 0; b_psize < MMU_PAGE_COUNT; b_psize++) {
+
+ /* valid entries have a shift value */
+ if (!mmu_psize_defs[b_psize].shift)
+ continue;
+ a_psize = __hpte_actual_psize(lp, b_psize);
+ if (a_psize != -1)
+ break;
+ }
+ }
+ /*
+ * Ignore the top 14 bits of va
+ * v have top two bits covering segment size, hence move
+ * by 16 bits, Also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
+ * AVA field in v also have the lower 23 bits ignored.
+ * For base page size 4K we need 14 .. 65 bits (so need to
+ * collect extra 11 bits)
+ * For others we need 14..14+i
+ */
+ /* This covers 14..54 bits of va*/
rb = (v & ~0x7fUL) << 16; /* AVA field */
+ /*
+ * AVA in v had cleared lower 23 bits. We need to derive
+ * that from pteg index
+ */
va_low = pte_index >> 3;
if (v & HPTE_V_SECONDARY)
va_low = ~va_low;
- /* xor vsid from AVA */
+ /*
+ * get the vpn bits from va_low using reverse of hashing.
+ * In v we have va with 23 bits dropped and then left shifted
+ * HPTE_V_AVPN_SHIFT (7) bits. Now to find vsid we need
+ * right shift it with (SID_SHIFT - (23 - 7))
+ */
if (!(v & HPTE_V_1TB_SEG))
- va_low ^= v >> 12;
+ va_low ^= v >> (SID_SHIFT - 16);
else
- va_low ^= v >> 24;
+ va_low ^= v >> (SID_SHIFT_1T - 16);
va_low &= 0x7ff;
- if (v & HPTE_V_LARGE) {
- rb |= 1; /* L field */
- if (cpu_has_feature(CPU_FTR_ARCH_206) &&
- (r & 0xff000)) {
- /* non-16MB large page, must be 64k */
- /* (masks depend on page size) */
- rb |= 0x1000; /* page encoding in LP field */
- rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */
- }
- } else {
- /* 4kB page */
- rb |= (va_low & 0x7ff) << 12; /* remaining 11b of VA */
+
+ switch (b_psize) {
+ case MMU_PAGE_4K:
+ sllp = ((mmu_psize_defs[a_psize].sllp & SLB_VSID_L) >> 6) |
+ ((mmu_psize_defs[a_psize].sllp & SLB_VSID_LP) >> 4);
+ rb |= sllp << 5; /* AP field */
+ rb |= (va_low & 0x7ff) << 12; /* remaining 11 bits of AVA */
+ break;
+ default:
+ {
+ int aval_shift;
+ /*
+ * remaining 7bits of AVA/LP fields
+ * Also contain the rr bits of LP
+ */
+ rb |= (va_low & 0x7f) << 16;
+ /*
+ * Now clear not needed LP bits based on actual psize
+ */
+ rb &= ~((1ul << mmu_psize_defs[a_psize].shift) - 1);
+ /*
+ * AVAL field 58..77 - base_page_shift bits of va
+ * we have space for 58..64 bits, Missing bits should
+ * be zero filled. +1 is to take care of L bit shift
+ */
+ aval_shift = 64 - (77 - mmu_psize_defs[b_psize].shift) + 1;
+ rb |= ((va_low << aval_shift) & 0xfe);
+
+ rb |= 1; /* L field */
+ penc = mmu_psize_defs[b_psize].penc[a_psize];
+ rb |= penc << 12; /* LP field */
+ break;
+ }
}
rb |= (v >> 54) & 0x300; /* B field */
return rb;
@@ -112,14 +200,26 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
+ int size, a_psize;
+ /* Look at the 8 bit LP value */
+ unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
+
/* only handle 4k, 64k and 16M pages for now */
if (!(h & HPTE_V_LARGE))
- return 1ul << 12; /* 4k page */
- if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
- return 1ul << 16; /* 64k page */
- if ((l & 0xff000) == 0)
- return 1ul << 24; /* 16M page */
- return 0; /* error */
+ return 1ul << 12;
+ else {
+ for (size = 0; size < MMU_PAGE_COUNT; size++) {
+ /* valid entries have a shift value */
+ if (!mmu_psize_defs[size].shift)
+ continue;
+
+ a_psize = __hpte_actual_psize(lp, size);
+ if (a_psize != -1)
+ return 1ul << mmu_psize_defs[a_psize].shift;
+ }
+
+ }
+ return 0;
}
static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
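
[Note: compute_tlbie_rb() and hpte_page_size() now decode arbitrary base/actual page-size combinations from the HPTE LP field instead of hard-coding 4K, 64K and 16M. A standalone sketch of the LP decode done by __hpte_actual_psize(), using a made-up penc table purely for illustration (the kernel's mmu_psize_defs[] is populated from the ibm,segment-page-sizes device-tree property):

#include <stdio.h>

#define LP_SHIFT 12
#define LP_BITS  8

enum { MMU_PAGE_4K, MMU_PAGE_64K, MMU_PAGE_16M, MMU_PAGE_COUNT };

/* made-up values; real ones come from firmware */
static const struct {
	int shift;
	int penc[MMU_PAGE_COUNT];
} psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K]  = { 12, { -1,  1, -1 } },
	[MMU_PAGE_64K] = { 16, { -1,  1, -1 } },
	[MMU_PAGE_16M] = { 24, { -1, -1,  0 } },
};

static int hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1, ignoring MMU_PAGE_4K, as in the kernel code */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {
		if (psize_defs[psize].penc[i] == -1)
			continue;
		shift = psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1u << shift) - 1;
		if ((lp & mask) == (unsigned int)psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

int main(void)
{
	/* with the made-up table, LP low bits 0x1 under a 64K base
	 * segment decode to MMU_PAGE_64K (index 1) */
	printf("actual psize = %d\n", hpte_actual_psize(0x1, MMU_PAGE_64K));
	return 0;
}
]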
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 821725c1bf46..5bdfb5dd3400 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -104,6 +104,7 @@ struct kvmppc_host_state {
#ifdef CONFIG_PPC_BOOK3S_64
u64 cfar;
u64 ppr;
+ u64 host_fscr;
#endif
};
@@ -133,6 +134,7 @@ struct kvmppc_book3s_shadow_vcpu {
u64 esid;
u64 vsid;
} slb[64]; /* guest SLB */
+ u64 shadow_fscr;
#endif
};
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index 80d46b5a7efb..c7aed6105ff9 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -108,9 +108,4 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dear;
}
-
-static inline ulong kvmppc_get_msr(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.shared->msr;
-}
#endif /* __ASM_KVM_BOOKE_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 1eaea2dea174..bb66d8b8efdf 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -449,7 +449,9 @@ struct kvm_vcpu_arch {
ulong pc;
ulong ctr;
ulong lr;
+#ifdef CONFIG_PPC_BOOK3S
ulong tar;
+#endif
ulong xer;
u32 cr;
@@ -475,6 +477,7 @@ struct kvm_vcpu_arch {
ulong ppr;
ulong pspb;
ulong fscr;
+ ulong shadow_fscr;
ulong ebbhr;
ulong ebbrr;
ulong bescr;
@@ -562,6 +565,7 @@ struct kvm_vcpu_arch {
#ifdef CONFIG_PPC_BOOK3S
ulong fault_dar;
u32 fault_dsisr;
+ unsigned long intr_msr;
#endif
#ifdef CONFIG_BOOKE
@@ -622,8 +626,12 @@ struct kvm_vcpu_arch {
wait_queue_head_t cpu_run;
struct kvm_vcpu_arch_shared *shared;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ bool shared_big_endian;
+#endif
unsigned long magic_page_pa; /* phys addr to map the magic page to */
unsigned long magic_page_ea; /* effect. addr to map the magic page to */
+ bool disable_kernel_nx;
int irq_type; /* one of KVM_IRQ_* */
int irq_cpu_id;
@@ -654,7 +662,6 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
- unsigned long intr_msr;
#endif
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 4096f16502a9..4a7cc453be0b 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -449,6 +449,84 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
/*
+ * Shared struct helpers. The shared struct can be little or big endian,
+ * depending on the guest endianness. So expose helpers to all of them.
+ */
+static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
+{
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ /* Only Book3S_64 PR supports bi-endian for now */
+ return vcpu->arch.shared_big_endian;
+#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
+ /* Book3s_64 HV on little endian is always little endian */
+ return false;
+#else
+ return true;
+#endif
+}
+
+#define SHARED_WRAPPER_GET(reg, size) \
+static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ return be##size##_to_cpu(vcpu->arch.shared->reg); \
+ else \
+ return le##size##_to_cpu(vcpu->arch.shared->reg); \
+} \
+
+#define SHARED_WRAPPER_SET(reg, size) \
+static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
+{ \
+ if (kvmppc_shared_big_endian(vcpu)) \
+ vcpu->arch.shared->reg = cpu_to_be##size(val); \
+ else \
+ vcpu->arch.shared->reg = cpu_to_le##size(val); \
+} \
+
+#define SHARED_WRAPPER(reg, size) \
+ SHARED_WRAPPER_GET(reg, size) \
+ SHARED_WRAPPER_SET(reg, size) \
+
+SHARED_WRAPPER(critical, 64)
+SHARED_WRAPPER(sprg0, 64)
+SHARED_WRAPPER(sprg1, 64)
+SHARED_WRAPPER(sprg2, 64)
+SHARED_WRAPPER(sprg3, 64)
+SHARED_WRAPPER(srr0, 64)
+SHARED_WRAPPER(srr1, 64)
+SHARED_WRAPPER(dar, 64)
+SHARED_WRAPPER_GET(msr, 64)
+static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->msr = cpu_to_be64(val);
+ else
+ vcpu->arch.shared->msr = cpu_to_le64(val);
+}
+SHARED_WRAPPER(dsisr, 32)
+SHARED_WRAPPER(int_pending, 32)
+SHARED_WRAPPER(sprg4, 64)
+SHARED_WRAPPER(sprg5, 64)
+SHARED_WRAPPER(sprg6, 64)
+SHARED_WRAPPER(sprg7, 64)
+
+static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ return be32_to_cpu(vcpu->arch.shared->sr[nr]);
+ else
+ return le32_to_cpu(vcpu->arch.shared->sr[nr]);
+}
+
+static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
+{
+ if (kvmppc_shared_big_endian(vcpu))
+ vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
+ else
+ vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
+}
+
+/*
* Please call after prepare_to_enter. This function puts the lazy ee and irq
* disabled tracking state back to normal mode, without actually enabling
* interrupts.
@@ -485,7 +563,7 @@ static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
msr_64bit = MSR_SF;
#endif
- if (!(vcpu->arch.shared->msr & msr_64bit))
+ if (!(kvmppc_get_msr(vcpu) & msr_64bit))
ea = (uint32_t)ea;
return ea;
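
[Note: since the shared page can now be little or big endian, all accesses go through the generated wrappers above. Written out by hand, SHARED_WRAPPER(srr0, 64) expands to exactly this pair:

static inline u64 kvmppc_get_srr0(struct kvm_vcpu *vcpu)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be64_to_cpu(vcpu->arch.shared->srr0);
	else
		return le64_to_cpu(vcpu->arch.shared->srr0);
}

static inline void kvmppc_set_srr0(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->srr0 = cpu_to_be64(val);
	else
		vcpu->arch.shared->srr0 = cpu_to_le64(val);
}
]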
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 95145a15c708..1b0739bc14b5 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -46,11 +46,6 @@ struct pci_dev;
#define pcibios_assign_all_busses() \
(pci_has_flag(PCI_REASSIGN_ALL_BUS))
-static inline void pcibios_penalize_isa_irq(int irq, int active)
-{
- /* We don't do dynamic PCI IRQ allocation */
-}
-
#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 3ebb188c3ff5..d98c1ecc3266 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -44,6 +44,12 @@ static inline int pte_present(pte_t pte)
return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
}
+#define pte_present_nonuma pte_present_nonuma
+static inline int pte_present_nonuma(pte_t pte)
+{
+ return pte_val(pte) & (_PAGE_PRESENT);
+}
+
#define pte_numa pte_numa
static inline int pte_numa(pte_t pte)
{
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index d977b9b78696..74b79f07f041 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -26,6 +26,45 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
+#define OF_DT_BEGIN_NODE 0x1 /* Start of node, full name */
+#define OF_DT_END_NODE 0x2 /* End node */
+#define OF_DT_PROP 0x3 /* Property: name off, size,
+ * content */
+#define OF_DT_NOP 0x4 /* nop */
+#define OF_DT_END 0x9
+
+#define OF_DT_VERSION 0x10
+
+/*
+ * This is what gets passed to the kernel by prom_init or kexec
+ *
+ * The dt struct contains the device tree structure, full pathes and
+ * property contents. The dt strings contain a separate block with just
+ * the strings for the property names, and is fully page aligned and
+ * self contained in a page, so that it can be kept around by the kernel,
+ * each property name appears only once in this page (cheap compression)
+ *
+ * the mem_rsvmap contains a map of reserved ranges of physical memory,
+ * passing it here instead of in the device-tree itself greatly simplifies
+ * the job of everybody. It's just a list of u64 pairs (base/size) that
+ * ends when size is 0
+ */
+struct boot_param_header {
+ __be32 magic; /* magic word OF_DT_HEADER */
+ __be32 totalsize; /* total size of DT block */
+ __be32 off_dt_struct; /* offset to structure */
+ __be32 off_dt_strings; /* offset to strings */
+ __be32 off_mem_rsvmap; /* offset to memory reserve map */
+ __be32 version; /* format version */
+ __be32 last_comp_version; /* last compatible version */
+ /* version 2 fields below */
+ __be32 boot_cpuid_phys; /* Physical CPU id we're booting on */
+ /* version 3 fields below */
+ __be32 dt_strings_size; /* size of the DT strings block */
+ /* version 17 fields below */
+ __be32 dt_struct_size; /* size of the DT structure block */
+};
+
/*
* OF address retreival & translation
*/
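
[Note: powerpc gains its own copy of struct boot_param_header here as the generic code drops it from linux/of_fdt.h in favour of libfdt. All fields are stored big-endian, so a reader must byte-assemble them; a minimal self-contained sketch with hypothetical helper names:

#include <stdint.h>

#define OF_DT_HEADER 0xd00dfeed	/* FDT magic word */

static uint32_t be32_read(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static int fdt_header_ok(const void *blob)
{
	/* magic is the first header field */
	return be32_read(blob) == OF_DT_HEADER;
}
]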
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5d2e0bc7e03..4852bcf270f3 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -670,18 +670,20 @@
#define MMCR0_PROBLEM_DISABLE MMCR0_FCP
#define MMCR0_FCM1 0x10000000UL /* freeze counters while MSR mark = 1 */
#define MMCR0_FCM0 0x08000000UL /* freeze counters while MSR mark = 0 */
-#define MMCR0_PMXE 0x04000000UL /* performance monitor exception enable */
-#define MMCR0_FCECE 0x02000000UL /* freeze ctrs on enabled cond or event */
+#define MMCR0_PMXE ASM_CONST(0x04000000) /* perf mon exception enable */
+#define MMCR0_FCECE ASM_CONST(0x02000000) /* freeze ctrs on enabled cond or event */
#define MMCR0_TBEE 0x00400000UL /* time base exception enable */
#define MMCR0_BHRBA 0x00200000UL /* BHRB Access allowed in userspace */
#define MMCR0_EBE 0x00100000UL /* Event based branch enable */
#define MMCR0_PMCC 0x000c0000UL /* PMC control */
#define MMCR0_PMCC_U6 0x00080000UL /* PMC1-6 are R/W by user (PR) */
#define MMCR0_PMC1CE 0x00008000UL /* PMC1 count enable*/
-#define MMCR0_PMCjCE 0x00004000UL /* PMCj count enable*/
+#define MMCR0_PMCjCE ASM_CONST(0x00004000) /* PMCj count enable*/
#define MMCR0_TRIGGER 0x00002000UL /* TRIGGER enable */
-#define MMCR0_PMAO_SYNC 0x00000800UL /* PMU interrupt is synchronous */
-#define MMCR0_PMAO 0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO_SYNC ASM_CONST(0x00000800) /* PMU intr is synchronous */
+#define MMCR0_C56RUN ASM_CONST(0x00000100) /* PMC5/6 count when RUN=0 */
+/* performance monitor alert has occurred, set to 0 after handling exception */
+#define MMCR0_PMAO ASM_CONST(0x00000080)
#define MMCR0_SHRFC 0x00000040UL /* SHRre freeze conditions between threads */
#define MMCR0_FC56 0x00000010UL /* freeze counters 5 and 6 */
#define MMCR0_FCTI 0x00000008UL /* freeze counters in tags inactive mode */
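
[Note: the MMCR0 bits touched above switch from a UL suffix to ASM_CONST() so they can be used from assembly (book3s_hv_rmhandlers.S), where the UL suffix is not valid. For reference, ASM_CONST() as defined in asm/asm-compat.h:

#ifdef __ASSEMBLY__
#define ASM_CONST(x)	x
#else
#define __ASM_CONST(x)	x##UL
#define ASM_CONST(x)	__ASM_CONST(x)
#endif
]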
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 163c3b05a76e..464f1089b532 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -583,6 +583,7 @@
/* Bit definitions for L1CSR0. */
#define L1CSR0_CPE 0x00010000 /* Data Cache Parity Enable */
+#define L1CSR0_CUL 0x00000400 /* Data Cache Unable to Lock */
#define L1CSR0_CLFC 0x00000100 /* Cache Lock Bits Flash Clear */
#define L1CSR0_DCFI 0x00000002 /* Data Cache Flash Invalidate */
#define L1CSR0_CFI 0x00000002 /* Cache Flash Invalidate */
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c9202151079f..6c8a8c5a37a1 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -9,12 +9,8 @@ struct device_node;
#ifdef CONFIG_NUMA
/*
- * Before going off node we want the VM to try and reclaim from the local
- * node. It does this if the remote distance is larger than RECLAIM_DISTANCE.
- * With the default REMOTE_DISTANCE of 20 and the default RECLAIM_DISTANCE of
- * 20, we never reclaim and go off node straight away.
- *
- * To fix this we choose a smaller value of RECLAIM_DISTANCE.
+ * If zone_reclaim_mode is enabled, a RECLAIM_DISTANCE of 10 will mean that
+ * all zones on all nodes will be eligible for zone_reclaim().
*/
#define RECLAIM_DISTANCE 10
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 9b892bbd9d84..5ce5552ab9f5 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -29,7 +29,6 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
#define __ARCH_WANT_SYS_SIGNAL
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_UTIME
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index a6665be4f3ab..2bc4a9409a93 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -545,7 +545,6 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)
-#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4)
#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
@@ -555,6 +554,7 @@ struct kvm_get_htab_header {
#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)
#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
+#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index e3af3286a068..91e42f09b323 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -82,10 +82,16 @@ struct kvm_vcpu_arch_shared {
#define KVM_FEATURE_MAGIC_PAGE 1
+/* Magic page flags from host to guest */
+
#define KVM_MAGIC_FEAT_SR (1 << 0)
/* MASn, ESR, PIR, and high SPRGs */
#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
+/* Magic page flags from guest to host */
+
+#define MAGIC_PAGE_FLAG_NOT_MAPPED_NX (1 << 0)
+
#endif /* _UAPI__POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fcc9a89a4695..fab19ec25597 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -2,6 +2,7 @@
# Makefile for the linux kernel.
#
+CFLAGS_prom.o = -I$(src)/../../../scripts/dtc/libfdt
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 94908af308d8..34f55524d456 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -25,14 +25,13 @@
#include <asm/cputable.h>
#include <asm/emulated_ops.h>
#include <asm/switch_to.h>
+#include <asm/disassemble.h>
struct aligninfo {
unsigned char len;
unsigned char flags;
};
-#define IS_XFORM(inst) (((inst) >> 26) == 31)
-#define IS_DSFORM(inst) (((inst) >> 26) >= 56)
#define INVALID { 0, 0 }
@@ -192,37 +191,6 @@ static struct aligninfo aligninfo[128] = {
};
/*
- * Create a DSISR value from the instruction
- */
-static inline unsigned make_dsisr(unsigned instr)
-{
- unsigned dsisr;
-
-
- /* bits 6:15 --> 22:31 */
- dsisr = (instr & 0x03ff0000) >> 16;
-
- if (IS_XFORM(instr)) {
- /* bits 29:30 --> 15:16 */
- dsisr |= (instr & 0x00000006) << 14;
- /* bit 25 --> 17 */
- dsisr |= (instr & 0x00000040) << 8;
- /* bits 21:24 --> 18:21 */
- dsisr |= (instr & 0x00000780) << 3;
- } else {
- /* bit 5 --> 17 */
- dsisr |= (instr & 0x04000000) >> 12;
- /* bits 1: 4 --> 18:21 */
- dsisr |= (instr & 0x78000000) >> 17;
- /* bits 30:31 --> 12:13 */
- if (IS_DSFORM(instr))
- dsisr |= (instr & 0x00000003) << 18;
- }
-
- return dsisr;
-}
-
-/*
* The dcbz (data cache block zero) instruction
* gives an alignment fault if used on non-cacheable
* memory. We handle the fault mainly for the
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dba8140ebc20..93e1465c8496 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -54,6 +54,7 @@
#endif
#if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S)
#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
#endif
#ifdef CONFIG_PPC32
@@ -445,7 +446,9 @@ int main(void)
DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar));
+#endif
DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -467,6 +470,9 @@ int main(void)
DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ DEFINE(VCPU_SHAREDBE, offsetof(struct kvm_vcpu, arch.shared_big_endian));
+#endif
DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
@@ -493,7 +499,6 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
- DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
#endif
#ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -528,11 +533,13 @@ int main(void)
DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+ DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar));
DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr));
DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr));
+ DEFINE(VCPU_SHADOW_FSCR, offsetof(struct kvm_vcpu, arch.shadow_fscr));
DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb));
DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr));
DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr));
@@ -614,6 +621,7 @@ int main(void)
#ifdef CONFIG_PPC64
SVCPU_FIELD(SVCPU_SLB, slb);
SVCPU_FIELD(SVCPU_SLB_MAX, slb_max);
+ SVCPU_FIELD(SVCPU_SHADOW_FSCR, shadow_fscr);
#endif
HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
@@ -649,6 +657,7 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S_64
HSTATE_FIELD(HSTATE_CFAR, cfar);
HSTATE_FIELD(HSTATE_PPR, ppr);
+ HSTATE_FIELD(HSTATE_HOST_FSCR, host_fscr);
#endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 18d7c80ddeb9..51dbace3269b 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -81,7 +81,7 @@ void crash_ipi_callback(struct pt_regs *regs)
}
atomic_inc(&cpus_in_crash);
- smp_mb__after_atomic_inc();
+ smp_mb__after_atomic();
/*
* Starting the kdump boot.
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 7898be90f2dc..60d1a2259dbe 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -36,7 +36,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
int depth, void *data)
{
const u32 *insts;
- unsigned long len;
+ int len;
int i;
insts = of_get_flat_dt_prop(node, "hcall-instructions", &len);
@@ -47,9 +47,10 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
- patch_instruction(epapr_hypercall_start + i, insts[i]);
+ u32 inst = be32_to_cpu(insts[i]);
+ patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
- patch_instruction(epapr_ev_idle_start + i, insts[i]);
+ patch_instruction(epapr_ev_idle_start + i, inst);
#endif
}
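
[Note: the flat device tree stores property cells big-endian, so on a little-endian kernel the raw u32 must be converted before patch_instruction() writes it. An illustrative standalone sketch (0x44000022 is "sc 1", the ePAPR hypercall instruction):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* "sc 1" as stored big-endian in the device tree */
	uint8_t cell[4] = { 0x44, 0x00, 0x00, 0x22 };
	uint32_t inst = ((uint32_t)cell[0] << 24) | ((uint32_t)cell[1] << 16) |
			((uint32_t)cell[2] << 8)  |  (uint32_t)cell[3];

	printf("0x%08x\n", inst);	/* 0x44000022 on any host endianness */
	return 0;
}
]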
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 2230fd0ca3e4..7213d930918d 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -55,9 +55,9 @@ int crash_mem_ranges;
int __init early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data)
{
- __be32 *sections;
+ const __be32 *sections;
int i, num_sections;
- unsigned long size;
+ int size;
const int *token;
if (depth != 1 || strcmp(uname, "rtas") != 0)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index ca1cd7459c4a..248ee7e5bebd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -304,7 +304,7 @@ void notrace restore_interrupts(void)
* being re-enabled and generally sanitized the lazy irq state,
* and in the latter case it will leave with interrupts hard
* disabled and marked as such, so the local_irq_enable() call
- * in cpu_idle() will properly re-enable everything.
+ * in arch_cpu_idle() will properly re-enable everything.
*/
bool prep_irq_for_idle(void)
{
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index dd8695f6cb6d..33aa4ddf597d 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -417,7 +417,7 @@ static void kvm_map_magic_page(void *data)
ulong out[8];
in[0] = KVM_MAGIC_PAGE;
- in[1] = KVM_MAGIC_PAGE;
+ in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ad302f845e5d..d6e195e8cd4c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -98,6 +98,9 @@ static inline void free_lppacas(void) { }
/*
* 3 persistent SLBs are registered here. The buffer will be zero
* initially, hence will all be invaild until we actually write them.
+ *
+ * If you make the number of persistent SLB entries dynamic, please also
+ * update PR KVM to flush and restore them accordingly.
*/
static struct slb_shadow *slb_shadow;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index d9476c1fc959..24d342e91790 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -201,26 +201,6 @@ struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
return NULL;
}
-static ssize_t pci_show_devspec(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct pci_dev *pdev;
- struct device_node *np;
-
- pdev = to_pci_dev (dev);
- np = pci_device_to_OF_node(pdev);
- if (np == NULL || np->full_name == NULL)
- return 0;
- return sprintf(buf, "%s", np->full_name);
-}
-static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
-
-/* Add sysfs properties */
-int pcibios_add_platform_entries(struct pci_dev *pdev)
-{
- return device_create_file(&pdev->dev, &dev_attr_devspec);
-}
-
/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index c1e17ae68a08..5b789177aa29 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -98,8 +98,7 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
max = bus->busn_res.start;
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ if (pci_is_bridge(dev))
max = pci_scan_bridge(bus, dev,
max, pass);
}
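
[Note: this hunk and the pci_of_scan.c one below switch to the pci_is_bridge() helper added to include/linux/pci.h in this same merge window; it is equivalent to the open-coded test it replaces:

static inline bool pci_is_bridge(struct pci_dev *dev)
{
	return dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
	       dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
]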
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 83c26d829991..059e244484fe 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -362,8 +362,7 @@ static void __of_scan_bus(struct device_node *node, struct pci_bus *bus,
/* Now scan child busses */
list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
+ if (pci_is_bridge(dev)) {
of_scan_pci_bridge(dev);
}
}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 668aa4791fd7..613a860a203c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -29,11 +29,11 @@
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
-#include <linux/debugfs.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
#include <asm/prom.h>
#include <asm/rtas.h>
@@ -118,14 +118,14 @@ static void __init move_device_tree(void)
DBG("-> move_device_tree\n");
start = __pa(initial_boot_params);
- size = be32_to_cpu(initial_boot_params->totalsize);
+ size = fdt_totalsize(initial_boot_params);
if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
overlaps_crashkernel(start, size) ||
overlaps_initrd(start, size)) {
p = __va(memblock_alloc(size, PAGE_SIZE));
memcpy(p, initial_boot_params, size);
- initial_boot_params = (struct boot_param_header *)p;
+ initial_boot_params = p;
DBG("Moved device tree to 0x%p\n", p);
}
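
[Note: direct boot_param_header field reads are replaced by libfdt accessors throughout this file, which is why the Makefile hunk above adds -I$(src)/../../../scripts/dtc/libfdt. A sketch of the accessors involved (real libfdt API; the helper name is hypothetical):

#include <libfdt.h>
#include <stdio.h>

static void print_fdt_info(const void *fdt)
{
	if (fdt_check_header(fdt) == 0)
		printf("fdt v%u, %u bytes, rsvmap at +%u\n",
		       fdt_version(fdt), fdt_totalsize(fdt),
		       fdt_off_mem_rsvmap(fdt));
}
]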
@@ -163,7 +163,7 @@ static struct ibm_pa_feature {
{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};
-static void __init scan_features(unsigned long node, unsigned char *ftrs,
+static void __init scan_features(unsigned long node, const unsigned char *ftrs,
unsigned long tablelen,
struct ibm_pa_feature *fp,
unsigned long ft_size)
@@ -202,8 +202,8 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs,
static void __init check_cpu_pa_features(unsigned long node)
{
- unsigned char *pa_ftrs;
- unsigned long tablelen;
+ const unsigned char *pa_ftrs;
+ int tablelen;
pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
if (pa_ftrs == NULL)
@@ -216,7 +216,7 @@ static void __init check_cpu_pa_features(unsigned long node)
#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
- __be32 *slb_size_ptr;
+ const __be32 *slb_size_ptr;
slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL);
if (slb_size_ptr != NULL) {
@@ -257,7 +257,7 @@ static struct feature_property {
static inline void identical_pvr_fixup(unsigned long node)
{
unsigned int pvr;
- char *model = of_get_flat_dt_prop(node, "model", NULL);
+ const char *model = of_get_flat_dt_prop(node, "model", NULL);
/*
* Since 440GR(x)/440EP(x) processors have the same pvr,
@@ -295,11 +295,11 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
const __be32 *prop;
const __be32 *intserv;
int i, nthreads;
- unsigned long len;
+ int len;
int found = -1;
int found_thread = 0;
@@ -325,9 +325,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
* version 2 of the kexec param format adds the phys cpuid of
* booted proc.
*/
- if (be32_to_cpu(initial_boot_params->version) >= 2) {
+ if (fdt_version(initial_boot_params) >= 2) {
if (be32_to_cpu(intserv[i]) ==
- be32_to_cpu(initial_boot_params->boot_cpuid_phys)) {
+ fdt_boot_cpuid_phys(initial_boot_params)) {
found = boot_cpu_count;
found_thread = i;
}
@@ -392,7 +392,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
int depth, void *data)
{
- unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
+ const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
/* Use common scan routine to determine if this is the chosen node */
if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
@@ -443,8 +443,9 @@ int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname,
*/
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
- __be32 *dm, *ls, *usm;
- unsigned long l, n, flags;
+ const __be32 *dm, *ls, *usm;
+ int l;
+ unsigned long n, flags;
u64 base, size, memblock_size;
unsigned int is_kexec_kdump = 0, rngs;
@@ -564,9 +565,12 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
static void __init early_reserve_mem_dt(void)
{
- unsigned long i, len, dt_root;
+ unsigned long i, dt_root;
+ int len;
const __be32 *prop;
+ early_init_fdt_scan_reserved_mem();
+
dt_root = of_get_flat_dt_root();
prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
@@ -589,24 +593,14 @@ static void __init early_reserve_mem_dt(void)
memblock_reserve(base, size);
}
}
-
- early_init_fdt_scan_reserved_mem();
}
static void __init early_reserve_mem(void)
{
- u64 base, size;
__be64 *reserve_map;
- unsigned long self_base;
- unsigned long self_size;
reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
- be32_to_cpu(initial_boot_params->off_mem_rsvmap));
-
- /* before we do anything, lets reserve the dt blob */
- self_base = __pa((unsigned long)initial_boot_params);
- self_size = be32_to_cpu(initial_boot_params->totalsize);
- memblock_reserve(self_base, self_size);
+ fdt_off_mem_rsvmap(initial_boot_params));
/* Look for the new "reserved-regions" property in the DT */
early_reserve_mem_dt();
@@ -636,26 +630,12 @@ static void __init early_reserve_mem(void)
size_32 = be32_to_cpup(reserve_map_32++);
if (size_32 == 0)
break;
- /* skip if the reservation is for the blob */
- if (base_32 == self_base && size_32 == self_size)
- continue;
DBG("reserving: %x -> %x\n", base_32, size_32);
memblock_reserve(base_32, size_32);
}
return;
}
#endif
- DBG("Processing reserve map\n");
-
- /* Handle the reserve map in the fdt blob if it exists */
- while (1) {
- base = be64_to_cpup(reserve_map++);
- size = be64_to_cpup(reserve_map++);
- if (size == 0)
- break;
- DBG("reserving: %llx -> %llx\n", base, size);
- memblock_reserve(base, size);
- }
}
void __init early_init_devtree(void *params)
@@ -922,23 +902,3 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-
-#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
-static struct debugfs_blob_wrapper flat_dt_blob;
-
-static int __init export_flat_device_tree(void)
-{
- struct dentry *d;
-
- flat_dt_blob.data = initial_boot_params;
- flat_dt_blob.size = be32_to_cpu(initial_boot_params->totalsize);
-
- d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR,
- powerpc_debugfs_root, &flat_dt_blob);
- if (!d)
- return 1;
-
- return 0;
-}
-__initcall(export_flat_device_tree);
-#endif
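
The prom.c hunks above swap open-coded reads of the FDT header (be32_to_cpu() on struct boot_param_header fields) for the libfdt accessors fdt_totalsize(), fdt_version(), fdt_off_mem_rsvmap() and fdt_boot_cpuid_phys(). A minimal userspace sketch of why the accessor is equivalent — the header struct and my_fdt_totalsize() below are illustrative stand-ins, not the kernel's or libfdt's definitions:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

struct fdt_header {		/* leading fields of a flattened device tree blob */
	uint32_t magic;		/* big-endian 0xd00dfeed */
	uint32_t totalsize;	/* big-endian size of the whole blob */
};

/* Illustrative equivalent of fdt_totalsize(): hide the endian
 * conversion behind an accessor so callers never touch raw fields. */
static uint32_t my_fdt_totalsize(const void *fdt)
{
	const struct fdt_header *h = fdt;
	return ntohl(h->totalsize);
}

int main(void)
{
	struct fdt_header h = { htonl(0xd00dfeed), htonl(0x1000) };
	printf("totalsize = 0x%x\n", my_fdt_totalsize(&h));	/* prints 0x1000 */
	return 0;
}
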
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8cd5ed049b5d..8b4c857c1421 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1142,7 +1142,7 @@ void __init rtas_initialize(void)
int __init early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data)
{
- u32 *basep, *entryp, *sizep;
+ const u32 *basep, *entryp, *sizep;
if (depth != 1 || strcmp(uname, "rtas") != 0)
return 0;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e2a4232c5871..10ffffef0414 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -766,6 +766,28 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+#ifdef CONFIG_SCHED_SMT
+/* Scheduler flags for CPUs with an asymmetric SMT dependency */
+static int powerpc_smt_flags(void)
+{
+ int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+
+ if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+ printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+ flags |= SD_ASYM_PACKING;
+ }
+ return flags;
+}
+#endif
+
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+ { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+ { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+ { NULL, },
+};
+
void __init smp_cpus_done(unsigned int max_cpus)
{
cpumask_var_t old_mask;
@@ -790,15 +812,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
dump_numa_cpu_topology();
-}
+ set_sched_topology(powerpc_topology);
-int arch_sd_sibling_asym_packing(void)
-{
- if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
- printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
- return SD_ASYM_PACKING;
- }
- return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
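
The smp.c change above moves PowerPC from the old arch_sd_sibling_asym_packing() hook to the generic set_sched_topology() interface: the architecture describes its scheduling levels in a NULL-terminated table and registers it once. A sketch of the pattern, using only the kernel-internal symbols visible in the hunk (not buildable outside a kernel tree; names prefixed example_ are invented):

#include <linux/sched.h>

/* Per-level flags callback; may vary with CPU features, as
 * powerpc_smt_flags() does with CPU_FTR_ASYM_SMT above. */
static int example_smt_flags(void)
{
	return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
}

/* NULL-terminated, ordered from the smallest grouping outward. */
static struct sched_domain_topology_level example_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, example_smt_flags, SD_INIT_NAME(SMT) },
#endif
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

/* Registered once during SMP bringup, e.g. from smp_cpus_done(). */
static void __init example_register_topology(void)
{
	set_sched_topology(example_topology);
}
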
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 141b2027189a..d6a53b95de94 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -6,7 +6,6 @@ source "virt/kvm/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
- depends on !CPU_LITTLE_ENDIAN
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
@@ -76,6 +75,7 @@ config KVM_BOOK3S_64
config KVM_BOOK3S_64_HV
tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
+ depends on !CPU_LITTLE_ENDIAN
select KVM_BOOK3S_HV_POSSIBLE
select MMU_NOTIFIER
select CMA
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 7af190a266b3..c254c27f240e 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -85,9 +85,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
if (is_kvmppc_hv_enabled(vcpu->kvm))
return;
if (pending_now)
- vcpu->arch.shared->int_pending = 1;
+ kvmppc_set_int_pending(vcpu, 1);
else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
+ kvmppc_set_int_pending(vcpu, 0);
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
@@ -99,11 +99,11 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
if (is_kvmppc_hv_enabled(vcpu->kvm))
return false;
- crit_raw = vcpu->arch.shared->critical;
+ crit_raw = kvmppc_get_critical(vcpu);
crit_r1 = kvmppc_get_gpr(vcpu, 1);
/* Truncate crit indicators in 32 bit mode */
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
crit_raw &= 0xffffffff;
crit_r1 &= 0xffffffff;
}
@@ -111,15 +111,15 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
/* Critical section when crit == r1 */
crit = (crit_raw == crit_r1);
/* ... and we're in supervisor mode */
- crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
+ crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
return crit;
}
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
- vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu);
- vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags;
+ kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+ kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
vcpu->arch.mmu.reset_msr(vcpu);
}
@@ -145,6 +145,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
+ case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
default: prio = BOOK3S_IRQPRIO_MAX; break;
}
@@ -225,12 +226,12 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
switch (priority) {
case BOOK3S_IRQPRIO_DECREMENTER:
- deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+ deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
- deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit;
+ deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
case BOOK3S_IRQPRIO_SYSTEM_RESET:
@@ -275,6 +276,9 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
vec = BOOK3S_INTERRUPT_PERFMON;
break;
+ case BOOK3S_IRQPRIO_FAC_UNAVAIL:
+ vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
+ break;
default:
deliver = 0;
printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
@@ -343,7 +347,7 @@ pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
{
ulong mp_pa = vcpu->arch.magic_page_pa;
- if (!(vcpu->arch.shared->msr & MSR_SF))
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF))
mp_pa = (uint32_t)mp_pa;
/* Magic page override */
@@ -367,7 +371,7 @@ EXPORT_SYMBOL_GPL(kvmppc_gfn_to_pfn);
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
bool iswrite, struct kvmppc_pte *pte)
{
- int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR));
+ int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
int r;
if (relocated) {
@@ -498,18 +502,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
regs->ctr = kvmppc_get_ctr(vcpu);
regs->lr = kvmppc_get_lr(vcpu);
regs->xer = kvmppc_get_xer(vcpu);
- regs->msr = vcpu->arch.shared->msr;
- regs->srr0 = vcpu->arch.shared->srr0;
- regs->srr1 = vcpu->arch.shared->srr1;
+ regs->msr = kvmppc_get_msr(vcpu);
+ regs->srr0 = kvmppc_get_srr0(vcpu);
+ regs->srr1 = kvmppc_get_srr1(vcpu);
regs->pid = vcpu->arch.pid;
- regs->sprg0 = vcpu->arch.shared->sprg0;
- regs->sprg1 = vcpu->arch.shared->sprg1;
- regs->sprg2 = vcpu->arch.shared->sprg2;
- regs->sprg3 = vcpu->arch.shared->sprg3;
- regs->sprg4 = vcpu->arch.shared->sprg4;
- regs->sprg5 = vcpu->arch.shared->sprg5;
- regs->sprg6 = vcpu->arch.shared->sprg6;
- regs->sprg7 = vcpu->arch.shared->sprg7;
+ regs->sprg0 = kvmppc_get_sprg0(vcpu);
+ regs->sprg1 = kvmppc_get_sprg1(vcpu);
+ regs->sprg2 = kvmppc_get_sprg2(vcpu);
+ regs->sprg3 = kvmppc_get_sprg3(vcpu);
+ regs->sprg4 = kvmppc_get_sprg4(vcpu);
+ regs->sprg5 = kvmppc_get_sprg5(vcpu);
+ regs->sprg6 = kvmppc_get_sprg6(vcpu);
+ regs->sprg7 = kvmppc_get_sprg7(vcpu);
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -527,16 +531,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
kvmppc_set_lr(vcpu, regs->lr);
kvmppc_set_xer(vcpu, regs->xer);
kvmppc_set_msr(vcpu, regs->msr);
- vcpu->arch.shared->srr0 = regs->srr0;
- vcpu->arch.shared->srr1 = regs->srr1;
- vcpu->arch.shared->sprg0 = regs->sprg0;
- vcpu->arch.shared->sprg1 = regs->sprg1;
- vcpu->arch.shared->sprg2 = regs->sprg2;
- vcpu->arch.shared->sprg3 = regs->sprg3;
- vcpu->arch.shared->sprg4 = regs->sprg4;
- vcpu->arch.shared->sprg5 = regs->sprg5;
- vcpu->arch.shared->sprg6 = regs->sprg6;
- vcpu->arch.shared->sprg7 = regs->sprg7;
+ kvmppc_set_srr0(vcpu, regs->srr0);
+ kvmppc_set_srr1(vcpu, regs->srr1);
+ kvmppc_set_sprg0(vcpu, regs->sprg0);
+ kvmppc_set_sprg1(vcpu, regs->sprg1);
+ kvmppc_set_sprg2(vcpu, regs->sprg2);
+ kvmppc_set_sprg3(vcpu, regs->sprg3);
+ kvmppc_set_sprg4(vcpu, regs->sprg4);
+ kvmppc_set_sprg5(vcpu, regs->sprg5);
+ kvmppc_set_sprg6(vcpu, regs->sprg6);
+ kvmppc_set_sprg7(vcpu, regs->sprg7);
for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -570,10 +574,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = 0;
switch (reg->id) {
case KVM_REG_PPC_DAR:
- val = get_reg_val(reg->id, vcpu->arch.shared->dar);
+ val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
- val = get_reg_val(reg->id, vcpu->arch.shared->dsisr);
+ val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
@@ -627,6 +631,21 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
+ case KVM_REG_PPC_FSCR:
+ val = get_reg_val(reg->id, vcpu->arch.fscr);
+ break;
+ case KVM_REG_PPC_TAR:
+ val = get_reg_val(reg->id, vcpu->arch.tar);
+ break;
+ case KVM_REG_PPC_EBBHR:
+ val = get_reg_val(reg->id, vcpu->arch.ebbhr);
+ break;
+ case KVM_REG_PPC_EBBRR:
+ val = get_reg_val(reg->id, vcpu->arch.ebbrr);
+ break;
+ case KVM_REG_PPC_BESCR:
+ val = get_reg_val(reg->id, vcpu->arch.bescr);
+ break;
default:
r = -EINVAL;
break;
@@ -660,10 +679,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
r = 0;
switch (reg->id) {
case KVM_REG_PPC_DAR:
- vcpu->arch.shared->dar = set_reg_val(reg->id, val);
+ kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_DSISR:
- vcpu->arch.shared->dsisr = set_reg_val(reg->id, val);
+ kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
i = reg->id - KVM_REG_PPC_FPR0;
@@ -716,6 +735,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
set_reg_val(reg->id, val));
break;
#endif /* CONFIG_KVM_XICS */
+ case KVM_REG_PPC_FSCR:
+ vcpu->arch.fscr = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_TAR:
+ vcpu->arch.tar = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_EBBHR:
+ vcpu->arch.ebbhr = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_EBBRR:
+ vcpu->arch.ebbrr = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_BESCR:
+ vcpu->arch.bescr = set_reg_val(reg->id, val);
+ break;
default:
r = -EINVAL;
break;
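
The book3s.c conversion replaces every direct vcpu->arch.shared->FIELD access with kvmppc_get_*/kvmppc_set_* wrappers so the shared page can live in either endianness. A standalone sketch of the idea behind such wrappers — the struct and helper names here are invented for illustration; the real accessors are generated by macros in the kernel headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct shared_page { uint64_t msr; };		/* illustrative stand-in */
struct vcpu {
	struct shared_page *shared;
	bool shared_big_endian;			/* endianness of the shared page */
};

/* Host-endian read: byte-swap only when the page's endianness
 * differs from the host's. */
static uint64_t vcpu_get_msr(const struct vcpu *v)
{
	bool host_be = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__);
	uint64_t raw = v->shared->msr;

	return (v->shared_big_endian == host_be) ? raw : __builtin_bswap64(raw);
}

int main(void)
{
	struct shared_page s = { .msr = 0x8000000000000000ULL };
	struct vcpu v = {
		.shared = &s,
		.shared_big_endian = (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__),
	};

	printf("msr = 0x%llx\n", (unsigned long long)vcpu_get_msr(&v));
	return 0;
}
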
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 76a64ce6a5b6..93503bbdae43 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -91,7 +91,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr)
{
- return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf];
+ return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf);
}
static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
@@ -131,7 +131,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;
dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
- kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg,
+ kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,
sr_vsid(sre));
r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
@@ -160,7 +160,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
else
bat = &vcpu_book3s->ibat[i];
- if (vcpu->arch.shared->msr & MSR_PR) {
+ if (kvmppc_get_msr(vcpu) & MSR_PR) {
if (!bat->vp)
continue;
} else {
@@ -208,6 +208,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
u32 sre;
hva_t ptegp;
u32 pteg[16];
+ u32 pte0, pte1;
u32 ptem = 0;
int i;
int found = 0;
@@ -233,14 +234,16 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
}
for (i=0; i<16; i+=2) {
- if (ptem == pteg[i]) {
+ pte0 = be32_to_cpu(pteg[i]);
+ pte1 = be32_to_cpu(pteg[i + 1]);
+ if (ptem == pte0) {
u8 pp;
- pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF);
- pp = pteg[i+1] & 3;
+ pte->raddr = (pte1 & ~(0xFFFULL)) | (eaddr & 0xFFF);
+ pp = pte1 & 3;
- if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) ||
- (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR)))
+ if ((sr_kp(sre) && (kvmppc_get_msr(vcpu) & MSR_PR)) ||
+ (sr_ks(sre) && !(kvmppc_get_msr(vcpu) & MSR_PR)))
pp |= 4;
pte->may_write = false;
@@ -260,7 +263,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
}
dprintk_pte("MMU: Found PTE -> %x %x - %x\n",
- pteg[i], pteg[i+1], pp);
+ pte0, pte1, pp);
found = 1;
break;
}
@@ -269,8 +272,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Update PTE C and A bits, so the guest's swapper knows we used the
page */
if (found) {
- u32 pte_r = pteg[i+1];
- char __user *addr = (char __user *) &pteg[i+1];
+ u32 pte_r = pte1;
+ char __user *addr = (char __user *) (ptegp + (i+1) * sizeof(u32));
/*
* Use single-byte writes to update the HPTE, to
@@ -296,7 +299,8 @@ no_page_found:
to_book3s(vcpu)->sdr1, ptegp);
for (i=0; i<16; i+=2) {
dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n",
- i, pteg[i], pteg[i+1], ptem);
+ i, be32_to_cpu(pteg[i]),
+ be32_to_cpu(pteg[i+1]), ptem);
}
}
@@ -316,7 +320,7 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Magic page override */
if (unlikely(mp_ea) &&
unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
- !(vcpu->arch.shared->msr & MSR_PR)) {
+ !(kvmppc_get_msr(vcpu) & MSR_PR)) {
pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data);
pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff);
pte->raddr &= KVM_PAM;
@@ -341,13 +345,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum)
{
- return vcpu->arch.shared->sr[srnum];
+ return kvmppc_get_sr(vcpu, srnum);
}
static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
ulong value)
{
- vcpu->arch.shared->sr[srnum] = value;
+ kvmppc_set_sr(vcpu, srnum, value);
kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT);
}
@@ -367,8 +371,9 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
ulong ea = esid << SID_SHIFT;
u32 sr;
u64 gvsid = esid;
+ u64 msr = kvmppc_get_msr(vcpu);
- if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ if (msr & (MSR_DR|MSR_IR)) {
sr = find_sr(vcpu, ea);
if (sr_valid(sr))
gvsid = sr_vsid(sr);
@@ -377,7 +382,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
/* In case we only have one of MSR_IR or MSR_DR set, let's put
that in the real-mode context (and hope RM doesn't access
high memory) */
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ switch (msr & (MSR_DR|MSR_IR)) {
case 0:
*vsid = VSID_REAL | esid;
break;
@@ -397,7 +402,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
BUG();
}
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (msr & MSR_PR)
*vsid |= VSID_PR;
return 0;
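
One subtle fix in the 32-bit MMU hunks: the R/C-bit update used to take the address of the kernel-local pteg[] snapshot, so the single-byte write never reached the guest's hash table; the new code rebuilds the guest-visible address from ptegp. A contrived standalone sketch of the difference (all variable names illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint32_t guest_pteg[16] = { 0 };	/* stands in for the guest hash table page */
	uint32_t pteg[16];			/* kernel-local snapshot */
	uintptr_t ptegp = (uintptr_t)guest_pteg;	/* address the guest actually sees */
	int i = 4;

	memcpy(pteg, guest_pteg, sizeof(pteg));

	uint32_t *wrong = &pteg[i + 1];		/* old code: points at the local copy */
	uint32_t *right = (uint32_t *)(ptegp + (i + 1) * sizeof(uint32_t));

	*right |= 0x100;			/* e.g. set the referenced bit */
	printf("guest sees 0x%x, stale local copy 0x%x\n",
	       guest_pteg[i + 1], *wrong);	/* 0x100 vs 0x0 */
	return 0;
}
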
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 5fac89dfe4cd..678e75370495 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -92,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
struct kvmppc_sid_map *map;
u16 sid_map_mask;
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -279,7 +279,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
u16 sid_map_mask;
static int backwards_map = 0;
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
/* We might get collisions that trap in preceding order, so let's
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 83da1f868fd5..774a253ca4e1 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -38,7 +38,7 @@
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
- kvmppc_set_msr(vcpu, MSR_SF);
+ kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
@@ -226,7 +226,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Magic page override */
if (unlikely(mp_ea) &&
unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
- !(vcpu->arch.shared->msr & MSR_PR)) {
+ !(kvmppc_get_msr(vcpu) & MSR_PR)) {
gpte->eaddr = eaddr;
gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
@@ -269,18 +269,21 @@ do_second:
goto no_page_found;
}
- if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
+ if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp)
key = 4;
- else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
+ else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks)
key = 4;
for (i=0; i<16; i+=2) {
+ u64 pte0 = be64_to_cpu(pteg[i]);
+ u64 pte1 = be64_to_cpu(pteg[i + 1]);
+
/* Check all relevant fields of 1st dword */
- if ((pteg[i] & v_mask) == v_val) {
+ if ((pte0 & v_mask) == v_val) {
/* If large page bit is set, check pgsize encoding */
if (slbe->large &&
(vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
- pgsize = decode_pagesize(slbe, pteg[i+1]);
+ pgsize = decode_pagesize(slbe, pte1);
if (pgsize < 0)
continue;
}
@@ -297,8 +300,8 @@ do_second:
goto do_second;
}
- v = pteg[i];
- r = pteg[i+1];
+ v = be64_to_cpu(pteg[i]);
+ r = be64_to_cpu(pteg[i+1]);
pp = (r & HPTE_R_PP) | key;
if (r & HPTE_R_PP0)
pp |= 8;
@@ -310,6 +313,9 @@ do_second:
gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
gpte->page_size = pgsize;
gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+ if (unlikely(vcpu->arch.disable_kernel_nx) &&
+ !(kvmppc_get_msr(vcpu) & MSR_PR))
+ gpte->may_execute = true;
gpte->may_read = false;
gpte->may_write = false;
@@ -342,14 +348,14 @@ do_second:
* non-PAPR platforms such as mac99, and this is
* what real hardware does.
*/
- char __user *addr = (char __user *) &pteg[i+1];
+ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
r |= HPTE_R_R;
put_user(r >> 8, addr + 6);
}
if (iswrite && gpte->may_write && !(r & HPTE_R_C)) {
/* Set the dirty flag */
/* Use a single byte write */
- char __user *addr = (char __user *) &pteg[i+1];
+ char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64));
r |= HPTE_R_C;
put_user(r, addr + 7);
}
@@ -479,7 +485,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
vcpu->arch.slb[i].origv = 0;
}
- if (vcpu->arch.shared->msr & MSR_IR) {
+ if (kvmppc_get_msr(vcpu) & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
}
@@ -563,7 +569,7 @@ static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
ulong mp_ea = vcpu->arch.magic_page_ea;
- return mp_ea && !(vcpu->arch.shared->msr & MSR_PR) &&
+ return mp_ea && !(kvmppc_get_msr(vcpu) & MSR_PR) &&
(mp_ea >> SID_SHIFT) == esid;
}
#endif
@@ -576,8 +582,9 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
u64 gvsid = esid;
ulong mp_ea = vcpu->arch.magic_page_ea;
int pagesize = MMU_PAGE_64K;
+ u64 msr = kvmppc_get_msr(vcpu);
- if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ if (msr & (MSR_DR|MSR_IR)) {
slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
if (slb) {
gvsid = slb->vsid;
@@ -590,7 +597,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
}
}
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ switch (msr & (MSR_DR|MSR_IR)) {
case 0:
gvsid = VSID_REAL | esid;
break;
@@ -623,7 +630,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
gvsid |= VSID_64K;
#endif
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
*vsid = gvsid;
@@ -633,7 +640,7 @@ no_slb:
/* Catch magic page case */
if (unlikely(mp_ea) &&
unlikely(esid == (mp_ea >> SID_SHIFT)) &&
- !(vcpu->arch.shared->msr & MSR_PR)) {
+ !(kvmppc_get_msr(vcpu) & MSR_PR)) {
*vsid = VSID_REAL | esid;
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 0d513af62bba..0ac98392f363 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -58,7 +58,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
struct kvmppc_sid_map *map;
u16 sid_map_mask;
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
@@ -230,7 +230,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
u16 sid_map_mask;
static int backwards_map = 0;
- if (vcpu->arch.shared->msr & MSR_PR)
+ if (kvmppc_get_msr(vcpu) & MSR_PR)
gvsid |= VSID_PR;
/* We might get collisions that trap in preceding order, so let's
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
int found_inval = -1;
int r;
- if (!svcpu->slb_max)
- svcpu->slb_max = 1;
-
/* Are we overwriting? */
- for (i = 1; i < svcpu->slb_max; i++) {
+ for (i = 0; i < svcpu->slb_max; i++) {
if (!(svcpu->slb[i].esid & SLB_ESID_V))
found_inval = i;
else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
}
/* Found a spare entry that was invalidated before */
- if (found_inval > 0) {
+ if (found_inval >= 0) {
r = found_inval;
goto out;
}
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
ulong seg_mask = -seg_size;
int i;
- for (i = 1; i < svcpu->slb_max; i++) {
+ for (i = 0; i < svcpu->slb_max; i++) {
if ((svcpu->slb[i].esid & SLB_ESID_V) &&
(svcpu->slb[i].esid & seg_mask) == ea) {
/* Invalidate this entry */
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
- svcpu->slb_max = 1;
+ svcpu->slb_max = 0;
svcpu->slb[0].esid = 0;
svcpu_put(svcpu);
}
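
The mmu_host hunks let the shadow SLB use entry 0 (slb_max now starts at 0), which requires two matching changes: the overwrite scan starts at index 0, and the spare-slot test becomes found_inval >= 0 rather than > 0. A toy sketch of the off-by-one being fixed:

#include <stdio.h>

int main(void)
{
	int valid[4] = { 0, 1, 1, 1 };	/* slot 0 is the only free entry */
	int found_inval = -1;
	int i;

	for (i = 0; i < 4; i++)		/* old code started at i = 1 */
		if (!valid[i])
			found_inval = i;

	if (found_inval >= 0)		/* old code tested > 0, rejecting slot 0 */
		printf("reusing invalid slot %d\n", found_inval);
	else
		printf("no spare slot found\n");
	return 0;
}
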
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index fb25ebc0af0c..80561074078d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -52,7 +52,7 @@ static void kvmppc_rmap_reset(struct kvm *kvm);
long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
- unsigned long hpt;
+ unsigned long hpt = 0;
struct revmap_entry *rev;
struct page *page = NULL;
long order = KVM_DEFAULT_HPT_ORDER;
@@ -64,22 +64,11 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
kvm->arch.hpt_cma_alloc = 0;
- /*
- * try first to allocate it from the kernel page allocator.
- * We keep the CMA reserved for failed allocation.
- */
- hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
- __GFP_NOWARN, order - PAGE_SHIFT);
-
- /* Next try to allocate from the preallocated pool */
- if (!hpt) {
- VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
- page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
- if (page) {
- hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
- kvm->arch.hpt_cma_alloc = 1;
- } else
- --order;
+ VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
+ page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ if (page) {
+ hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+ kvm->arch.hpt_cma_alloc = 1;
}
/* Lastly try successively smaller sizes from the page allocator */
@@ -596,6 +585,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
unsigned long *hptep, hpte[3], r;
unsigned long mmu_seq, psize, pte_size;
+ unsigned long gpa_base, gfn_base;
unsigned long gpa, gfn, hva, pfn;
struct kvm_memory_slot *memslot;
unsigned long *rmap;
@@ -634,7 +624,9 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
/* Translate the logical address and get the page */
psize = hpte_page_size(hpte[0], r);
- gpa = (r & HPTE_R_RPN & ~(psize - 1)) | (ea & (psize - 1));
+ gpa_base = r & HPTE_R_RPN & ~(psize - 1);
+ gfn_base = gpa_base >> PAGE_SHIFT;
+ gpa = gpa_base | (ea & (psize - 1));
gfn = gpa >> PAGE_SHIFT;
memslot = gfn_to_memslot(kvm, gfn);
@@ -646,6 +638,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!kvm->arch.using_mmu_notifiers)
return -EFAULT; /* should never get here */
+ /*
+ * This should never happen, because of the slot_is_aligned()
+ * check in kvmppc_do_h_enter().
+ */
+ if (gfn_base < memslot->base_gfn)
+ return -EFAULT;
+
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
smp_rmb();
@@ -738,7 +737,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
goto out_unlock;
hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
- rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ /* Always put the HPTE in the rmap chain for the page base address */
+ rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
lock_rmap(rmap);
/* Check if we might have been invalidated; let the guest retry if so */
@@ -1060,22 +1060,33 @@ void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}
-static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
+static int vcpus_running(struct kvm *kvm)
+{
+ return atomic_read(&kvm->arch.vcpus_running) != 0;
+}
+
+/*
+ * Returns the number of system pages that are dirty.
+ * This can be more than 1 if we find a huge-page HPTE.
+ */
+static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
struct revmap_entry *rev = kvm->arch.revmap;
unsigned long head, i, j;
+ unsigned long n;
+ unsigned long v, r;
unsigned long *hptep;
- int ret = 0;
+ int npages_dirty = 0;
retry:
lock_rmap(rmapp);
if (*rmapp & KVMPPC_RMAP_CHANGED) {
*rmapp &= ~KVMPPC_RMAP_CHANGED;
- ret = 1;
+ npages_dirty = 1;
}
if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
unlock_rmap(rmapp);
- return ret;
+ return npages_dirty;
}
i = head = *rmapp & KVMPPC_RMAP_INDEX;
@@ -1083,7 +1094,22 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
j = rev[i].forw;
- if (!(hptep[1] & HPTE_R_C))
+ /*
+ * Checking the C (changed) bit here is racy since there
+ * is no guarantee about when the hardware writes it back.
+ * If the HPTE is not writable then it is stable since the
+ * page can't be written to, and we would have done a tlbie
+ * (which forces the hardware to complete any writeback)
+ * when making the HPTE read-only.
+ * If vcpus are running then this call is racy anyway
+ * since the page could get dirtied subsequently, so we
+ * expect there to be a further call which would pick up
+ * any delayed C bit writeback.
+ * Otherwise we need to do the tlbie even if C==0 in
+ * order to pick up any delayed writeback of C.
+ */
+ if (!(hptep[1] & HPTE_R_C) &&
+ (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
continue;
if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
@@ -1095,24 +1121,33 @@ static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
}
/* Now check and modify the HPTE */
- if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
- /* need to make it temporarily absent to clear C */
- hptep[0] |= HPTE_V_ABSENT;
- kvmppc_invalidate_hpte(kvm, hptep, i);
- hptep[1] &= ~HPTE_R_C;
- eieio();
- hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
+ if (!(hptep[0] & HPTE_V_VALID))
+ continue;
+
+ /* need to make it temporarily absent so C is stable */
+ hptep[0] |= HPTE_V_ABSENT;
+ kvmppc_invalidate_hpte(kvm, hptep, i);
+ v = hptep[0];
+ r = hptep[1];
+ if (r & HPTE_R_C) {
+ hptep[1] = r & ~HPTE_R_C;
if (!(rev[i].guest_rpte & HPTE_R_C)) {
rev[i].guest_rpte |= HPTE_R_C;
note_hpte_modification(kvm, &rev[i]);
}
- ret = 1;
+ n = hpte_page_size(v, r);
+ n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (n > npages_dirty)
+ npages_dirty = n;
+ eieio();
}
- hptep[0] &= ~HPTE_V_HVLOCK;
+ v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
+ v |= HPTE_V_VALID;
+ hptep[0] = v;
} while ((i = j) != head);
unlock_rmap(rmapp);
- return ret;
+ return npages_dirty;
}
static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
@@ -1136,15 +1171,22 @@ static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long *map)
{
- unsigned long i;
+ unsigned long i, j;
unsigned long *rmapp;
struct kvm_vcpu *vcpu;
preempt_disable();
rmapp = memslot->arch.rmap;
for (i = 0; i < memslot->npages; ++i) {
- if (kvm_test_clear_dirty(kvm, rmapp) && map)
- __set_bit_le(i, map);
+ int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
+ /*
+ * Note that if npages > 0 then i must be a multiple of npages,
+ * since we always put huge-page HPTEs in the rmap chain
+ * corresponding to their page base address.
+ */
+ if (npages && map)
+ for (j = i; npages; ++j, --npages)
+ __set_bit_le(j, map);
++rmapp;
}
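
With huge-page HPTEs, one rmap chain can now dirty several system pages at once, so kvmppc_hv_get_dirty_log() sets npages consecutive bits starting at the page base index (which, per the comment above, is always npages-aligned). A small standalone sketch of that bitmap update, with a simplified stand-in for __set_bit_le():

#include <stdio.h>

/* Minimal stand-in for the kernel's __set_bit_le() */
static void set_bit_le(unsigned long nr, unsigned char *map)
{
	map[nr / 8] |= 1u << (nr % 8);
}

int main(void)
{
	unsigned char map[16] = { 0 };
	unsigned long i = 8;	/* base page index, a multiple of npages */
	int npages = 4;		/* e.g. one 16K huge HPTE on a 4K-page host */
	unsigned long j;

	for (j = i; npages; ++j, --npages)
		set_bit_le(j, map);

	printf("map[1] = 0x%02x\n", map[1]);	/* bits 8..11 set -> 0x0f */
	return 0;
}
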
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 4f12e8f0c718..3589c4e3d49b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,30 +17,9 @@
* Authors: Alexander Graf <agraf@suse.de>
*/
-#ifdef __LITTLE_ENDIAN__
-#error Need to fix SLB shadow accesses in little endian mode
-#endif
-
-#define SHADOW_SLB_ESID(num) (SLBSHADOW_SAVEAREA + (num * 0x10))
-#define SHADOW_SLB_VSID(num) (SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
-#define UNBOLT_SLB_ENTRY(num) \
- ld r9, SHADOW_SLB_ESID(num)(r12); \
- /* Invalid? Skip. */; \
- rldicl. r0, r9, 37, 63; \
- beq slb_entry_skip_ ## num; \
- xoris r9, r9, SLB_ESID_V@h; \
- std r9, SHADOW_SLB_ESID(num)(r12); \
- slb_entry_skip_ ## num:
-
-#define REBOLT_SLB_ENTRY(num) \
- ld r10, SHADOW_SLB_ESID(num)(r11); \
- cmpdi r10, 0; \
- beq slb_exit_skip_ ## num; \
- oris r10, r10, SLB_ESID_V@h; \
- ld r9, SHADOW_SLB_VSID(num)(r11); \
- slbmte r9, r10; \
- std r10, SHADOW_SLB_ESID(num)(r11); \
-slb_exit_skip_ ## num:
+#define SHADOW_SLB_ENTRY_LEN 0x10
+#define OFFSET_ESID(x) (SHADOW_SLB_ENTRY_LEN * x)
+#define OFFSET_VSID(x) ((SHADOW_SLB_ENTRY_LEN * x) + 8)
/******************************************************************************
* *
@@ -64,20 +43,15 @@ slb_exit_skip_ ## num:
* SVCPU[LR] = guest LR
*/
- /* Remove LPAR shadow entries */
+BEGIN_FW_FTR_SECTION
-#if SLB_NUM_BOLTED == 3
+ /* Declare SLB shadow as 0 entries big */
- ld r12, PACA_SLBSHADOWPTR(r13)
+ ld r11, PACA_SLBSHADOWPTR(r13)
+ li r8, 0
+ stb r8, 3(r11)
- /* Remove bolted entries */
- UNBOLT_SLB_ENTRY(0)
- UNBOLT_SLB_ENTRY(1)
- UNBOLT_SLB_ENTRY(2)
-
-#else
-#error unknown number of bolted entries
-#endif
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
/* Flush SLB */
@@ -100,7 +74,7 @@ slb_loop_enter:
ld r10, 0(r11)
- rldicl. r0, r10, 37, 63
+ andis. r9, r10, SLB_ESID_V@h
beq slb_loop_enter_skip
ld r9, 8(r11)
@@ -137,23 +111,42 @@ slb_do_enter:
*
*/
- /* Restore bolted entries from the shadow and fix it along the way */
+ /* Remove all SLB entries that are in use. */
- /* We don't store anything in entry 0, so we don't need to take care of it */
+ li r0, 0
+ slbmte r0, r0
slbia
- isync
-#if SLB_NUM_BOLTED == 3
+ /* Restore bolted entries from the shadow */
ld r11, PACA_SLBSHADOWPTR(r13)
- REBOLT_SLB_ENTRY(0)
- REBOLT_SLB_ENTRY(1)
- REBOLT_SLB_ENTRY(2)
-
-#else
-#error unknown number of bolted entries
-#endif
+BEGIN_FW_FTR_SECTION
+
+ /* Declare SLB shadow as SLB_NUM_BOLTED entries big */
+
+ li r8, SLB_NUM_BOLTED
+ stb r8, 3(r11)
+
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
+
+ /* Manually load all entries from shadow SLB */
+
+ li r8, SLBSHADOW_SAVEAREA
+ li r7, SLBSHADOW_SAVEAREA + 8
+
+ .rept SLB_NUM_BOLTED
+ LDX_BE r10, r11, r8
+ cmpdi r10, 0
+ beq 1f
+ LDX_BE r9, r11, r7
+ slbmte r9, r10
+1: addi r7, r7, SHADOW_SLB_ENTRY_LEN
+ addi r8, r8, SHADOW_SLB_ENTRY_LEN
+ .endr
+
+ isync
+ sync
slb_do_exit:
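
The .rept loop above replaces the unrolled REBOLT macros with a byte-order-aware walk (LDX_BE) over the shadow-SLB save area. A rough C rendering of the same loop, under the assumption of a 0x10-byte ESID/VSID entry layout as defined by OFFSET_ESID/OFFSET_VSID (slbmte() here is a printing stub, not the real instruction):

#include <stdint.h>
#include <stdio.h>

#define SLB_NUM_BOLTED	3

struct slb_shadow_entry { uint64_t esid, vsid; };	/* 0x10 bytes per entry */

static void slbmte(uint64_t vsid, uint64_t esid)
{
	printf("slbmte vsid=0x%llx esid=0x%llx\n",
	       (unsigned long long)vsid, (unsigned long long)esid);
}

int main(void)
{
	struct slb_shadow_entry save[SLB_NUM_BOLTED] = {
		{ 0x800, 0x1 }, { 0 }, { 0xc00, 0x2 },
	};
	int i;

	for (i = 0; i < SLB_NUM_BOLTED; i++) {
		if (!save[i].esid)	/* cmpdi r10, 0; beq 1f */
			continue;
		slbmte(save[i].vsid, save[i].esid);
	}
	return 0;
}
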
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 99d40f8977e8..3f295269af37 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -80,7 +80,7 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
return false;
/* Limit user space to its own small SPR set */
- if ((vcpu->arch.shared->msr & MSR_PR) && level > PRIV_PROBLEM)
+ if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
return false;
return true;
@@ -94,14 +94,31 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
int rs = get_rs(inst);
int ra = get_ra(inst);
int rb = get_rb(inst);
+ u32 inst_sc = 0x44000002;
switch (get_op(inst)) {
+ case 0:
+ emulated = EMULATE_FAIL;
+ if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
+ (inst == swab32(inst_sc))) {
+ /*
+ * This is the byte reversed syscall instruction of our
+ * hypercall handler. Early versions of LE Linux didn't
+ * swap the instructions correctly and ended up in
+ * illegal instructions.
+ * Just always fail hypercalls on these broken systems.
+ */
+ kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ emulated = EMULATE_DONE;
+ }
+ break;
case 19:
switch (get_xop(inst)) {
case OP_19_XOP_RFID:
case OP_19_XOP_RFI:
- kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0);
- kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
+ kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
+ kvmppc_set_msr(vcpu, kvmppc_get_srr1(vcpu));
*advance = 0;
break;
@@ -113,16 +130,16 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
case OP_31_XOP_MFMSR:
- kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
+ kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
break;
case OP_31_XOP_MTMSRD:
{
ulong rs_val = kvmppc_get_gpr(vcpu, rs);
if (inst & 0x10000) {
- ulong new_msr = vcpu->arch.shared->msr;
+ ulong new_msr = kvmppc_get_msr(vcpu);
new_msr &= ~(MSR_RI | MSR_EE);
new_msr |= rs_val & (MSR_RI | MSR_EE);
- vcpu->arch.shared->msr = new_msr;
+ kvmppc_set_msr_fast(vcpu, new_msr);
} else
kvmppc_set_msr(vcpu, rs_val);
break;
@@ -179,7 +196,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
- if ((vcpu->arch.shared->msr & MSR_PR) ||
+ if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
!vcpu->arch.papr_enabled) {
emulated = EMULATE_FAIL;
break;
@@ -261,14 +278,14 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
ra_val = kvmppc_get_gpr(vcpu, ra);
addr = (ra_val + rb_val) & ~31ULL;
- if (!(vcpu->arch.shared->msr & MSR_SF))
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF))
addr &= 0xffffffff;
vaddr = addr;
r = kvmppc_st(vcpu, &addr, 32, zeros, true);
if ((r == -ENOENT) || (r == -EPERM)) {
*advance = 0;
- vcpu->arch.shared->dar = vaddr;
+ kvmppc_set_dar(vcpu, vaddr);
vcpu->arch.fault_dar = vaddr;
dsisr = DSISR_ISSTORE;
@@ -277,7 +294,7 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
else if (r == -EPERM)
dsisr |= DSISR_PROTFAULT;
- vcpu->arch.shared->dsisr = dsisr;
+ kvmppc_set_dsisr(vcpu, dsisr);
vcpu->arch.fault_dsisr = dsisr;
kvmppc_book3s_queue_irqprio(vcpu,
@@ -356,10 +373,10 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
to_book3s(vcpu)->sdr1 = spr_val;
break;
case SPRN_DSISR:
- vcpu->arch.shared->dsisr = spr_val;
+ kvmppc_set_dsisr(vcpu, spr_val);
break;
case SPRN_DAR:
- vcpu->arch.shared->dar = spr_val;
+ kvmppc_set_dar(vcpu, spr_val);
break;
case SPRN_HIOR:
to_book3s(vcpu)->hior = spr_val;
@@ -438,6 +455,31 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_GQR7:
to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
break;
+ case SPRN_FSCR:
+ vcpu->arch.fscr = spr_val;
+ break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case SPRN_BESCR:
+ vcpu->arch.bescr = spr_val;
+ break;
+ case SPRN_EBBHR:
+ vcpu->arch.ebbhr = spr_val;
+ break;
+ case SPRN_EBBRR:
+ vcpu->arch.ebbrr = spr_val;
+ break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case SPRN_TFHAR:
+ vcpu->arch.tfhar = spr_val;
+ break;
+ case SPRN_TEXASR:
+ vcpu->arch.texasr = spr_val;
+ break;
+ case SPRN_TFIAR:
+ vcpu->arch.tfiar = spr_val;
+ break;
+#endif
+#endif
case SPRN_ICTC:
case SPRN_THRM1:
case SPRN_THRM2:
@@ -455,6 +497,13 @@ int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
case SPRN_DABR:
+#ifdef CONFIG_PPC_BOOK3S_64
+ case SPRN_MMCRS:
+ case SPRN_MMCRA:
+ case SPRN_MMCR0:
+ case SPRN_MMCR1:
+ case SPRN_MMCR2:
+#endif
break;
unprivileged:
default:
@@ -493,10 +542,10 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
*spr_val = to_book3s(vcpu)->sdr1;
break;
case SPRN_DSISR:
- *spr_val = vcpu->arch.shared->dsisr;
+ *spr_val = kvmppc_get_dsisr(vcpu);
break;
case SPRN_DAR:
- *spr_val = vcpu->arch.shared->dar;
+ *spr_val = kvmppc_get_dar(vcpu);
break;
case SPRN_HIOR:
*spr_val = to_book3s(vcpu)->hior;
@@ -538,6 +587,31 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
case SPRN_GQR7:
*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
break;
+ case SPRN_FSCR:
+ *spr_val = vcpu->arch.fscr;
+ break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case SPRN_BESCR:
+ *spr_val = vcpu->arch.bescr;
+ break;
+ case SPRN_EBBHR:
+ *spr_val = vcpu->arch.ebbhr;
+ break;
+ case SPRN_EBBRR:
+ *spr_val = vcpu->arch.ebbrr;
+ break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case SPRN_TFHAR:
+ *spr_val = vcpu->arch.tfhar;
+ break;
+ case SPRN_TEXASR:
+ *spr_val = vcpu->arch.texasr;
+ break;
+ case SPRN_TFIAR:
+ *spr_val = vcpu->arch.tfiar;
+ break;
+#endif
+#endif
case SPRN_THRM1:
case SPRN_THRM2:
case SPRN_THRM3:
@@ -553,6 +627,14 @@ int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
case SPRN_DABR:
+#ifdef CONFIG_PPC_BOOK3S_64
+ case SPRN_MMCRS:
+ case SPRN_MMCRA:
+ case SPRN_MMCR0:
+ case SPRN_MMCR1:
+ case SPRN_MMCR2:
+ case SPRN_TIR:
+#endif
*spr_val = 0;
break;
default:
@@ -569,48 +651,17 @@ unprivileged:
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
- u32 dsisr = 0;
-
- /*
- * This is what the spec says about DSISR bits (not mentioned = 0):
- *
- * 12:13 [DS] Set to bits 30:31
- * 15:16 [X] Set to bits 29:30
- * 17 [X] Set to bit 25
- * [D/DS] Set to bit 5
- * 18:21 [X] Set to bits 21:24
- * [D/DS] Set to bits 1:4
- * 22:26 Set to bits 6:10 (RT/RS/FRT/FRS)
- * 27:31 Set to bits 11:15 (RA)
- */
-
- switch (get_op(inst)) {
- /* D-form */
- case OP_LFS:
- case OP_LFD:
- case OP_STFD:
- case OP_STFS:
- dsisr |= (inst >> 12) & 0x4000; /* bit 17 */
- dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
- break;
- /* X-form */
- case 31:
- dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
- dsisr |= (inst << 8) & 0x04000; /* bit 17 */
- dsisr |= (inst << 3) & 0x03c00; /* bits 18:21 */
- break;
- default:
- printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
- break;
- }
-
- dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
-
- return dsisr;
+ return make_dsisr(inst);
}
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Linux's fix_alignment() assumes that DAR is valid, so we can too
+ */
+ return vcpu->arch.fault_dar;
+#else
ulong dar = 0;
ulong ra = get_ra(inst);
ulong rb = get_rb(inst);
@@ -635,4 +686,5 @@ ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
}
return dar;
+#endif
}
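
The new opcode-0 case in book3s_emulate.c exists because an LE guest that failed to byte-swap its instruction stream presents the hypercall's sc instruction (0x44000002) as 0x02000044, which decodes as primary opcode 0. A standalone sketch of the detection (GET_OP is a simplified stand-in for the kernel's get_op()):

#include <stdint.h>
#include <stdio.h>

#define GET_OP(inst)	((inst) >> 26)	/* primary opcode: top 6 bits */

int main(void)
{
	const uint32_t inst_sc = 0x44000002;	/* the sc instruction */
	uint32_t fetched = 0x02000044;		/* mis-swapped by a broken LE guest */

	if (GET_OP(fetched) == 0 && fetched == __builtin_bswap32(inst_sc))
		puts("byte-reversed syscall: fail the hypercall with EV_UNIMPLEMENTED");
	return 0;
}
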
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
index 20d4ea8e656d..0d013fbc2e13 100644
--- a/arch/powerpc/kvm/book3s_exports.c
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -18,6 +18,7 @@
*/
#include <linux/export.h>
+#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8227dba5af0f..aba05bbb3e74 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -879,24 +879,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_IAMR:
*val = get_reg_val(id, vcpu->arch.iamr);
break;
- case KVM_REG_PPC_FSCR:
- *val = get_reg_val(id, vcpu->arch.fscr);
- break;
case KVM_REG_PPC_PSPB:
*val = get_reg_val(id, vcpu->arch.pspb);
break;
- case KVM_REG_PPC_EBBHR:
- *val = get_reg_val(id, vcpu->arch.ebbhr);
- break;
- case KVM_REG_PPC_EBBRR:
- *val = get_reg_val(id, vcpu->arch.ebbrr);
- break;
- case KVM_REG_PPC_BESCR:
- *val = get_reg_val(id, vcpu->arch.bescr);
- break;
- case KVM_REG_PPC_TAR:
- *val = get_reg_val(id, vcpu->arch.tar);
- break;
case KVM_REG_PPC_DPDES:
*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
@@ -1091,24 +1076,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_IAMR:
vcpu->arch.iamr = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_FSCR:
- vcpu->arch.fscr = set_reg_val(id, *val);
- break;
case KVM_REG_PPC_PSPB:
vcpu->arch.pspb = set_reg_val(id, *val);
break;
- case KVM_REG_PPC_EBBHR:
- vcpu->arch.ebbhr = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_EBBRR:
- vcpu->arch.ebbrr = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_BESCR:
- vcpu->arch.bescr = set_reg_val(id, *val);
- break;
- case KVM_REG_PPC_TAR:
- vcpu->arch.tar = set_reg_val(id, *val);
- break;
case KVM_REG_PPC_DPDES:
vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
@@ -1280,6 +1250,17 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
goto free_vcpu;
vcpu->arch.shared = &vcpu->arch.shregs;
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+ /*
+ * The shared struct is never shared on HV,
+ * so we can always use host endianness
+ */
+#ifdef __BIG_ENDIAN__
+ vcpu->arch.shared_big_endian = true;
+#else
+ vcpu->arch.shared_big_endian = false;
+#endif
+#endif
vcpu->arch.mmcr[0] = MMCR0_FC;
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
@@ -1949,6 +1930,13 @@ static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
* support pte_enc here
*/
(*sps)->enc[0].pte_enc = def->penc[linux_psize];
+ /*
+ * Add 16MB MPSS support if host supports it
+ */
+ if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
+ (*sps)->enc[1].page_shift = 24;
+ (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
+ }
(*sps)++;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 8fcc36306a02..6e6224318c36 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -42,13 +42,14 @@ static int global_invalidates(struct kvm *kvm, unsigned long flags)
/*
* If there is only one vcore, and it's currently running,
+ * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
* we can use tlbiel as long as we mark all other physical
* cores as potentially having stale TLB entries for this lpid.
* If we're not using MMU notifiers, we never take pages away
* from the guest, so we can use tlbiel if requested.
* Otherwise, don't use tlbiel.
*/
- if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcore)
+ if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
global = 0;
else if (kvm->arch.using_mmu_notifiers)
global = 1;
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 07c8b5b0f9d2..974793435a2e 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -86,6 +86,12 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
lbz r4, LPPACA_PMCINUSE(r3)
cmpwi r4, 0
beq 23f /* skip if not */
+BEGIN_FTR_SECTION
+ ld r3, HSTATE_MMCR(r13)
+ andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r4, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r3, HSTATE_PMC(r13)
lwz r4, HSTATE_PMC + 4(r13)
lwz r5, HSTATE_PMC + 8(r13)
@@ -737,6 +743,12 @@ skip_tm:
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
isync
+BEGIN_FTR_SECTION
+ ld r3, VCPU_MMCR(r4)
+ andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r5, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
lwz r6, VCPU_PMC + 8(r4)
@@ -1439,6 +1451,30 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
25:
/* Save PMU registers if requested */
/* r8 and cr0.eq are live here */
+BEGIN_FTR_SECTION
+ /*
+ * POWER8 seems to have a hardware bug where setting
+ * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
+ * when some counters are already negative fails to raise
+ * a performance monitor alert (and hence an interrupt).
+ * The effect of this is that when saving the PMU state,
+ * if there is no PMU alert pending when we read MMCR0
+ * before freezing the counters, but one becomes pending
+ * before we read the counters, we lose it.
+ * To work around this, we need a way to freeze the counters
+ * before reading MMCR0. Normally, freezing the counters
+ * is done by writing MMCR0 (to set MMCR0[FC]) which
+ * unavoidably writes MMCR0[PMAO] as well. On POWER8,
+ * we can also freeze the counters using MMCR2, by writing
+ * 1s to all the counter freeze condition bits (there are
+ * 9 bits each for 6 counters).
+ */
+ li r3, -1 /* set all freeze bits */
+ clrrdi r3, r3, 10
+ mfspr r10, SPRN_MMCR2
+ mtspr SPRN_MMCR2, r3
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r3, 1
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
@@ -1462,6 +1498,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
+BEGIN_FTR_SECTION
+ std r10, VCPU_MMCR + 24(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
std r7, VCPU_SIAR(r9)
std r8, VCPU_SDAR(r9)
mfspr r3, SPRN_PMC1
@@ -1485,12 +1524,10 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
BEGIN_FTR_SECTION
- mfspr r4, SPRN_MMCR2
mfspr r5, SPRN_SIER
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
mfspr r8, SPRN_MMCRS
- std r4, VCPU_MMCR + 24(r9)
std r5, VCPU_SIER(r9)
stw r6, VCPU_PMC + 24(r9)
stw r7, VCPU_PMC + 28(r9)
@@ -2227,6 +2264,7 @@ machine_check_realmode:
beq mc_cont
/* If not, deliver a machine check. SRR0/1 are already set */
li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
+ ld r11, VCPU_MSR(r9)
bl kvmppc_msr_interrupt
b fast_interrupt_c_return
@@ -2431,3 +2469,21 @@ kvmppc_msr_interrupt:
li r0, 1
1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
blr
+
+/*
+ * This works around a hardware bug on POWER8E processors, where
+ * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
+ * performance monitor interrupt. Instead, when we need to have
+ * an interrupt pending, we have to arrange for a counter to overflow.
+ */
+kvmppc_fix_pmao:
+ li r3, 0
+ mtspr SPRN_MMCR2, r3
+ lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
+ ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
+ mtspr SPRN_MMCR0, r3
+ lis r3, 0x7fff
+ ori r3, r3, 0xffff
+ mtspr SPRN_PMC6, r3
+ isync
+ blr
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3533c999194a..e2c29e381dc7 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -104,8 +104,27 @@ kvm_start_lightweight:
stb r3, HSTATE_RESTORE_HID5(r13)
/* Load up guest SPRG3 value, since it's user readable */
- ld r3, VCPU_SHARED(r4)
- ld r3, VCPU_SHARED_SPRG3(r3)
+ lwz r3, VCPU_SHAREDBE(r4)
+ cmpwi r3, 0
+ ld r5, VCPU_SHARED(r4)
+ beq sprg3_little_endian
+sprg3_big_endian:
+#ifdef __BIG_ENDIAN__
+ ld r3, VCPU_SHARED_SPRG3(r5)
+#else
+ addi r5, r5, VCPU_SHARED_SPRG3
+ ldbrx r3, 0, r5
+#endif
+ b after_sprg3_load
+sprg3_little_endian:
+#ifdef __LITTLE_ENDIAN__
+ ld r3, VCPU_SHARED_SPRG3(r5)
+#else
+ addi r5, r5, VCPU_SHARED_SPRG3
+ ldbrx r3, 0, r5
+#endif
+
+after_sprg3_load:
mtspr SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index c1abd95063f4..6c8011fd57e6 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -165,16 +165,18 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt)
static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store)
{
- u64 dsisr;
- struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+ u32 dsisr;
+ u64 msr = kvmppc_get_msr(vcpu);
- shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0);
- shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0);
- shared->dar = eaddr;
+ msr = kvmppc_set_field(msr, 33, 36, 0);
+ msr = kvmppc_set_field(msr, 42, 47, 0);
+ kvmppc_set_msr(vcpu, msr);
+ kvmppc_set_dar(vcpu, eaddr);
/* Page Fault */
dsisr = kvmppc_set_field(0, 33, 33, 1);
if (is_store)
- shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+ dsisr = kvmppc_set_field(dsisr, 38, 38, 1);
+ kvmppc_set_dsisr(vcpu, dsisr);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}
@@ -660,7 +662,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
if (!kvmppc_inst_is_paired_single(vcpu, inst))
return EMULATE_FAIL;
- if (!(vcpu->arch.shared->msr & MSR_FP)) {
+ if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
return EMULATE_AGAIN;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 02f1defd8bb9..8eef1e519077 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -53,6 +53,7 @@
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
ulong msr);
+static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
@@ -89,6 +90,7 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
#endif
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+ kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
vcpu->cpu = -1;
}
@@ -115,6 +117,9 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
svcpu->ctr = vcpu->arch.ctr;
svcpu->lr = vcpu->arch.lr;
svcpu->pc = vcpu->arch.pc;
+#ifdef CONFIG_PPC_BOOK3S_64
+ svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
+#endif
svcpu->in_use = true;
}
@@ -158,6 +163,9 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
vcpu->arch.fault_dar = svcpu->fault_dar;
vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
vcpu->arch.last_inst = svcpu->last_inst;
+#ifdef CONFIG_PPC_BOOK3S_64
+ vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
+#endif
svcpu->in_use = false;
out:
@@ -246,14 +254,15 @@ static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
- ulong smsr = vcpu->arch.shared->msr;
+ ulong guest_msr = kvmppc_get_msr(vcpu);
+ ulong smsr = guest_msr;
/* Guest MSR values */
- smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
+ smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
/* Process MSR values */
smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
/* External providers the guest reserved */
- smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
+ smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
smsr |= MSR_ISF | MSR_HV;
@@ -263,14 +272,14 @@ static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
- ulong old_msr = vcpu->arch.shared->msr;
+ ulong old_msr = kvmppc_get_msr(vcpu);
#ifdef EXIT_DEBUG
printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
msr &= to_book3s(vcpu)->msr_mask;
- vcpu->arch.shared->msr = msr;
+ kvmppc_set_msr_fast(vcpu, msr);
kvmppc_recalc_shadow_msr(vcpu);
if (msr & MSR_POW) {
@@ -281,11 +290,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
/* Unset POW bit after we woke up */
msr &= ~MSR_POW;
- vcpu->arch.shared->msr = msr;
+ kvmppc_set_msr_fast(vcpu, msr);
}
}
- if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
+ if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
(old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
kvmppc_mmu_flush_segments(vcpu);
kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
@@ -317,7 +326,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
}
/* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
+ if (kvmppc_get_msr(vcpu) & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}
@@ -427,8 +436,8 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
/* patch dcbz into reserved instruction, so we trap */
for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
- if ((page[i] & 0xff0007ff) == INS_DCBZ)
- page[i] &= 0xfffffff7;
+ if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
+ page[i] &= cpu_to_be32(0xfffffff7);
kunmap_atomic(page);
put_page(hpage);
@@ -438,7 +447,7 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
ulong mp_pa = vcpu->arch.magic_page_pa;
- if (!(vcpu->arch.shared->msr & MSR_SF))
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF))
mp_pa = (uint32_t)mp_pa;
if (unlikely(mp_pa) &&
@@ -459,8 +468,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
int page_found = 0;
struct kvmppc_pte pte;
bool is_mmio = false;
- bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
- bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
+ bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
+ bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
u64 vsid;
relocated = data ? dr : ir;
@@ -480,7 +489,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.page_size = MMU_PAGE_64K;
}
- switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
+ switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
case 0:
pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
break;
@@ -488,7 +497,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
case MSR_IR:
vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
- if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
+ if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
else
pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
@@ -511,22 +520,25 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (page_found == -ENOENT) {
/* Page not found in guest PTE entries */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
- vcpu->arch.shared->msr |=
- vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+ u64 ssrr1 = vcpu->arch.shadow_srr1;
+ u64 msr = kvmppc_get_msr(vcpu);
+ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+ kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
+ kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EPERM) {
/* Storage protection */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
- vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
- vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
- vcpu->arch.shared->msr |=
- vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
+ u32 dsisr = vcpu->arch.fault_dsisr;
+ u64 ssrr1 = vcpu->arch.shadow_srr1;
+ u64 msr = kvmppc_get_msr(vcpu);
+ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
+ dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
+ kvmppc_set_dsisr(vcpu, dsisr);
+ kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
kvmppc_book3s_queue_irqprio(vcpu, vec);
} else if (page_found == -EINVAL) {
/* Page not found in guest SLB */
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
} else if (!is_mmio &&
kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
@@ -606,6 +618,25 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
kvmppc_recalc_shadow_msr(vcpu);
}
+/* Give up facility (TAR / EBB / DSCR) */
+static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
+ /* Facility not available to the guest; ignore the giveup request */
+ return;
+ }
+
+ switch (fac) {
+ case FSCR_TAR_LG:
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ mtspr(SPRN_TAR, current->thread.tar);
+ vcpu->arch.shadow_fscr &= ~FSCR_TAR;
+ break;
+ }
+#endif
+}
+
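/*
 * Illustrative note: FSCR_TAR_LG is the bit position and FSCR_TAR the
 * corresponding mask (1ULL << FSCR_TAR_LG), so the shadow_fscr test
 * above asks "is TAR currently live for the guest on this CPU?". If
 * it is, the guest's TAR is saved and the host thread's value is put
 * back before the bit is cleared.
 */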
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
ulong srr0 = kvmppc_get_pc(vcpu);
@@ -614,11 +645,12 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
if (ret == -ENOENT) {
- ulong msr = vcpu->arch.shared->msr;
+ ulong msr = kvmppc_get_msr(vcpu);
msr = kvmppc_set_field(msr, 33, 33, 1);
msr = kvmppc_set_field(msr, 34, 36, 0);
- vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
+ msr = kvmppc_set_field(msr, 42, 47, 0);
+ kvmppc_set_msr_fast(vcpu, msr);
kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
return EMULATE_AGAIN;
}
@@ -651,7 +683,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
return RESUME_GUEST;
- if (!(vcpu->arch.shared->msr & msr)) {
+ if (!(kvmppc_get_msr(vcpu) & msr)) {
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
return RESUME_GUEST;
}
@@ -683,16 +715,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#endif
if (msr & MSR_FP) {
+ preempt_disable();
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
t->fp_save_area = &vcpu->arch.fp;
+ preempt_enable();
}
if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
+ preempt_disable();
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
t->vr_save_area = &vcpu->arch.vr;
+ preempt_enable();
#endif
}
@@ -716,18 +752,90 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
return;
if (lost_ext & MSR_FP) {
+ preempt_disable();
enable_kernel_fp();
load_fp_state(&vcpu->arch.fp);
+ preempt_enable();
}
#ifdef CONFIG_ALTIVEC
if (lost_ext & MSR_VEC) {
+ preempt_disable();
enable_kernel_altivec();
load_vr_state(&vcpu->arch.vr);
+ preempt_enable();
}
#endif
current->thread.regs->msr |= lost_ext;
}
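/*
 * Assumed rationale for the preempt_disable()/preempt_enable() pairs
 * above: between enable_kernel_fp()/enable_kernel_altivec() and the
 * load of the vcpu's register state the task must not be scheduled
 * away, otherwise the freshly enabled unit could be handed to another
 * thread and the state about to be loaded would be lost.
 */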
+#ifdef CONFIG_PPC_BOOK3S_64
+
+static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
+{
+ /* Inject the Interrupt Cause field and trigger a guest interrupt */
+ vcpu->arch.fscr &= ~(0xffULL << 56);
+ vcpu->arch.fscr |= (fac << 56);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
+}
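/*
 * Illustrative sketch (not from this patch): the Interrupt Cause
 * field occupies the top byte of the FSCR, bits 56-63 counted from
 * the least significant bit, so for the TAR facility
 * (FSCR_TAR_LG == 8) the update above is equivalent to:
 *
 *	fscr = (fscr & ~(0xffULL << 56)) | (8ULL << 56);
 *
 * The exit handler later recovers the cause with shadow_fscr >> 56.
 */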
+
+static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+ enum emulation_result er = EMULATE_FAIL;
+
+ if (!(kvmppc_get_msr(vcpu) & MSR_PR))
+ er = kvmppc_emulate_instruction(vcpu->run, vcpu);
+
+ if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
+ /* Couldn't emulate; trigger an interrupt in the guest */
+ kvmppc_trigger_fac_interrupt(vcpu, fac);
+ }
+}
+
+/* Enable facilities (TAR, EBB, DSCR) for the guest */
+static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
+{
+ bool guest_fac_enabled;
+ BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));
+
+ /*
+ * Not every facility is enabled by FSCR bits; check whether the
+ * guest has this facility enabled at all.
+ */
+ switch (fac) {
+ case FSCR_TAR_LG:
+ case FSCR_EBB_LG:
+ guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
+ break;
+ case FSCR_TM_LG:
+ guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
+ break;
+ default:
+ guest_fac_enabled = false;
+ break;
+ }
+
+ if (!guest_fac_enabled) {
+ /* Facility not enabled by the guest */
+ kvmppc_trigger_fac_interrupt(vcpu, fac);
+ return RESUME_GUEST;
+ }
+
+ switch (fac) {
+ case FSCR_TAR_LG:
+ /* TAR switching isn't lazy in Linux yet */
+ current->thread.tar = mfspr(SPRN_TAR);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ vcpu->arch.shadow_fscr |= FSCR_TAR;
+ break;
+ default:
+ kvmppc_emulate_fac(vcpu, fac);
+ break;
+ }
+
+ return RESUME_GUEST;
+}
+#endif
+
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
@@ -784,7 +892,9 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
r = RESUME_GUEST;
} else {
- vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
+ u64 msr = kvmppc_get_msr(vcpu);
+ msr |= shadow_srr1 & 0x58000000;
+ kvmppc_set_msr_fast(vcpu, msr);
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
}
@@ -824,8 +934,8 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
} else {
- vcpu->arch.shared->dar = dar;
- vcpu->arch.shared->dsisr = fault_dsisr;
+ kvmppc_set_dar(vcpu, dar);
+ kvmppc_set_dsisr(vcpu, fault_dsisr);
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
r = RESUME_GUEST;
}
@@ -833,7 +943,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
case BOOK3S_INTERRUPT_DATA_SEGMENT:
if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
- vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
+ kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
kvmppc_book3s_queue_irqprio(vcpu,
BOOK3S_INTERRUPT_DATA_SEGMENT);
}
@@ -871,7 +981,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
program_interrupt:
flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
- if (vcpu->arch.shared->msr & MSR_PR) {
+ if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
@@ -913,7 +1023,7 @@ program_interrupt:
case BOOK3S_INTERRUPT_SYSCALL:
if (vcpu->arch.papr_enabled &&
(kvmppc_get_last_sc(vcpu) == 0x44000022) &&
- !(vcpu->arch.shared->msr & MSR_PR)) {
+ !(kvmppc_get_msr(vcpu) & MSR_PR)) {
/* SC 1 papr hypercalls */
ulong cmd = kvmppc_get_gpr(vcpu, 3);
int i;
@@ -945,7 +1055,7 @@ program_interrupt:
gprs[i] = kvmppc_get_gpr(vcpu, i);
vcpu->arch.osi_needed = 1;
r = RESUME_HOST_NV;
- } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
+ } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
(((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
/* KVM PV hypercalls */
kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
@@ -986,14 +1096,26 @@ program_interrupt:
}
case BOOK3S_INTERRUPT_ALIGNMENT:
if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
- vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
- kvmppc_get_last_inst(vcpu));
- vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
- kvmppc_get_last_inst(vcpu));
+ u32 last_inst = kvmppc_get_last_inst(vcpu);
+ u32 dsisr;
+ u64 dar;
+
+ dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
+ dar = kvmppc_alignment_dar(vcpu, last_inst);
+
+ kvmppc_set_dsisr(vcpu, dsisr);
+ kvmppc_set_dar(vcpu, dar);
+
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
}
r = RESUME_GUEST;
break;
+#ifdef CONFIG_PPC_BOOK3S_64
+ case BOOK3S_INTERRUPT_FAC_UNAVAIL:
+ kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
+ r = RESUME_GUEST;
+ break;
+#endif
case BOOK3S_INTERRUPT_MACHINE_CHECK:
case BOOK3S_INTERRUPT_TRACE:
kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
@@ -1054,7 +1176,7 @@ static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
}
} else {
for (i = 0; i < 16; i++)
- sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
+ sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
for (i = 0; i < 8; i++) {
sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
@@ -1110,6 +1232,15 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
+ case KVM_REG_PPC_LPCR:
+ /*
+ * We are only interested in the LPCR_ILE bit
+ */
+ if (vcpu->arch.intr_msr & MSR_LE)
+ *val = get_reg_val(id, LPCR_ILE);
+ else
+ *val = get_reg_val(id, 0);
+ break;
default:
r = -EINVAL;
break;
@@ -1118,6 +1249,14 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
return r;
}
+static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
+{
+ if (new_lpcr & LPCR_ILE)
+ vcpu->arch.intr_msr |= MSR_LE;
+ else
+ vcpu->arch.intr_msr &= ~MSR_LE;
+}
+
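/*
 * Illustrative note: PR KVM does not model a full LPCR; the only bit
 * that matters here is ILE, which selects the endianness interrupts
 * are taken in. It is folded into intr_msr, so MSR_LE is set in the
 * guest's MSR whenever an interrupt is delivered to an ILE guest.
 */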
static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
union kvmppc_one_reg *val)
{
@@ -1128,6 +1267,9 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
to_book3s(vcpu)->hior = set_reg_val(id, *val);
to_book3s(vcpu)->hior_explicit = true;
break;
+ case KVM_REG_PPC_LPCR:
+ kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
+ break;
default:
r = -EINVAL;
break;
@@ -1170,8 +1312,14 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
goto uninit_vcpu;
/* the real shared page fills the last 4k of our page */
vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
-
#ifdef CONFIG_PPC_BOOK3S_64
+ /* Always start the shared struct in native endian mode */
+#ifdef __BIG_ENDIAN__
+ vcpu->arch.shared_big_endian = true;
+#else
+ vcpu->arch.shared_big_endian = false;
+#endif
+
/*
* Default to the same as the host if we're on a sufficiently
* recent machine to have 1TB segments;
@@ -1180,6 +1328,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
vcpu->arch.pvr = 0x3C0301;
if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
vcpu->arch.pvr = mfspr(SPRN_PVR);
+ vcpu->arch.intr_msr = MSR_SF;
#else
/* default to book3s_32 (750) */
vcpu->arch.pvr = 0x84202;
@@ -1187,7 +1336,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
vcpu->arch.slb_nr = 64;
- vcpu->arch.shadow_msr = MSR_USER64;
+ vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;
err = kvmppc_mmu_init(vcpu);
if (err < 0)
@@ -1264,7 +1413,7 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
/* Preload FPU if it's enabled */
- if (vcpu->arch.shared->msr & MSR_FP)
+ if (kvmppc_get_msr(vcpu) & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
kvmppc_fix_ee_before_entry();
@@ -1277,6 +1426,9 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/* Make sure we save the guest FPU/Altivec/VSX state */
kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
+ /* Make sure we save the guest TAR/EBB/DSCR state */
+ kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
+
out:
vcpu->mode = OUTSIDE_GUEST_MODE;
return ret;
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 5efa97b993d8..52a63bfe3f07 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -57,7 +57,7 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
for (i = 0; ; ++i) {
if (i == 8)
goto done;
- if ((*hpte & HPTE_V_VALID) == 0)
+ if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0)
break;
hpte += 2;
}
@@ -67,8 +67,8 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
goto done;
}
- hpte[0] = kvmppc_get_gpr(vcpu, 6);
- hpte[1] = kvmppc_get_gpr(vcpu, 7);
+ hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
+ hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
kvmppc_set_gpr(vcpu, 4, pte_index | i);
@@ -93,6 +93,8 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ pte[0] = be64_to_cpu(pte[0]);
+ pte[1] = be64_to_cpu(pte[1]);
ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -169,6 +171,8 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ pte[0] = be64_to_cpu(pte[0]);
+ pte[1] = be64_to_cpu(pte[1]);
/* tsl = AVPN */
flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
@@ -207,6 +211,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ pte[0] = be64_to_cpu(pte[0]);
+ pte[1] = be64_to_cpu(pte[1]);
ret = H_NOT_FOUND;
if ((pte[0] & HPTE_V_VALID) == 0 ||
@@ -225,6 +231,8 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
rb = compute_tlbie_rb(v, r, pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
+ pte[0] = cpu_to_be64(pte[0]);
+ pte[1] = cpu_to_be64(pte[1]);
copy_to_user((void __user *)pteg, pte, sizeof(pte));
ret = H_SUCCESS;
@@ -270,7 +278,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
- vcpu->arch.shared->msr |= MSR_EE;
+ kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
index 7a053157483b..edb14ba992b3 100644
--- a/arch/powerpc/kvm/book3s_rtas.c
+++ b/arch/powerpc/kvm/book3s_rtas.c
@@ -205,6 +205,32 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
return rc;
}
+static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
+{
+#ifdef __LITTLE_ENDIAN__
+ int i;
+
+ args->token = be32_to_cpu(args->token);
+ args->nargs = be32_to_cpu(args->nargs);
+ args->nret = be32_to_cpu(args->nret);
+ for (i = 0; i < args->nargs; i++)
+ args->args[i] = be32_to_cpu(args->args[i]);
+#endif
+}
+
+static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
+{
+#ifdef __LITTLE_ENDIAN__
+ int i;
+
+ for (i = 0; i < args->nret; i++)
+ args->args[i] = cpu_to_be32(args->args[i]);
+ args->token = cpu_to_be32(args->token);
+ args->nargs = cpu_to_be32(args->nargs);
+ args->nret = cpu_to_be32(args->nret);
+#endif
+}
+
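/*
 * Usage sketch: PAPR defines struct rtas_args as big endian in guest
 * memory, so on a little-endian host the arguments are swapped to
 * host order before the token is dispatched and swapped back before
 * the result is written out:
 *
 *	kvmppc_rtas_swap_endian_in(&args);
 *	...handle the call...
 *	kvmppc_rtas_swap_endian_out(&args);
 *
 * On a big-endian host both helpers compile away to nothing.
 */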
int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
{
struct rtas_token_definition *d;
@@ -223,6 +249,8 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc)
goto fail;
+ kvmppc_rtas_swap_endian_in(&args);
+
/*
* args->rets is a pointer into args->args. Now that we've
* copied args we need to fix it up to point into our copy,
@@ -247,6 +275,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
if (rc == 0) {
args.rets = orig_rets;
+ kvmppc_rtas_swap_endian_out(&args);
rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
if (rc)
goto fail;
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 1e0cc2adfd40..acee37cde840 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -90,6 +90,15 @@ kvmppc_handler_trampoline_enter:
LOAD_GUEST_SEGMENTS
#ifdef CONFIG_PPC_BOOK3S_64
+BEGIN_FTR_SECTION
+ /* Save host FSCR */
+ mfspr r8, SPRN_FSCR
+ std r8, HSTATE_HOST_FSCR(r13)
+ /* Set FSCR during guest execution */
+ ld r9, SVCPU_SHADOW_FSCR(r13)
+ mtspr SPRN_FSCR, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
/* Some guests may need to have dcbz set to 32-byte length.
*
* Usually we ensure that by patching the guest's instructions
@@ -255,6 +264,10 @@ BEGIN_FTR_SECTION
cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
beq- ld_last_inst
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+BEGIN_FTR_SECTION
+ cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
+ beq- ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
#endif
b no_ld_last_inst
@@ -311,6 +324,18 @@ no_ld_last_inst:
no_dcbz32_off:
+BEGIN_FTR_SECTION
+ /* Save guest FSCR on a FAC_UNAVAIL interrupt */
+ cmpwi r12, BOOK3S_INTERRUPT_FAC_UNAVAIL
+ bne+ no_fscr_save
+ mfspr r7, SPRN_FSCR
+ std r7, SVCPU_SHADOW_FSCR(r13)
+no_fscr_save:
+ /* Restore host FSCR */
+ ld r8, HSTATE_HOST_FSCR(r13)
+ mtspr SPRN_FSCR, r8
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
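/*
 * Flow sketch for the two FSCR hunks above: on guest entry the host
 * FSCR is stashed in HSTATE and the guest's shadow FSCR installed; on
 * exit the guest value is read back only for facility unavailable
 * interrupts, since that is when hardware has updated FSCR[IC], and
 * the host FSCR is then restored.
 */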
#endif /* CONFIG_PPC_BOOK3S_64 */
/*
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 89b7f821f6c4..002d51764143 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -19,6 +19,7 @@
#include "booke.h"
#include "e500.h"
+#define XOP_DCBTLS 166
#define XOP_MSGSND 206
#define XOP_MSGCLR 238
#define XOP_TLBIVAX 786
@@ -103,6 +104,15 @@ static int kvmppc_e500_emul_ehpriv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return emulated;
}
+static int kvmppc_e500_emul_dcbtls(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+ /* Always fail to lock the cache */
+ vcpu_e500->l1csr0 |= L1CSR0_CUL;
+ return EMULATE_DONE;
+}
+
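/*
 * Note on the emulation above: dcbtls (data cache block touch and
 * lock set) asks to pin a line in the L1 cache, which a guest must
 * not be allowed to do. The architecturally permitted failure mode is
 * reported instead: L1CSR0[CUL] ("cache unable to lock") is set and
 * the instruction otherwise acts as a no-op, which guests are already
 * required to handle.
 */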
int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
{
@@ -116,6 +126,10 @@ int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 31:
switch (get_xop(inst)) {
+ case XOP_DCBTLS:
+ emulated = kvmppc_e500_emul_dcbtls(vcpu);
+ break;
+
#ifdef CONFIG_KVM_E500MC
case XOP_MSGSND:
emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
@@ -222,6 +236,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
break;
case SPRN_L1CSR1:
vcpu_e500->l1csr1 = spr_val;
+ vcpu_e500->l1csr1 &= ~(L1CSR1_ICFI | L1CSR1_ICLFR);
break;
case SPRN_HID0:
vcpu_e500->hid0 = spr_val;
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c2b887be2c29..da86d9ba3476 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -97,10 +97,10 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
switch (sprn) {
case SPRN_SRR0:
- vcpu->arch.shared->srr0 = spr_val;
+ kvmppc_set_srr0(vcpu, spr_val);
break;
case SPRN_SRR1:
- vcpu->arch.shared->srr1 = spr_val;
+ kvmppc_set_srr1(vcpu, spr_val);
break;
/* XXX We need to context-switch the timebase for
@@ -114,16 +114,16 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
break;
case SPRN_SPRG0:
- vcpu->arch.shared->sprg0 = spr_val;
+ kvmppc_set_sprg0(vcpu, spr_val);
break;
case SPRN_SPRG1:
- vcpu->arch.shared->sprg1 = spr_val;
+ kvmppc_set_sprg1(vcpu, spr_val);
break;
case SPRN_SPRG2:
- vcpu->arch.shared->sprg2 = spr_val;
+ kvmppc_set_sprg2(vcpu, spr_val);
break;
case SPRN_SPRG3:
- vcpu->arch.shared->sprg3 = spr_val;
+ kvmppc_set_sprg3(vcpu, spr_val);
break;
/* PIR can legally be written, but we ignore it */
@@ -150,10 +150,10 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
switch (sprn) {
case SPRN_SRR0:
- spr_val = vcpu->arch.shared->srr0;
+ spr_val = kvmppc_get_srr0(vcpu);
break;
case SPRN_SRR1:
- spr_val = vcpu->arch.shared->srr1;
+ spr_val = kvmppc_get_srr1(vcpu);
break;
case SPRN_PVR:
spr_val = vcpu->arch.pvr;
@@ -173,16 +173,16 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
break;
case SPRN_SPRG0:
- spr_val = vcpu->arch.shared->sprg0;
+ spr_val = kvmppc_get_sprg0(vcpu);
break;
case SPRN_SPRG1:
- spr_val = vcpu->arch.shared->sprg1;
+ spr_val = kvmppc_get_sprg1(vcpu);
break;
case SPRN_SPRG2:
- spr_val = vcpu->arch.shared->sprg2;
+ spr_val = kvmppc_get_sprg2(vcpu);
break;
case SPRN_SPRG3:
- spr_val = vcpu->arch.shared->sprg3;
+ spr_val = kvmppc_get_sprg3(vcpu);
break;
/* Note: SPRG4-7 are user-readable, so we don't get
* a trap. */
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index efbd9962a209..b68d0dc9479a 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -126,6 +126,8 @@ static int openpic_cpu_write_internal(void *opaque, gpa_t addr,
u32 val, int idx);
static int openpic_cpu_read_internal(void *opaque, gpa_t addr,
u32 *ptr, int idx);
+static inline void write_IRQreg_idr(struct openpic *opp, int n_IRQ,
+ uint32_t val);
enum irq_type {
IRQ_TYPE_NORMAL = 0,
@@ -528,7 +530,6 @@ static void openpic_reset(struct openpic *opp)
/* Initialise IRQ sources */
for (i = 0; i < opp->max_irq; i++) {
opp->src[i].ivpr = opp->ivpr_reset;
- opp->src[i].idr = opp->idr_reset;
switch (opp->src[i].type) {
case IRQ_TYPE_NORMAL:
@@ -543,6 +544,8 @@ static void openpic_reset(struct openpic *opp)
case IRQ_TYPE_FSLSPECIAL:
break;
}
+
+ write_IRQreg_idr(opp, i, opp->idr_reset);
}
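/*
 * Assumed rationale for routing reset through write_IRQreg_idr():
 * writing IDR has side effects (recomputing per-IRQ destination
 * state), and a direct assignment of opp->src[i].idr would skip them,
 * leaving hidden state stale after a reset.
 */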
/* Initialise IRQ destinations */
for (i = 0; i < MAX_CPU; i++) {
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 3cf541a53e2a..bab20f410443 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -125,6 +125,27 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
+ int i;
+
+ shared->sprg0 = swab64(shared->sprg0);
+ shared->sprg1 = swab64(shared->sprg1);
+ shared->sprg2 = swab64(shared->sprg2);
+ shared->sprg3 = swab64(shared->sprg3);
+ shared->srr0 = swab64(shared->srr0);
+ shared->srr1 = swab64(shared->srr1);
+ shared->dar = swab64(shared->dar);
+ shared->msr = swab64(shared->msr);
+ shared->dsisr = swab32(shared->dsisr);
+ shared->int_pending = swab32(shared->int_pending);
+ for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
+ shared->sr[i] = swab32(shared->sr[i]);
+}
+#endif
+
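/*
 * Sketch of when the swab above runs: the shared (magic) page is kept
 * in the guest's current endianness. If the guest re-registers the
 * magic page after switching endianness, detected below via
 * intr_msr & MSR_LE, every field is byte-swapped in place once so
 * both sides keep a consistent view.
 */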
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
int nr = kvmppc_get_gpr(vcpu, 11);
@@ -135,7 +156,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
unsigned long r2 = 0;
- if (!(vcpu->arch.shared->msr & MSR_SF)) {
+ if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
/* 32 bit mode */
param1 &= 0xffffffff;
param2 &= 0xffffffff;
@@ -146,8 +167,28 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
switch (nr) {
case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
{
- vcpu->arch.magic_page_pa = param1;
- vcpu->arch.magic_page_ea = param2;
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
+ /* Book3S can be little endian; find that out here */
+ int shared_big_endian = true;
+ if (vcpu->arch.intr_msr & MSR_LE)
+ shared_big_endian = false;
+ if (shared_big_endian != vcpu->arch.shared_big_endian)
+ kvmppc_swab_shared(vcpu);
+ vcpu->arch.shared_big_endian = shared_big_endian;
+#endif
+
+ if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
+ /*
+ * Older versions of the Linux magic page code had
+ * a bug where they would map their trampoline code
+ * NX. If that's the case, remove !PR NX capability.
+ */
+ vcpu->arch.disable_kernel_nx = true;
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
+
+ vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
+ vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
@@ -375,6 +416,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
case KVM_CAP_PPC_RTAS:
+ case KVM_CAP_PPC_FIXUP_HCALL:
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS:
#endif
@@ -1015,10 +1057,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
u32 inst_sc1 = 0x44000022;
- pvinfo->hcall[0] = inst_sc1;
- pvinfo->hcall[1] = inst_nop;
- pvinfo->hcall[2] = inst_nop;
- pvinfo->hcall[3] = inst_nop;
+ pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
+ pvinfo->hcall[1] = cpu_to_be32(inst_nop);
+ pvinfo->hcall[2] = cpu_to_be32(inst_nop);
+ pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
u32 inst_lis = 0x3c000000;
u32 inst_ori = 0x60000000;
@@ -1034,10 +1076,10 @@ static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
* sc
* nop
*/
- pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
- pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
- pvinfo->hcall[2] = inst_sc;
- pvinfo->hcall[3] = inst_nop;
+ pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
+ pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
+ pvinfo->hcall[2] = cpu_to_be32(inst_sc);
+ pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif
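/*
 * Note on the cpu_to_be32() wrappers above: the hcall words end up in
 * a device tree property, and device tree data is big endian by
 * definition, so they are exported in big-endian byte order; a
 * little-endian guest byte-swaps them before patching them into its
 * own text.
 */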
pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h
index 8b22e4748344..e1357cd8dc1f 100644
--- a/arch/powerpc/kvm/trace_pr.h
+++ b/arch/powerpc/kvm/trace_pr.h
@@ -255,7 +255,7 @@ TRACE_EVENT(kvm_exit,
__entry->exit_nr = exit_nr;
__entry->pc = kvmppc_get_pc(vcpu);
__entry->dar = kvmppc_get_fault_dar(vcpu);
- __entry->msr = vcpu->arch.shared->msr;
+ __entry->msr = kvmppc_get_msr(vcpu);
__entry->srr1 = vcpu->arch.shadow_srr1;
__entry->last_inst = vcpu->arch.last_inst;
),
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 06ba83b036d3..350aa58a6f95 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -269,9 +269,9 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
- unsigned long size = 0;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ int size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -324,9 +324,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
- unsigned long size = 0;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
+ int size = 0;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -406,9 +406,9 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
const char *uname, int depth,
void *data) {
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be64 *addr_prop;
- __be32 *page_count_prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be64 *addr_prop;
+ const __be32 *page_count_prop;
unsigned int expected_pages;
long unsigned int phys_addr;
long unsigned int block_size;
@@ -550,8 +550,8 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
const char *uname, int depth,
void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- __be32 *prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *prop;
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
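/*
 * Assumed context for the const/int changes in this file: the
 * flattened device tree accessors now return const pointers and
 * report property lengths as int, so each early scan callback is
 * adjusted to match the new of_get_flat_dt_prop() prototype.
 */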
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index eb923654ba80..7e70ae968e5f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -86,11 +86,6 @@ int pgd_huge(pgd_t pgd)
*/
return ((pgd_val(pgd) & 0x3) != 0x0);
}
-
-int pmd_huge_support(void)
-{
- return 1;
-}
#else
int pmd_huge(pmd_t pmd)
{
@@ -106,11 +101,6 @@ int pgd_huge(pgd_t pgd)
{
return 0;
}
-
-int pmd_huge_support(void)
-{
- return 0;
-}
#endif
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 9d1d33cd2be5..964a5f61488a 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -97,7 +97,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
static void __slb_flush_and_rebolt(void)
{
/* If you change this, make sure you change SLB_NUM_BOLTED
- * appropriately too. */
+ * and PR KVM appropriately too. */
unsigned long linear_llp, vmalloc_llp, lflags, vflags;
unsigned long ksp_esid_data, ksp_vsid_data;
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 18c104820198..6e19b0ad5d26 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -199,8 +199,8 @@ static void __init efika_setup_arch(void)
static int __init efika_probe(void)
{
- char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "model", NULL);
+ const char *model = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "model", NULL);
if (model == NULL)
return 0;
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index c665d7de6c99..7044fd36197b 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -574,8 +574,8 @@ chrp_init2(void)
static int __init chrp_probe(void)
{
- char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
- "device_type", NULL);
+ const char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
+ "device_type", NULL);
if (dtype == NULL)
return 0;
if (strcmp(dtype, "chrp"))
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 360ad80c754c..f343183add07 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -61,7 +61,7 @@ int __init early_init_dt_scan_opal(unsigned long node,
const char *uname, int depth, void *data)
{
const void *basep, *entryp, *sizep;
- unsigned long basesz, entrysz, runtimesz;
+ int basesz, entrysz, runtimesz;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
return 0;
@@ -77,11 +77,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
opal.entry = of_read_number(entryp, entrysz/4);
opal.size = of_read_number(sizep, runtimesz/4);
- pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
+ pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
opal.base, basep, basesz);
- pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
+ pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
opal.entry, entryp, entrysz);
- pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%ld)\n",
+ pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
opal.size, sizep, runtimesz);
powerpc_firmware_features |= FW_FEATURE_OPAL;
@@ -102,7 +102,7 @@ int __init early_init_dt_scan_opal(unsigned long node,
int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data)
{
- unsigned long i, psize, size;
+ int i, psize, size;
const __be32 *prop;
if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
@@ -359,7 +359,7 @@ int opal_get_chars(uint32_t vtermno, char *buf, int count)
if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
return 0;
len = cpu_to_be64(count);
- rc = opal_console_read(vtermno, &len, buf);
+ rc = opal_console_read(vtermno, &len, buf);
if (rc == OPAL_SUCCESS)
return be64_to_cpu(len);
return 0;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 2db8cc691bf4..099d2df976a2 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -665,7 +665,7 @@ static int __init pseries_probe_fw_features(unsigned long node,
void *data)
{
const char *prop;
- unsigned long len;
+ int len;
static int hypertas_found;
static int vec5_found;
@@ -698,7 +698,7 @@ static int __init pseries_probe_fw_features(unsigned long node,
static int __init pSeries_probe(void)
{
unsigned long root = of_get_flat_dt_root();
- char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
+ const char *dtype = of_get_flat_dt_prop(root, "device_type", NULL);
if (dtype == NULL)
return 0;
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 1bd0eba4d355..e9056e438575 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -152,9 +152,9 @@ EXPORT_SYMBOL_GPL(dcr_resource_len);
#ifdef CONFIG_PPC_DCR_MMIO
-u64 of_translate_dcr_address(struct device_node *dev,
- unsigned int dcr_n,
- unsigned int *out_stride)
+static u64 of_translate_dcr_address(struct device_node *dev,
+ unsigned int dcr_n,
+ unsigned int *out_stride)
{
struct device_node *dp;
const u32 *p;