Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 24
-rw-r--r--  arch/powerpc/Makefile | 49
-rw-r--r--  arch/powerpc/boot/dts/bluestone.dts | 25
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi | 43
-rw-r--r--  arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi | 10
-rw-r--r--  arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig | 1
-rw-r--r--  arch/powerpc/configs/chroma_defconfig | 2
-rw-r--r--  arch/powerpc/configs/g5_defconfig | 1
-rw-r--r--  arch/powerpc/configs/gamecube_defconfig | 2
-rw-r--r--  arch/powerpc/configs/maple_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 1
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pasemi_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ps3_defconfig | 6
-rw-r--r--  arch/powerpc/configs/wii_defconfig | 2
-rw-r--r--  arch/powerpc/include/asm/asm-compat.h | 11
-rw-r--r--  arch/powerpc/include/asm/cputhreads.h | 2
-rw-r--r--  arch/powerpc/include/asm/exception-64s.h | 7
-rw-r--r--  arch/powerpc/include/asm/gpio.h | 57
-rw-r--r--  arch/powerpc/include/asm/hvcall.h | 25
-rw-r--r--  arch/powerpc/include/asm/irq.h | 4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h | 7
-rw-r--r--  arch/powerpc/include/asm/lppaca.h | 196
-rw-r--r--  arch/powerpc/include/asm/lv1call.h | 4
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 18
-rw-r--r--  arch/powerpc/include/asm/mpic_msgr.h | 1
-rw-r--r--  arch/powerpc/include/asm/pSeries_reconfig.h | 12
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 10
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/include/asm/ptrace.h | 6
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h | 5
-rw-r--r--  arch/powerpc/include/asm/switch_to.h | 6
-rw-r--r--  arch/powerpc/include/asm/thread_info.h | 14
-rw-r--r--  arch/powerpc/include/asm/topology.h | 36
-rw-r--r--  arch/powerpc/include/asm/vio.h | 46
-rw-r--r--  arch/powerpc/kernel/Makefile | 2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 4
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 92
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S | 6
-rw-r--r--  arch/powerpc/kernel/head_44x.S | 8
-rw-r--r--  arch/powerpc/kernel/head_fsl_booke.S | 8
-rw-r--r--  arch/powerpc/kernel/idle.c | 23
-rw-r--r--  arch/powerpc/kernel/init_task.c | 29
-rw-r--r--  arch/powerpc/kernel/irq.c | 29
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 7
-rw-r--r--  arch/powerpc/kernel/misc_32.S | 203
-rw-r--r--  arch/powerpc/kernel/paca.c | 3
-rw-r--r--  arch/powerpc/kernel/process.c | 52
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 17
-rw-r--r--  arch/powerpc/kernel/ptrace.c | 44
-rw-r--r--  arch/powerpc/kernel/ptrace32.c | 32
-rw-r--r--  arch/powerpc/kernel/setup_32.c | 3
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 17
-rw-r--r--  arch/powerpc/kernel/smp.c | 76
-rw-r--r--  arch/powerpc/kernel/traps.c | 10
-rw-r--r--  arch/powerpc/kernel/vector.S | 10
-rw-r--r--  arch/powerpc/kernel/vio.c | 273
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 13
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c | 22
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c | 2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c | 1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S | 42
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 6
-rw-r--r--  arch/powerpc/lib/mem_64.S | 6
-rw-r--r--  arch/powerpc/lib/memcpy_64.S | 6
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 3
-rw-r--r--  arch/powerpc/net/bpf_jit.h | 8
-rw-r--r--  arch/powerpc/net/bpf_jit_64.S | 108
-rw-r--r--  arch/powerpc/net/bpf_jit_comp.c | 26
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 3
-rw-r--r--  arch/powerpc/perf/core-fsl-emb.c | 3
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig | 2
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 6
-rw-r--r--  arch/powerpc/platforms/85xx/mpc85xx_mds.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 13
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 43
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 8
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/low_i2c.c | 10
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 6
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 22
-rw-r--r--  arch/powerpc/platforms/ps3/mm.c | 77
-rw-r--r--  arch/powerpc/platforms/ps3/platform.h | 16
-rw-r--r--  arch/powerpc/platforms/ps3/repository.c | 198
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 6
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/plpar_wrappers.h | 4
-rw-r--r--  arch/powerpc/platforms/pseries/reconfig.c | 7
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 3
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 61
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 54
-rw-r--r--  arch/powerpc/sysdev/mpic_msgr.c | 12
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_msi.c | 42
-rw-r--r--  arch/powerpc/sysdev/scom.c | 1
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 7
95 files changed, 1456 insertions, 996 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index feab3bad6d0f..00b9874e2240 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -27,15 +27,6 @@ config MMU
bool
default y
-config GENERIC_CMOS_UPDATE
- def_bool y
-
-config GENERIC_TIME_VSYSCALL
- def_bool y
-
-config GENERIC_CLOCKEVENTS
- def_bool y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool PPC64
@@ -87,10 +78,6 @@ config ARCH_HAS_ILOG2_U64
bool
default y if 64BIT
-config ARCH_HAS_CPU_IDLE_WAIT
- bool
- default y
-
config GENERIC_HWEIGHT
bool
default y
@@ -141,9 +128,13 @@ config PPC
select IRQ_FORCED_THREADING
select HAVE_RCU_TABLE_FREE if SMP
select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_BPF_JIT if (PPC64 && NET)
+ select HAVE_BPF_JIT if PPC64
select HAVE_ARCH_JUMP_LABEL
select ARCH_HAVE_NMI_SAFE_CMPXCHG
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CMOS_UPDATE
+ select GENERIC_TIME_VSYSCALL
+ select GENERIC_CLOCKEVENTS
config EARLY_PRINTK
bool
@@ -284,7 +275,6 @@ config HIGHMEM
bool "High memory support"
depends on PPC32
-source kernel/time/Kconfig
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
source "fs/Kconfig.binfmt"
@@ -353,7 +343,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
- depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP && !PPC_47x)) && EXPERIMENTAL
+ depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -370,7 +360,7 @@ config KEXEC
config CRASH_DUMP
bool "Build a kdump crash kernel"
- depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP && !PPC_47x)
+ depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
select RELOCATABLE if PPC64 || 44x
select DYNAMIC_MEMSTART if FSL_BOOKE
help
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 6524c6e21896..950d1f7a5a39 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -69,6 +69,16 @@ LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc
CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple
+
+CFLAGS-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=power7,-mtune=power4)
+CFLAGS-$(CONFIG_CELL_CPU) += $(call cc-option,-mcpu=cell)
+CFLAGS-$(CONFIG_POWER4_CPU) += $(call cc-option,-mcpu=power4)
+CFLAGS-$(CONFIG_POWER5_CPU) += $(call cc-option,-mcpu=power5)
+CFLAGS-$(CONFIG_POWER6_CPU) += $(call cc-option,-mcpu=power6)
+CFLAGS-$(CONFIG_POWER7_CPU) += $(call cc-option,-mcpu=power7)
+
+CFLAGS-$(CONFIG_TUNE_CELL) += $(call cc-option,-mtune=cell)
+
KBUILD_CPPFLAGS += -Iarch/$(ARCH)
KBUILD_AFLAGS += -Iarch/$(ARCH)
KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
@@ -76,32 +86,11 @@ CPP = $(CC) -E $(KBUILD_CFLAGS)
CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
-ifeq ($(CONFIG_PPC64),y)
-GCC_BROKEN_VEC := $(call cc-ifversion, -lt, 0400, y)
-
-ifeq ($(CONFIG_POWER4_ONLY),y)
-ifeq ($(CONFIG_ALTIVEC),y)
-ifeq ($(GCC_BROKEN_VEC),y)
- KBUILD_CFLAGS += $(call cc-option,-mcpu=970)
-else
- KBUILD_CFLAGS += $(call cc-option,-mcpu=power4)
-endif
-else
- KBUILD_CFLAGS += $(call cc-option,-mcpu=power4)
-endif
-else
- KBUILD_CFLAGS += $(call cc-option,-mtune=power4)
-endif
-endif
-
KBUILD_LDFLAGS_MODULE += arch/powerpc/lib/crtsavres.o
-ifeq ($(CONFIG_TUNE_CELL),y)
- KBUILD_CFLAGS += $(call cc-option,-mtune=cell)
-endif
-
-# No AltiVec instruction when building kernel
+# No AltiVec or VSX instructions when building kernel
KBUILD_CFLAGS += $(call cc-option,-mno-altivec)
+KBUILD_CFLAGS += $(call cc-option,-mno-vsx)
# No SPE instruction when building kernel
# (We use all available options to help semi-broken compilers)
@@ -160,6 +149,7 @@ core-$(CONFIG_KVM) += arch/powerpc/kvm/
core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
+drivers-$(CONFIG_CRYPTO_DEV_NX) += drivers/crypto/nx/
# Default to zImage, override when needed
all: zImage
@@ -234,10 +224,11 @@ archprepare: checkbin
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
TOUT := .tmp_gas_check
-# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
-# instructions.
-# gcc-3.4 and binutils-2.14 are a fatal combination.
+# Check gcc and binutils versions:
+# - gcc-3.4 and binutils-2.14 are a fatal combination
+# - Require gcc 4.0 or above on 64-bit
+# - gcc-4.2.0 has issues compiling modules on 64-bit
checkbin:
@if test "$(call cc-version)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
@@ -247,6 +238,12 @@ checkbin:
false; \
fi ; \
fi
+ @if test "$(call cc-version)" -lt "0400" \
+ && test "x${CONFIG_PPC64}" = "xy" ; then \
+ echo -n "Sorry, GCC v4.0 or above is required to build " ; \
+ echo "the 64-bit powerpc kernel." ; \
+ false ; \
+ fi
@if test "$(call cc-fullversion)" = "040200" \
&& test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 7bda373f10ef..9d4917aebe6b 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -373,5 +373,30 @@
0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
+
+ MSI: ppc4xx-msi@C10000000 {
+ compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
+ reg = < 0xC 0x10000000 0x100
+ 0xC 0x10000000 0x100>;
+ sdr-base = <0x36C>;
+ msi-data = <0x00004440>;
+ msi-mask = <0x0000ffe0>;
+ interrupts =<0 1 2 3 4 5 6 7>;
+ interrupt-parent = <&MSI>;
+ #interrupt-cells = <1>;
+ #address-cells = <0>;
+ #size-cells = <0>;
+ msi-available-ranges = <0x0 0x100>;
+ interrupt-map = <
+ 0 &UIC3 0x18 1
+ 1 &UIC3 0x19 1
+ 2 &UIC3 0x1A 1
+ 3 &UIC3 0x1B 1
+ 4 &UIC3 0x1C 1
+ 5 &UIC3 0x1D 1
+ 6 &UIC3 0x1E 1
+ 7 &UIC3 0x1F 1
+ >;
+ };
};
};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
new file mode 100644
index 000000000000..1cf0b77b1efe
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi
@@ -0,0 +1,43 @@
+/*
+ * PQ3 MPIC Message (Group B) device tree stub [ controller @ offset 0x42400 ]
+ *
+ * Copyright 2012 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+message@42400 {
+ compatible = "fsl,mpic-v3.1-msgr";
+ reg = <0x42400 0x200>;
+ interrupts = <
+ 0xb4 2 0 0
+ 0xb5 2 0 0
+ 0xb6 2 0 0
+ 0xb7 2 0 0>;
+};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
index fdedf7b1fe0f..71c30eb10056 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -53,6 +53,16 @@ timer@41100 {
3 0 3 0>;
};
+message@41400 {
+ compatible = "fsl,mpic-v3.1-msgr";
+ reg = <0x41400 0x200>;
+ interrupts = <
+ 0xb0 2 0 0
+ 0xb1 2 0 0
+ 0xb2 2 0 0
+ 0xb3 2 0 0>;
+};
+
msi@41600 {
compatible = "fsl,mpic-msi";
reg = <0x41600 0x80>;
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
index 0db9ba0423ff..c09598b31de1 100644
--- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
+++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig
@@ -100,6 +100,7 @@ CONFIG_SND_MIXER_OSS=y
CONFIG_SND_PCM_OSS=y
# CONFIG_SND_SUPPORT_OLD_API is not set
CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
index f104ccde6b53..b1f9597fe312 100644
--- a/arch/powerpc/configs/chroma_defconfig
+++ b/arch/powerpc/configs/chroma_defconfig
@@ -32,7 +32,7 @@ CONFIG_RD_LZMA=y
CONFIG_INITRAMFS_COMPRESSION_GZIP=y
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 1196c34163b7..07b7f2af2dca 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -1,5 +1,4 @@
CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index e74d3a483705..9ef2cc13e1b4 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -8,7 +8,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 2244d370f24d..02ac96b679b8 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -1,5 +1,4 @@
CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d6b6df5e8743..62bb723c5b54 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -141,6 +141,7 @@ CONFIG_SND_INTEL8X0=y
# CONFIG_SND_PPC is not set
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 5b0e2926becd..d1828427ae55 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -143,6 +143,7 @@ CONFIG_SND_INTEL8X0=y
# CONFIG_SND_PPC is not set
# CONFIG_SND_USB is not set
CONFIG_SND_SOC=y
+CONFIG_SND_POWERPC_SOC=y
CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
CONFIG_HID_BELKIN=y
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index f4deb0b78cf0..840a2c2d0430 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -1,5 +1,4 @@
CONFIG_PPC64=y
-CONFIG_POWER4_ONLY=y
CONFIG_ALTIVEC=y
# CONFIG_VIRT_CPU_ACCOUNTING is not set
CONFIG_SMP=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index ded867871e97..c2f4b4a86ece 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -6,7 +6,6 @@ CONFIG_NR_CPUS=2
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
-CONFIG_SPARSE_IRQ=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
@@ -25,7 +24,6 @@ CONFIG_PS3_DISK=y
CONFIG_PS3_ROM=y
CONFIG_PS3_FLASH=y
CONFIG_PS3_VRAM=m
-CONFIG_PS3_LPM=m
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_HIGH_RES_TIMERS=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -53,8 +51,6 @@ CONFIG_IP_PNP_DHCP=y
# CONFIG_INET_DIAG is not set
CONFIG_IPV6=y
CONFIG_BT=m
-CONFIG_BT_L2CAP=y
-CONFIG_BT_SCO=y
CONFIG_BT_RFCOMM=m
CONFIG_BT_RFCOMM_TTY=y
CONFIG_BT_BNEP=m
@@ -63,7 +59,6 @@ CONFIG_BT_BNEP_PROTO_FILTER=y
CONFIG_BT_HIDP=m
CONFIG_BT_HCIBTUSB=m
CONFIG_CFG80211=m
-# CONFIG_WIRELESS_EXT_SYSFS is not set
CONFIG_MAC80211=m
CONFIG_MAC80211_RC_PID=y
# CONFIG_MAC80211_RC_MINSTREL is not set
@@ -181,7 +176,6 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_WRITECOUNT=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_DEBUG_LIST=y
-CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_FTRACE is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_CRYPTO_CCM=m
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index 175295fbf4f3..1e2b7d062aa4 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -9,7 +9,7 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_ELF_CORE is not set
-CONFIG_PERF_COUNTERS=y
+CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
CONFIG_MODULES=y
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index decad950f11a..5d7fbe1950f9 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -29,18 +29,9 @@
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
+#define PPC_MTOCRF(FXM, RS) MTOCRF((FXM), (RS))
#define PPC_LR_STKOFF 16
#define PPC_MIN_STKFRM 112
-
-/* Move to CR, single-entry optimized version. Only available
- * on POWER4 and later.
- */
-#ifdef CONFIG_POWER4_ONLY
-#define PPC_MTOCRF stringify_in_c(mtocrf)
-#else
-#define PPC_MTOCRF stringify_in_c(mtcrf)
-#endif
-
#else /* 32-bit */
/* operations for longs and pointers */
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index ce516e5eb0d3..ac3eedb9b74a 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -9,7 +9,7 @@
* Note: This implementation is limited to a power of 2 number of
* threads per core and the same number for each core in the system
* (though it would work if some processors had less threads as long
- * as the CPU numbers are still allocated, just not brought offline).
+ * as the CPU numbers are still allocated, just not brought online).
*
* However, the API allows for a different implementation in the future
* if needed, as long as you only use the functions and not the variables
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 548da3aa0a30..d58fc4e4149c 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -288,13 +288,6 @@ label##_hv: \
/* Exception addition: Hard disable interrupts */
#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS \
- ld r11,PACAKMSR(r13); \
- ld r12,_MSR(r1); \
- rlwimi r11,r12,0,MSR_EE; \
- mtmsrd r11,1
-
#define ADD_NVGPRS \
bl .save_nvgprs
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h
index 38762edb5e58..b3799d88ffcf 100644
--- a/arch/powerpc/include/asm/gpio.h
+++ b/arch/powerpc/include/asm/gpio.h
@@ -1,53 +1,4 @@
-/*
- * Generic GPIO API implementation for PowerPC.
- *
- * Copyright (c) 2007-2008 MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __ASM_POWERPC_GPIO_H
-#define __ASM_POWERPC_GPIO_H
-
-#include <linux/errno.h>
-#include <asm-generic/gpio.h>
-
-#ifdef CONFIG_GPIOLIB
-
-/*
- * We don't (yet) implement inlined/rapid versions for on-chip gpios.
- * Just call gpiolib.
- */
-static inline int gpio_get_value(unsigned int gpio)
-{
- return __gpio_get_value(gpio);
-}
-
-static inline void gpio_set_value(unsigned int gpio, int value)
-{
- __gpio_set_value(gpio, value);
-}
-
-static inline int gpio_cansleep(unsigned int gpio)
-{
- return __gpio_cansleep(gpio);
-}
-
-static inline int gpio_to_irq(unsigned int gpio)
-{
- return __gpio_to_irq(gpio);
-}
-
-static inline int irq_to_gpio(unsigned int irq)
-{
- return -EINVAL;
-}
-
-#endif /* CONFIG_GPIOLIB */
-
-#endif /* __ASM_POWERPC_GPIO_H */
+#ifndef __LINUX_GPIO_H
+#warning Include linux/gpio.h instead of asm/gpio.h
+#include <linux/gpio.h>
+#endif
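
With asm/gpio.h reduced to a redirect, PowerPC drivers include <linux/gpio.h> and use plain gpiolib calls. A minimal sketch of that pattern (hypothetical driver code, not part of this diff):

#include <linux/gpio.h>

/* Request a line, drive it high, then release it. */
static int example_gpio_pulse(unsigned int gpio)
{
	int err = gpio_request(gpio, "example");

	if (err)
		return err;

	gpio_direction_output(gpio, 0);
	gpio_set_value(gpio, 1);
	gpio_free(gpio);
	return 0;
}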
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 1c324ff55ea8..612252388190 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -77,8 +77,27 @@
#define H_MR_CONDITION -43
#define H_NOT_ENOUGH_RESOURCES -44
#define H_R_STATE -45
-#define H_RESCINDEND -46
-#define H_MULTI_THREADS_ACTIVE -9005
+#define H_RESCINDED -46
+#define H_P2 -55
+#define H_P3 -56
+#define H_P4 -57
+#define H_P5 -58
+#define H_P6 -59
+#define H_P7 -60
+#define H_P8 -61
+#define H_P9 -62
+#define H_TOO_BIG -64
+#define H_OVERLAP -68
+#define H_INTERRUPT -69
+#define H_BAD_DATA -70
+#define H_NOT_ACTIVE -71
+#define H_SG_LIST -72
+#define H_OP_MODE -73
+#define H_COP_HW -74
+#define H_UNSUPPORTED_FLAG_START -256
+#define H_UNSUPPORTED_FLAG_END -511
+#define H_MULTI_THREADS_ACTIVE -9005
+#define H_OUTSTANDING_COP_OPS -9006
/* Long Busy is a condition that can be returned by the firmware
@@ -240,6 +259,8 @@
#define H_GET_MPP 0x2D4
#define H_HOME_NODE_ASSOCIATIVITY 0x2EC
#define H_BEST_ENERGY 0x2F4
+#define H_RANDOM 0x300
+#define H_COP 0x304
#define H_GET_MPP_X 0x314
#define MAX_HCALL_OPCODE H_GET_MPP_X
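
The new H_RANDOM and H_COP opcodes are reached through the usual hcall wrappers declared in this header. A minimal sketch of a caller, assuming the standard plpar_hcall() interface; the example_h_random() helper is hypothetical and not part of this diff:

#include <linux/errno.h>
#include <asm/hvcall.h>

/* Ask the hypervisor for one word of entropy via H_RANDOM. */
static int example_h_random(unsigned long *value)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc = plpar_hcall(H_RANDOM, retbuf);

	if (rc != H_SUCCESS)
		return -EIO;

	*value = retbuf[0];
	return 0;
}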
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index e648af92ced1..0e40843a1c6e 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -18,10 +18,6 @@
#include <linux/atomic.h>
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
- for ((i) = 0; (i) < NR_IRQS; ++(i))
-
extern atomic_t ppc_n_lost_interrupts;
/* This number is used when no interrupt has been assigned */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index aa795ccef294..fd07f43d6622 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -81,12 +81,13 @@ struct kvmppc_vcpu_book3s {
u64 sdr1;
u64 hior;
u64 msr_mask;
- u64 vsid_next;
#ifdef CONFIG_PPC_BOOK3S_32
u32 vsid_pool[VSID_POOL_SIZE];
+ u32 vsid_next;
#else
- u64 vsid_first;
- u64 vsid_max;
+ u64 proto_vsid_first;
+ u64 proto_vsid_max;
+ u64 proto_vsid_next;
#endif
int context_id[SID_CONTEXTS];
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index a76254af0aaa..531fe0c3108f 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -20,18 +20,16 @@
#define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__
-/* These definitions relate to hypervisors that only exist when using
+/*
+ * These definitions relate to hypervisors that only exist when using
* a server type processor
*/
#ifdef CONFIG_PPC_BOOK3S
-//=============================================================================
-//
-// This control block contains the data that is shared between the
-// hypervisor (PLIC) and the OS.
-//
-//
-//----------------------------------------------------------------------------
+/*
+ * This control block contains the data that is shared between the
+ * hypervisor and the OS.
+ */
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/types.h>
@@ -43,123 +41,65 @@
*/
#define NR_LPPACAS 1
-
-/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
- * alignment is sufficient to prevent this */
+/*
+ * The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
+ * alignment is sufficient to prevent this
+ */
struct lppaca {
-//=============================================================================
-// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
-// NOTE: The xDynXyz fields are fields that will be dynamically changed by
-// PLIC when preparing to bring a processor online or when dispatching a
-// virtual processor!
-//=============================================================================
- u32 desc; // Eye catcher 0xD397D781 x00-x03
- u16 size; // Size of this struct x04-x05
- u16 reserved1; // Reserved x06-x07
- u16 reserved2:14; // Reserved x08-x09
- u8 shared_proc:1; // Shared processor indicator ...
- u8 secondary_thread:1; // Secondary thread indicator ...
- volatile u8 dyn_proc_status:8; // Dynamic Status of this proc x0A-x0A
- u8 secondary_thread_count; // Secondary thread count x0B-x0B
- volatile u16 dyn_hv_phys_proc_index;// Dynamic HV Physical Proc Index0C-x0D
- volatile u16 dyn_hv_log_proc_index;// Dynamic HV Logical Proc Indexx0E-x0F
- u32 decr_val; // Value for Decr programming x10-x13
- u32 pmc_val; // Value for PMC regs x14-x17
- volatile u32 dyn_hw_node_id; // Dynamic Hardware Node id x18-x1B
- volatile u32 dyn_hw_proc_id; // Dynamic Hardware Proc Id x1C-x1F
- volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23
- u32 dsei_data; // DSEI data x24-x27
- u64 sprg3; // SPRG3 value x28-x2F
- u8 reserved3[40]; // Reserved x30-x57
- volatile u8 vphn_assoc_counts[8]; // Virtual processor home node
- // associativity change counters x58-x5F
- u8 reserved4[32]; // Reserved x60-x7F
-
-//=============================================================================
-// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
-//=============================================================================
- // This Dword contains a byte for each type of interrupt that can occur.
- // The IPI is a count while the others are just a binary 1 or 0.
- union {
- u64 any_int;
- struct {
- u16 reserved; // Reserved - cleared by #mpasmbl
- u8 xirr_int; // Indicates xXirrValue is valid or Immed IO
- u8 ipi_cnt; // IPI Count
- u8 decr_int; // DECR interrupt occurred
- u8 pdc_int; // PDC interrupt occurred
- u8 quantum_int; // Interrupt quantum reached
- u8 old_plic_deferred_ext_int; // Old PLIC has a deferred XIRR pending
- } fields;
- } int_dword;
-
- // Whenever any fields in this Dword are set then PLIC will defer the
- // processing of external interrupts. Note that PLIC will store the
- // XIRR directly into the xXirrValue field so that another XIRR will
- // not be presented until this one clears. The layout of the low
- // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the
- // entire Dword is zero or not. A non-zero value in the low order
- // 2-bytes will result in SLIC being granted the highest thread
- // priority upon return. A 0 will return to SLIC as medium priority.
- u64 plic_defer_ints_area; // Entire Dword
-
- // Used to pass the real SRR0/1 from PLIC to SLIC as well as to
- // pass the target SRR0/1 from SLIC to PLIC on a SetAsrAndRfid.
- u64 saved_srr0; // Saved SRR0 x10-x17
- u64 saved_srr1; // Saved SRR1 x18-x1F
-
- // Used to pass parms from the OS to PLIC for SetAsrAndRfid
- u64 saved_gpr3; // Saved GPR3 x20-x27
- u64 saved_gpr4; // Saved GPR4 x28-x2F
- union {
- u64 saved_gpr5; /* Saved GPR5 x30-x37 */
- struct {
- u8 cede_latency_hint; /* x30 */
- u8 reserved[7]; /* x31-x36 */
- } fields;
- } gpr5_dword;
-
-
- u8 dtl_enable_mask; // Dispatch Trace Log mask x38-x38
- u8 donate_dedicated_cpu; // Donate dedicated CPU cycles x39-x39
- u8 fpregs_in_use; // FP regs in use x3A-x3A
- u8 pmcregs_in_use; // PMC regs in use x3B-x3B
- volatile u32 saved_decr; // Saved Decr Value x3C-x3F
- volatile u64 emulated_time_base;// Emulated TB for this thread x40-x47
- volatile u64 cur_plic_latency; // Unaccounted PLIC latency x48-x4F
- u64 tot_plic_latency; // Accumulated PLIC latency x50-x57
- u64 wait_state_cycles; // Wait cycles for this proc x58-x5F
- u64 end_of_quantum; // TB at end of quantum x60-x67
- u64 pdc_saved_sprg1; // Saved SPRG1 for PMC int x68-x6F
- u64 pdc_saved_srr0; // Saved SRR0 for PMC int x70-x77
- volatile u32 virtual_decr; // Virtual DECR for shared procsx78-x7B
- u16 slb_count; // # of SLBs to maintain x7C-x7D
- u8 idle; // Indicate OS is idle x7E
- u8 vmxregs_in_use; // VMX registers in use x7F
-
-
-//=============================================================================
-// CACHE_LINE_3 0x0100 - 0x017F: This line is shared with other processors
-//=============================================================================
- // This is the yield_count. An "odd" value (low bit on) means that
- // the processor is yielded (either because of an OS yield or a PLIC
- // preempt). An even value implies that the processor is currently
- // executing.
- // NOTE: This value will ALWAYS be zero for dedicated processors and
- // will NEVER be zero for shared processors (ie, initialized to a 1).
- volatile u32 yield_count; // PLIC increments each dispatchx00-x03
- volatile u32 dispersion_count; // dispatch changed phys cpu x04-x07
- volatile u64 cmo_faults; // CMO page fault count x08-x0F
- volatile u64 cmo_fault_time; // CMO page fault time x10-x17
- u8 reserved7[104]; // Reserved x18-x7F
-
-//=============================================================================
-// CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
-//=============================================================================
- u32 page_ins; // CMO Hint - # page ins by OS x00-x03
- u8 reserved8[148]; // Reserved x04-x97
- volatile u64 dtl_idx; // Dispatch Trace Log head idx x98-x9F
- u8 reserved9[96]; // Reserved xA0-xFF
+ /* cacheline 1 contains read-only data */
+
+ u32 desc; /* Eye catcher 0xD397D781 */
+ u16 size; /* Size of this struct */
+ u16 reserved1;
+ u16 reserved2:14;
+ u8 shared_proc:1; /* Shared processor indicator */
+ u8 secondary_thread:1; /* Secondary thread indicator */
+ u8 reserved3[14];
+ volatile u32 dyn_hw_node_id; /* Dynamic hardware node id */
+ volatile u32 dyn_hw_proc_id; /* Dynamic hardware proc id */
+ u8 reserved4[56];
+ volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
+ /* associativity change counters */
+ u8 reserved5[32];
+
+ /* cacheline 2 contains local read-write data */
+
+ u8 reserved6[48];
+ u8 cede_latency_hint;
+ u8 reserved7[7];
+ u8 dtl_enable_mask; /* Dispatch Trace Log mask */
+ u8 donate_dedicated_cpu; /* Donate dedicated CPU cycles */
+ u8 fpregs_in_use;
+ u8 pmcregs_in_use;
+ u8 reserved8[28];
+ u64 wait_state_cycles; /* Wait cycles for this proc */
+ u8 reserved9[28];
+ u16 slb_count; /* # of SLBs to maintain */
+ u8 idle; /* Indicate OS is idle */
+ u8 vmxregs_in_use;
+
+ /* cacheline 3 is shared with other processors */
+
+ /*
+ * This is the yield_count. An "odd" value (low bit on) means that
+ * the processor is yielded (either because of an OS yield or a
+ * hypervisor preempt). An even value implies that the processor is
+ * currently executing.
+ * NOTE: This value will ALWAYS be zero for dedicated processors and
+ * will NEVER be zero for shared processors (ie, initialized to a 1).
+ */
+ volatile u32 yield_count;
+ volatile u32 dispersion_count; /* dispatch changed physical cpu */
+ volatile u64 cmo_faults; /* CMO page fault count */
+ volatile u64 cmo_fault_time; /* CMO page fault time */
+ u8 reserved10[104];
+
+ /* cacheline 4-5 */
+
+ u32 page_ins; /* CMO Hint - # page ins by OS */
+ u8 reserved11[148];
+ volatile u64 dtl_idx; /* Dispatch Trace Log head index */
+ u8 reserved12[96];
} __attribute__((__aligned__(0x400)));
extern struct lppaca lppaca[];
@@ -172,13 +112,13 @@ extern struct lppaca lppaca[];
* ESID is stored in the lower 64bits, then the VSID.
*/
struct slb_shadow {
- u32 persistent; // Number of persistent SLBs x00-x03
- u32 buffer_length; // Total shadow buffer length x04-x07
- u64 reserved; // Alignment x08-x0f
+ u32 persistent; /* Number of persistent SLBs */
+ u32 buffer_length; /* Total shadow buffer length */
+ u64 reserved;
struct {
u64 esid;
u64 vsid;
- } save_area[SLB_NUM_BOLTED]; // x10-x40
+ } save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
extern struct slb_shadow slb_shadow[];
diff --git a/arch/powerpc/include/asm/lv1call.h b/arch/powerpc/include/asm/lv1call.h
index 233f9ecae761..f5117674bf92 100644
--- a/arch/powerpc/include/asm/lv1call.h
+++ b/arch/powerpc/include/asm/lv1call.h
@@ -265,8 +265,8 @@ LV1_CALL(get_spe_irq_outlet, 2, 1, 78 )
LV1_CALL(set_spe_privilege_state_area_1_register, 3, 0, 79 )
LV1_CALL(create_repository_node, 6, 0, 90 )
LV1_CALL(read_repository_node, 5, 2, 91 )
-LV1_CALL(modify_repository_node_value, 6, 0, 92 )
-LV1_CALL(remove_repository_node, 4, 0, 93 )
+LV1_CALL(write_repository_node, 6, 0, 92 )
+LV1_CALL(delete_repository_node, 4, 0, 93 )
LV1_CALL(read_htab_entries, 2, 5, 95 )
LV1_CALL(set_dabr, 2, 0, 96 )
LV1_CALL(get_total_execution_time, 2, 1, 103 )
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index c65b9294376e..c9f698a994be 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -275,9 +275,6 @@ struct mpic
unsigned int isu_mask;
/* Number of sources */
unsigned int num_sources;
- /* default senses array */
- unsigned char *senses;
- unsigned int senses_count;
/* vector numbers used for internal sources (ipi/timers) */
unsigned int ipi_vecs[4];
@@ -415,21 +412,6 @@ extern struct mpic *mpic_alloc(struct device_node *node,
extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
phys_addr_t phys_addr);
-/* Set default sense codes
- *
- * @mpic: controller
- * @senses: array of sense codes
- * @count: size of above array
- *
- * Optionally provide an array (indexed on hardware interrupt numbers
- * for this MPIC) of default sense codes for the chip. Those are linux
- * sense codes IRQ_TYPE_*
- *
- * The driver gets ownership of the pointer, don't dispose of it or
- * anything like that. __init only.
- */
-extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count);
-
/* Initialize the controller. After this has been called, none of the above
* should be called again for this mpic
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
index 3ec37dc9003e..326d33ca55cd 100644
--- a/arch/powerpc/include/asm/mpic_msgr.h
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <asm/smp.h>
struct mpic_msgr {
u32 __iomem *base;
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h
index 23cd6cc30bcf..c07edfe98b98 100644
--- a/arch/powerpc/include/asm/pSeries_reconfig.h
+++ b/arch/powerpc/include/asm/pSeries_reconfig.h
@@ -13,6 +13,18 @@
#define PSERIES_RECONFIG_REMOVE 0x0002
#define PSERIES_DRCONF_MEM_ADD 0x0003
#define PSERIES_DRCONF_MEM_REMOVE 0x0004
+#define PSERIES_UPDATE_PROPERTY 0x0005
+
+/**
+ * pSeries_reconfig_prop_update - Notifier value structure for OFDT property updates
+ *
+ * @node: Device tree node which owns the property being updated
+ * @property: Updated property
+ */
+struct pSeries_reconfig_prop_update {
+ struct device_node *node;
+ struct property *property;
+};
#ifdef CONFIG_PPC_PSERIES
extern int pSeries_reconfig_notifier_register(struct notifier_block *);
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 50f73aa2ba21..15444204a3a1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -369,7 +369,15 @@ BEGIN_FTR_SECTION \
END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif
-
+#ifdef CONFIG_PPC64
+#define MTOCRF(FXM, RS) \
+ BEGIN_FTR_SECTION_NESTED(848); \
+ mtcrf (FXM), (RS); \
+ FTR_SECTION_ELSE_NESTED(848); \
+ mtocrf (FXM), (RS); \
+ ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
+#endif
+
/*
* This instruction is not implemented on the PPC 603 or 601; however, on
* the 403GCX and 405GP tlbia IS defined and tlbie is not.
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8e2d0371fe1e..55e85631c42e 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -74,9 +74,6 @@ struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-extern void prepare_to_copy(struct task_struct *tsk);
-
/* Create a new kernel thread. */
extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
@@ -386,7 +383,6 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
-void cpu_idle_wait(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 84cc7840cd18..9c21ed42aba6 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -354,12 +354,6 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
#define PTRACE_GETREGS64 22
#define PTRACE_SETREGS64 23
-/* (old) PTRACE requests with inverted arguments */
-#define PPC_PTRACE_GETREGS 0x99 /* Get GPRs 0 - 31 */
-#define PPC_PTRACE_SETREGS 0x98 /* Set GPRs 0 - 31 */
-#define PPC_PTRACE_GETFPREGS 0x97 /* Get FPRs 0 - 31 */
-#define PPC_PTRACE_SETFPREGS 0x96 /* Set FPRs 0 - 31 */
-
/* Calls to trace a 64bit program from a 32bit program */
#define PPC_PTRACE_PEEKTEXT_3264 0x95
#define PPC_PTRACE_PEEKDATA_3264 0x94
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index b86faa9107da..8a97aa7289d3 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -15,11 +15,6 @@
#ifndef __ASM_POWERPC_REG_BOOKE_H__
#define __ASM_POWERPC_REG_BOOKE_H__
-#ifdef CONFIG_BOOKE_WDT
-extern u32 booke_wdt_enabled;
-extern u32 booke_wdt_period;
-#endif /* CONFIG_BOOKE_WDT */
-
/* Machine State Register (MSR) Fields */
#define MSR_GS (1<<28) /* Guest state */
#define MSR_UCLE (1<<26) /* User-mode cache lock enable */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index caf82d0a00de..1a6320290d26 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -21,7 +21,6 @@ extern void disable_kernel_fp(void);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
-extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
@@ -40,10 +39,15 @@ static inline void discard_lazy_cpu_state(void)
#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
+extern void giveup_altivec(struct task_struct *);
+extern void giveup_altivec_notask(void);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
+static inline void giveup_altivec(struct task_struct *t)
+{
+}
#endif
#ifdef CONFIG_VSX
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 4a741c7efd02..a556ccc16b58 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -62,21 +62,8 @@ struct thread_info {
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
-/* thread information allocation */
-
-#if THREAD_SHIFT >= PAGE_SHIFT
-
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
-#else /* THREAD_SHIFT < PAGE_SHIFT */
-
-#define __HAVE_ARCH_THREAD_INFO_ALLOCATOR
-
-extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node);
-extern void free_thread_info(struct thread_info *ti);
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
@@ -126,7 +113,6 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_NOERROR (1<<TIF_NOERROR)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
-#define _TIF_RUNLATCH (1<<TIF_RUNLATCH)
#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index c97185885c6d..852ed1b384f6 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -18,12 +18,6 @@ struct device_node;
*/
#define RECLAIM_DISTANCE 10
-/*
- * Avoid creating an extra level of balancing (SD_ALLNODES) on the largest
- * POWER7 boxes which have a maximum of 32 nodes.
- */
-#define SD_NODES_PER_DOMAIN 32
-
#include <asm/mmzone.h>
static inline int cpu_to_node(int cpu)
@@ -51,36 +45,6 @@ static inline int pcibus_to_node(struct pci_bus *bus)
cpu_all_mask : \
cpumask_of_node(pcibus_to_node(bus)))
-/* sched_domains SD_NODE_INIT for PPC64 machines */
-#define SD_NODE_INIT (struct sched_domain) { \
- .min_interval = 8, \
- .max_interval = 32, \
- .busy_factor = 32, \
- .imbalance_pct = 125, \
- .cache_nice_tries = 1, \
- .busy_idx = 3, \
- .idle_idx = 1, \
- .newidle_idx = 0, \
- .wake_idx = 0, \
- .forkexec_idx = 0, \
- \
- .flags = 1*SD_LOAD_BALANCE \
- | 0*SD_BALANCE_NEWIDLE \
- | 1*SD_BALANCE_EXEC \
- | 1*SD_BALANCE_FORK \
- | 0*SD_BALANCE_WAKE \
- | 1*SD_WAKE_AFFINE \
- | 0*SD_PREFER_LOCAL \
- | 0*SD_SHARE_CPUPOWER \
- | 0*SD_POWERSAVINGS_BALANCE \
- | 0*SD_SHARE_PKG_RESOURCES \
- | 1*SD_SERIALIZE \
- | 0*SD_PREFER_SIBLING \
- , \
- .last_balance = jiffies, \
- .balance_interval = 1, \
-}
-
extern int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 6bfd5ffe1d4f..b19adf751dd9 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -46,6 +46,48 @@
struct iommu_table;
+/*
+ * Platform Facilities Option (PFO)-specific data
+ */
+
+/* Starting unit address for PFO devices on the VIO BUS */
+#define VIO_BASE_PFO_UA 0x50000000
+
+/**
+ * vio_pfo_op - PFO operation parameters
+ *
+ * @flags: h_call subfunctions and modifiers
+ * @in: Input data block logical real address
+ * @inlen: If non-negative, the length of the input data block. If negative,
+ * the length of the input data descriptor list in bytes.
+ * @out: Output data block logical real address
+ * @outlen: If non-negative, the length of the output data block. If negative,
+ * the length of the output data descriptor list in bytes.
+ * @csbcpb: Logical real address of the 4k naturally-aligned storage block
+ * containing the CSB & optional FC field specific CPB
+ * @timeout: # of milliseconds to retry h_call, 0 for no timeout.
+ * @hcall_err: pointer to return the h_call return value, else NULL
+ */
+struct vio_pfo_op {
+ u64 flags;
+ s64 in;
+ s64 inlen;
+ s64 out;
+ s64 outlen;
+ u64 csbcpb;
+ void *done;
+ unsigned long handle;
+ unsigned int timeout;
+ long hcall_err;
+};
+
+/* End PFO specific data */
+
+enum vio_dev_family {
+ VDEVICE, /* The OF node is a child of /vdevice */
+ PFO, /* The OF node is a child of /ibm,platform-facilities */
+};
+
/**
* vio_dev - This structure is used to describe virtual I/O devices.
*
@@ -58,6 +100,7 @@ struct vio_dev {
const char *name;
const char *type;
uint32_t unit_address;
+ uint32_t resource_id;
unsigned int irq;
struct {
size_t desired;
@@ -65,6 +108,7 @@ struct vio_dev {
size_t allocated;
atomic_t allocs_failed;
} cmo;
+ enum vio_dev_family family;
struct device dev;
};
@@ -95,6 +139,8 @@ extern void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired);
extern void __devinit vio_unregister_device(struct vio_dev *dev);
+extern int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op);
+
struct device_node;
extern struct vio_dev *vio_register_device_node(
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index f5808a35688c..83afacd3ba7b 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -28,7 +28,7 @@ endif
obj-y := cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
- init_task.o process.o systbl.o idle.o \
+ process.o systbl.o idle.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 34b8afe94a50..4554dc2fe857 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -188,10 +188,6 @@ int main(void)
DEFINE(SLBSHADOW_STACKESID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
- DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
- DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
- DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
- DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
DEFINE(LPPACA_YIELDCOUNT, offsetof(struct lppaca, yield_count));
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index f8a7a1a1a9f4..ed1718feb9d9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -63,15 +63,9 @@ system_call_common:
std r0,GPR0(r1)
std r10,GPR1(r1)
ACCOUNT_CPU_USER_ENTRY(r10, r11)
- /*
- * This "crclr so" clears CR0.SO, which is the error indication on
- * return from this system call. There must be no cmp instruction
- * between it and the "mfcr r9" below, otherwise if XER.SO is set,
- * CR0.SO will get set, causing all system calls to appear to fail.
- */
- crclr so
std r2,GPR2(r1)
std r3,GPR3(r1)
+ mfcr r2
std r4,GPR4(r1)
std r5,GPR5(r1)
std r6,GPR6(r1)
@@ -82,18 +76,20 @@ system_call_common:
std r11,GPR10(r1)
std r11,GPR11(r1)
std r11,GPR12(r1)
+ std r11,_XER(r1)
+ std r11,_CTR(r1)
std r9,GPR13(r1)
- mfcr r9
mflr r10
+ /*
+ * This clears CR0.SO (bit 28), which is the error indication on
+ * return from this system call.
+ */
+ rldimi r2,r11,28,(63-28)
li r11,0xc01
- std r9,_CCR(r1)
std r10,_LINK(r1)
std r11,_TRAP(r1)
- mfxer r9
- mfctr r10
- std r9,_XER(r1)
- std r10,_CTR(r1)
std r3,ORIG_GPR3(r1)
+ std r2,_CCR(r1)
ld r2,PACATOC(r13)
addi r9,r1,STACK_FRAME_OVERHEAD
ld r11,exception_marker@toc(r2)
@@ -154,7 +150,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
ld r10,TI_FLAGS(r11)
andi. r11,r10,_TIF_SYSCALL_T_OR_A
bne- syscall_dotrace
-syscall_dotrace_cont:
+.Lsyscall_dotrace_cont:
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -211,7 +207,7 @@ syscall_exit:
cmpld r3,r11
ld r5,_CCR(r1)
bge- syscall_error
-syscall_error_cont:
+.Lsyscall_error_cont:
ld r7,_NIP(r1)
BEGIN_FTR_SECTION
stdcx. r0,0,r1 /* to clear the reservation */
@@ -246,7 +242,7 @@ syscall_error:
oris r5,r5,0x1000 /* Set SO bit in CR */
neg r3,r3
std r5,_CCR(r1)
- b syscall_error_cont
+ b .Lsyscall_error_cont
/* Traced system call support */
syscall_dotrace:
@@ -268,7 +264,7 @@ syscall_dotrace:
addi r9,r1,STACK_FRAME_OVERHEAD
clrrdi r10,r1,THREAD_SHIFT
ld r10,TI_FLAGS(r10)
- b syscall_dotrace_cont
+ b .Lsyscall_dotrace_cont
syscall_enosys:
li r3,-ENOSYS
@@ -588,23 +584,19 @@ _GLOBAL(ret_from_except_lite)
fast_exc_return_irq:
restore:
/*
- * This is the main kernel exit path, we first check if we
- * have to change our interrupt state.
+ * This is the main kernel exit path. First we check if we
+ * are about to re-enable interrupts
*/
ld r5,SOFTE(r1)
lbz r6,PACASOFTIRQEN(r13)
- cmpwi cr1,r5,0
- cmpw cr0,r5,r6
- beq cr0,4f
+ cmpwi cr0,r5,0
+ beq restore_irq_off
- /* We do, handle disable first, which is easy */
- bne cr1,3f;
- li r0,0
- stb r0,PACASOFTIRQEN(r13);
- TRACE_DISABLE_INTS
- b 4f
+ /* We are enabling, were we already enabled ? Yes, just return */
+ cmpwi cr0,r6,1
+ beq cr0,do_restore
-3: /*
+ /*
* We are about to soft-enable interrupts (we are hard disabled
* at this point). We check if there's anything that needs to
* be replayed first.
@@ -626,7 +618,7 @@ restore_no_replay:
/*
* Final return path. BookE is handled in a different file
*/
-4:
+do_restore:
#ifdef CONFIG_PPC_BOOK3E
b .exception_return_book3e
#else
@@ -700,6 +692,25 @@ fast_exception_return:
#endif /* CONFIG_PPC_BOOK3E */
/*
+ * We are returning to a context with interrupts soft disabled.
+ *
+ * However, we may also be about to hard enable, so we need to
+ * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
+ * or that bit can get out of sync and bad things will happen
+ */
+restore_irq_off:
+ ld r3,_MSR(r1)
+ lbz r7,PACAIRQHAPPENED(r13)
+ andi. r0,r3,MSR_EE
+ beq 1f
+ rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS
+ stb r7,PACAIRQHAPPENED(r13)
+1: li r0,0
+ stb r0,PACASOFTIRQEN(r13);
+ TRACE_DISABLE_INTS
+ b do_restore
+
+ /*
* Something did happen, check if a re-emit is needed
* (this also clears paca->irq_happened)
*/
@@ -748,6 +759,9 @@ restore_check_irq_replay:
#endif /* CONFIG_PPC_BOOK3E */
1: b .ret_from_except /* What else to do here ? */
+
+
+3:
do_work:
#ifdef CONFIG_PREEMPT
andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -767,16 +781,6 @@ do_work:
SOFT_DISABLE_INTS(r3,r4)
1: bl .preempt_schedule_irq
- /* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 0
-#else
- ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
- li r0,PACA_IRQ_HARD_DIS
- stb r0,PACAIRQHAPPENED(r13)
-
/* Re-test flags and eventually loop */
clrrdi r9,r1,THREAD_SHIFT
ld r4,TI_FLAGS(r9)
@@ -787,14 +791,6 @@ do_work:
user_work:
#endif /* CONFIG_PREEMPT */
- /* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
- wrteei 1
-#else
- ori r10,r10,MSR_EE
- mtmsrd r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
andi. r0,r4,_TIF_NEED_RESCHED
beq 1f
bl .restore_interrupts
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index cb705fdbb458..f7bed44ee165 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -94,12 +94,10 @@ machine_check_pSeries_1:
data_access_pSeries:
HMT_MEDIUM
SET_SCRATCH0(r13)
-#ifndef CONFIG_POWER4_ONLY
BEGIN_FTR_SECTION
b data_access_check_stab
data_access_not_stab:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
-#endif
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
KVMTEST, 0x300)
@@ -301,7 +299,6 @@ machine_check_fwnmi:
EXC_STD, KVMTEST, 0x200)
KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
-#ifndef CONFIG_POWER4_ONLY
/* moved from 0x300 */
data_access_check_stab:
GET_PACA(r13)
@@ -328,7 +325,6 @@ do_stab_bolted_pSeries:
GET_SCRATCH0(r10)
std r10,PACA_EXSLB+EX_R13(r13)
EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
-#endif /* CONFIG_POWER4_ONLY */
KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
@@ -768,8 +764,8 @@ alignment_common:
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
+ DISABLE_INTS
addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
bl .alignment_exception
b .ret_from_except
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 7dd2981bcc50..22d608e8bb7d 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -778,14 +778,6 @@ _GLOBAL(__fixup_440A_mcheck)
blr
/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The 44x core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
- blr
-
-/*
* extern void giveup_fpu(struct task_struct *prev)
*
* The 44x core does not have an FPU.
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 28e62598d0e8..de80e0f9a2bd 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -874,14 +874,6 @@ _GLOBAL(__setup_e500mc_ivors)
sync
blr
-/*
- * extern void giveup_altivec(struct task_struct *prev)
- *
- * The e500 core does not have an AltiVec unit.
- */
-_GLOBAL(giveup_altivec)
- blr
-
#ifdef CONFIG_SPE
/*
* extern void giveup_spe(struct task_struct *prev)
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 6d2209ac0c44..2099d9a879e8 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -113,29 +113,6 @@ void cpu_idle(void)
}
}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs come out of the old
- * idle loop and start using the new idle loop.
- * Required while changing idle handler on SMP systems.
- * Caller must have changed idle handler to the new value before the call.
- * This window may be larger on shared systems.
- */
-void cpu_idle_wait(void)
-{
- int cpu;
- smp_mb();
-
- /* kick all the CPUs so that they exit out of old idle routine */
- get_online_cpus();
- for_each_online_cpu(cpu) {
- if (cpu != smp_processor_id())
- smp_send_reschedule(cpu);
- }
- put_online_cpus();
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
int powersave_nap;
#ifdef CONFIG_SYSCTL
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
deleted file mode 100644
index d076d465dbd1..000000000000
--- a/arch/powerpc/kernel/init_task.c
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/mm.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/init_task.h>
-#include <linux/fs.h>
-#include <linux/mqueue.h>
-#include <asm/uaccess.h>
-
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-/*
- * Initial thread structure.
- *
- * We need to make sure that this is 16384-byte aligned due to the
- * way process stacks are handled. This is done by having a special
- * "init_task" linker map entry..
- */
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
-
-/*
- * Initial task structure.
- *
- * All other task structs will be allocated on slabs in fork.c
- */
-struct task_struct init_task = INIT_TASK(init_task);
-
-EXPORT_SYMBOL(init_task);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 5ec1b2354ca6..7835a5e1ea5f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en)
*/
if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
__hard_irq_disable();
+#ifdef CONFIG_TRACE_IRQFLAGS
+ else {
+ /*
+ * We should already be hard disabled here. We had bugs
+ * where that wasn't the case so let's dbl check it and
+ * warn if we are wrong. Only do that when IRQ tracing
+ * is enabled as mfmsr() can be costly.
+ */
+ if (WARN_ON(mfmsr() & MSR_EE))
+ __hard_irq_disable();
+ }
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
set_soft_enabled(0);
/*
@@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
* if they are currently disabled. This is typically called before
* schedule() or do_signal() when returning to userspace. We do it
* in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
*/
void restore_interrupts(void)
{
- if (irqs_disabled())
+ if (irqs_disabled()) {
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
local_irq_enable();
+ } else
+ __hard_irq_enable();
}
#endif /* CONFIG_PPC64 */
@@ -330,14 +349,10 @@ void migrate_irqs(void)
alloc_cpumask_var(&mask, GFP_KERNEL);
- for_each_irq(irq) {
+ for_each_irq_desc(irq, desc) {
struct irq_data *data;
struct irq_chip *chip;
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
-
data = irq_desc_get_irq_data(desc);
if (irqd_is_per_cpu(data))
continue;
@@ -572,7 +587,7 @@ int irq_choose_cpu(const struct cpumask *mask)
{
int cpuid;
- if (cpumask_equal(mask, cpu_all_mask)) {
+ if (cpumask_equal(mask, cpu_online_mask)) {
static int irq_rover;
static DEFINE_RAW_SPINLOCK(irq_rover_lock);
unsigned long flags;
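
An aside on the irq.c changes above: ppc64 normally only soft-disables interrupts and records any real (hard) disable in paca->irq_happened, so both the WARN_ON in arch_local_irq_restore() and the resync in restore_interrupts() are about keeping that bookkeeping from drifting out of sync with the actual MSR[EE] state. Below is a minimal user-space model of the restore_interrupts() resync, illustrative only and deliberately simplified; the flag name is taken from the patch, everything else is a toy.

#include <stdbool.h>
#include <stdio.h>

#define PACA_IRQ_HARD_DIS 0x01      /* flag name as used in the patch */

static bool soft_enabled;           /* stand-in for the soft-enable state */
static bool msr_ee;                 /* stand-in for the hard MSR[EE] bit  */
static unsigned int irq_happened;   /* stand-in for paca->irq_happened    */

static void local_irq_enable_model(void)
{
    /* The real code replays anything recorded in irq_happened before
     * enabling; the toy just clears it and flips both enable bits. */
    irq_happened = 0;
    soft_enabled = true;
    msr_ee = true;
}

/* Mirrors the new restore_interrupts(): entered hard-disabled but with
 * nothing recorded in irq_happened, so it must resync before enabling. */
static void restore_interrupts_model(void)
{
    if (!soft_enabled) {
        irq_happened |= PACA_IRQ_HARD_DIS;   /* the resync added above */
        local_irq_enable_model();
    } else {
        msr_ee = true;                       /* __hard_irq_enable()    */
    }
}

int main(void)
{
    soft_enabled = false;   /* soft-disabled on the way out of an exception */
    msr_ee = false;         /* hard-disabled by the exception exit path ... */
    irq_happened = 0;       /* ... but not recorded, the state being fixed  */
    restore_interrupts_model();
    printf("EE=%d soft=%d happened=%#x\n", msr_ee, soft_enabled, irq_happened);
    return 0;
}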
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index c957b1202bdc..5df777794403 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -23,14 +23,11 @@
void machine_kexec_mask_interrupts(void) {
unsigned int i;
+ struct irq_desc *desc;
- for_each_irq(i) {
- struct irq_desc *desc = irq_to_desc(i);
+ for_each_irq_desc(i, desc) {
struct irq_chip *chip;
- if (!desc)
- continue;
-
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7cd07b42ca1a..386d57f66f28 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -738,8 +738,23 @@ relocate_new_kernel:
mr r5, r31
li r0, 0
-#elif defined(CONFIG_44x) && !defined(CONFIG_PPC_47x)
+#elif defined(CONFIG_44x)
+ /* Save our parameters */
+ mr r29, r3
+ mr r30, r4
+ mr r31, r5
+
+#ifdef CONFIG_PPC_47x
+ /* Check for 47x cores */
+ mfspr r3,SPRN_PVR
+ srwi r3,r3,16
+ cmplwi cr0,r3,PVR_476@h
+ beq setup_map_47x
+ cmplwi cr0,r3,PVR_476_ISS@h
+ beq setup_map_47x
+#endif /* CONFIG_PPC_47x */
+
/*
* Code for setting up 1:1 mapping for PPC440x for KEXEC
*
@@ -753,16 +768,15 @@ relocate_new_kernel:
* 5) Invalidate the tmp mapping.
*
* - Based on the kexec support code for FSL BookE
- * - Doesn't support 47x yet.
*
*/
- /* Save our parameters */
- mr r29, r3
- mr r30, r4
- mr r31, r5
- /* Load our MSR_IS and TID to MMUCR for TLB search */
- mfspr r3,SPRN_PID
+ /*
+ * Load the PID with kernel PID (0).
+ * Also load our MSR_IS and TID to MMUCR for TLB search.
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3
mfmsr r4
andi. r4,r4,MSR_IS@l
beq wmmucr
@@ -900,6 +914,179 @@ next_tlb:
li r3, 0
tlbwe r3, r24, PPC44x_TLB_PAGEID
sync
+ b ppc44x_map_done
+
+#ifdef CONFIG_PPC_47x
+
+ /* 1:1 mapping for 47x */
+
+setup_map_47x:
+
+ /*
+ * Load the kernel pid (0) to PID and also to MMUCR[TID].
+ * Also set the MSR IS->MMUCR STS
+ */
+ li r3, 0
+ mtspr SPRN_PID, r3 /* Set PID */
+ mfmsr r4 /* Get MSR */
+ andi. r4, r4, MSR_IS@l /* TS=1? */
+ beq 1f /* If not, leave STS=0 */
+ oris r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
+1: mtspr SPRN_MMUCR, r3 /* Put MMUCR */
+ sync
+
+ /* Find the entry we are running from */
+ bl 2f
+2: mflr r23
+ tlbsx r23, 0, r23
+ tlbre r24, r23, 0 /* TLB Word 0 */
+ tlbre r25, r23, 1 /* TLB Word 1 */
+ tlbre r26, r23, 2 /* TLB Word 2 */
+
+
+ /*
+ * Invalidates all the tlb entries by writing to 256 RPNs(r4)
+ * of 4k page size in all 4 ways (0-3 in r3).
+ * This would invalidate the entire UTLB including the one we are
+ * running from. However the shadow TLB entries would help us
+ * to continue the execution, until we flush them (rfi/isync).
+ */
+ addis r3, 0, 0x8000 /* specify the way */
+ addi r4, 0, 0 /* TLB Word0 = (EPN=0, VALID = 0) */
+ addi r5, 0, 0
+ b clear_utlb_entry
+
+ /* Align the loop to speed things up. from head_44x.S */
+ .align 6
+
+clear_utlb_entry:
+
+ tlbwe r4, r3, 0
+ tlbwe r5, r3, 1
+ tlbwe r5, r3, 2
+ addis r3, r3, 0x2000 /* Increment the way */
+ cmpwi r3, 0
+ bne clear_utlb_entry
+ addis r3, 0, 0x8000
+ addis r4, r4, 0x100 /* Increment the EPN */
+ cmpwi r4, 0
+ bne clear_utlb_entry
+
+ /* Create the entries in the other address space */
+ mfmsr r5
+ rlwinm r7, r5, 27, 31, 31 /* Get the TS (Bit 26) from MSR */
+ xori r7, r7, 1 /* r7 = !TS */
+
+ insrwi r24, r7, 1, 21 /* Change the TS in the saved TLB word 0 */
+
+ /*
+ * write out the TLB entries for the tmp mapping
+ * Use way '0' so that we could easily invalidate it later.
+ */
+ lis r3, 0x8000 /* Way '0' */
+
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Update the msr to the new TS */
+ insrwi r5, r7, 1, 26
+
+ bl 1f
+1: mflr r6
+ addi r6, r6, (2f-1b)
+
+ mtspr SPRN_SRR0, r6
+ mtspr SPRN_SRR1, r5
+ rfi
+
+ /*
+ * Now we are in the tmp address space.
+ * Create a 1:1 mapping for 0-2GiB in the original TS.
+ */
+2:
+ li r3, 0
+ li r4, 0 /* TLB Word 0 */
+ li r5, 0 /* TLB Word 1 */
+ li r6, 0
+ ori r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */
+
+ li r8, 0 /* PageIndex */
+
+ xori r7, r7, 1 /* revert back to original TS */
+
+write_utlb:
+ rotlwi r5, r8, 28 /* RPN = PageIndex * 256M */
+ /* ERPN = 0 as we don't use memory above 2G */
+
+ mr r4, r5 /* EPN = RPN */
+ ori r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
+ insrwi r4, r7, 1, 21 /* Insert the TS to Word 0 */
+
+ tlbwe r4, r3, 0 /* Write out the entries */
+ tlbwe r5, r3, 1
+ tlbwe r6, r3, 2
+ addi r8, r8, 1
+ cmpwi r8, 8 /* Have we completed ? */
+ bne write_utlb
+
+ /* make sure we complete the TLB write up */
+ isync
+
+ /*
+ * Prepare to jump to the 1:1 mapping.
+ * 1) Extract page size of the tmp mapping
+ * DSIZ = TLB_Word0[22:27]
+ * 2) Calculate the physical address of the address
+ * to jump to.
+ */
+ rlwinm r10, r24, 0, 22, 27
+
+ cmpwi r10, PPC47x_TLB0_4K
+ bne 0f
+ li r10, 0x1000 /* r10 = 4k */
+ bl 1f
+
+0:
+ /* Defaults to 256M */
+ lis r10, 0x1000
+
+ bl 1f
+1: mflr r4
+ addi r4, r4, (2f-1b) /* virtual address of 2f */
+
+ subi r11, r10, 1 /* offsetmask = Pagesize - 1 */
+ not r10, r11 /* Pagemask = ~(offsetmask) */
+
+ and r5, r25, r10 /* Physical page */
+ and r6, r4, r11 /* offset within the current page */
+
+ or r5, r5, r6 /* Physical address for 2f */
+
+ /* Switch the TS in MSR to the original one */
+ mfmsr r8
+ insrwi r8, r7, 1, 26
+
+ mtspr SPRN_SRR1, r8
+ mtspr SPRN_SRR0, r5
+ rfi
+
+2:
+ /* Invalidate the tmp mapping */
+ lis r3, 0x8000 /* Way '0' */
+
+ clrrwi r24, r24, 12 /* Clear the valid bit */
+ tlbwe r24, r3, 0
+ tlbwe r25, r3, 1
+ tlbwe r26, r3, 2
+
+ /* Make sure we complete the TLB write and flush the shadow TLB */
+ isync
+
+#endif
+
+ppc44x_map_done:
+
/* Restore the parameters */
mr r3, r29
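
A note on the 47x address arithmetic above: once the temporary mapping is installed, the code jumps to the 1:1 mapping by combining the physical page taken from the saved TLB word 1 with the offset of label 2f inside the current page (r10 holds the page mask, r11 the offset mask). The same computation in stand-alone C, with made-up example values, purely to illustrate the masking:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the r10/r11 mask arithmetic in the 47x kexec path above:
 * offsetmask = pagesize - 1, pagemask = ~offsetmask, and the target is
 * (physical page) | (offset of the label within the current page). */
static uint32_t phys_target(uint32_t tlb_word1, uint32_t label_va,
                            uint32_t pagesize)
{
    uint32_t offsetmask = pagesize - 1;
    uint32_t pagemask   = ~offsetmask;

    return (tlb_word1 & pagemask) | (label_va & offsetmask);
}

int main(void)
{
    /* Hypothetical example: a 256M mapping whose physical page is
     * 0x10000000, with the label 0x1234 bytes into the page. */
    printf("%#x\n", (unsigned)phys_target(0x10000000, 0xc0001234, 0x10000000));
    return 0;
}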
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 0bb1f98613ba..fbe1a12dc7f1 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -36,10 +36,7 @@ struct lppaca lppaca[] = {
[0 ... (NR_LPPACAS-1)] = {
.desc = 0xd397d781, /* "LpPa" */
.size = sizeof(struct lppaca),
- .dyn_proc_status = 2,
- .decr_val = 0x00ff0000,
.fpregs_in_use = 1,
- .end_of_quantum = 0xfffffffffffffffful,
.slb_count = 64,
.vmxregs_in_use = 0,
.page_ins = 0,
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4937c9690090..710f400476de 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -124,7 +124,7 @@ void enable_kernel_altivec(void)
if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
giveup_altivec(current);
else
- giveup_altivec(NULL); /* just enable AltiVec for kernel - force */
+ giveup_altivec_notask();
#else
giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
@@ -711,18 +711,21 @@ release_thread(struct task_struct *t)
}
/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
+ * this gets called so that we can store coprocessor state into memory and
+ * copy the current task into the new thread.
*/
-void prepare_to_copy(struct task_struct *tsk)
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
- flush_fp_to_thread(current);
- flush_altivec_to_thread(current);
- flush_vsx_to_thread(current);
- flush_spe_to_thread(current);
+ flush_fp_to_thread(src);
+ flush_altivec_to_thread(src);
+ flush_vsx_to_thread(src);
+ flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
- flush_ptrace_hw_breakpoint(tsk);
+ flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
+ *dst = *src;
+ return 0;
}
/*
@@ -1252,37 +1255,6 @@ void __ppc64_runlatch_off(void)
}
#endif /* CONFIG_PPC64 */
-#if THREAD_SHIFT < PAGE_SHIFT
-
-static struct kmem_cache *thread_info_cache;
-
-struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
-{
- struct thread_info *ti;
-
- ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
- if (unlikely(ti == NULL))
- return NULL;
-#ifdef CONFIG_DEBUG_STACK_USAGE
- memset(ti, 0, THREAD_SIZE);
-#endif
- return ti;
-}
-
-void free_thread_info(struct thread_info *ti)
-{
- kmem_cache_free(thread_info_cache, ti);
-}
-
-void thread_info_cache_init(void)
-{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
- THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
-}
-
-#endif /* THREAD_SHIFT < PAGE_SHIFT */
-
unsigned long arch_align_stack(unsigned long sp)
{
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
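
For context on the prepare_to_copy() to arch_dup_task_struct() conversion above: the coprocessor-state flush now lives in the arch hook that the generic fork path calls while duplicating the task. The sketch below shows roughly where that hook sits in kernel/fork.c of this era; it is reconstructed from memory, heavily condensed, and should be read as an approximation rather than the actual generic code.

/* Condensed, approximate shape of dup_task_struct() in kernel/fork.c;
 * allocation-failure handling and most of the body are omitted. */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    int node = tsk_fork_get_node(orig);

    tsk = alloc_task_struct_node(node);
    ti = alloc_thread_info_node(tsk, node);

    /*
     * The hook implemented by the powerpc patch above: flush the live
     * FP/VMX/VSX/SPE state of @orig to memory, then copy *orig into
     * *tsk so the child starts from a consistent register image.
     */
    if (arch_dup_task_struct(tsk, orig))
        return NULL;

    tsk->stack = ti;
    setup_thread_stack(tsk, orig);
    return tsk;
}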
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 99860273211b..1b488e5305c5 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -680,6 +680,9 @@ static void __init early_cmdline_parse(void)
#define OV3_VMX 0x40 /* VMX/Altivec */
#define OV3_DFP 0x20 /* decimal FP */
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
/* Option vector 5: PAPR/OF options supported */
#define OV5_LPAR 0x80 /* logical partitioning supported */
#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
@@ -701,6 +704,8 @@ static void __init early_cmdline_parse(void)
#define OV5_XCMO 0x00
#endif
#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
+#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
/* Option Vector 6: IBM PAPR hints */
#define OV6_LINUX 0x02 /* Linux is our OS */
@@ -744,11 +749,12 @@ static unsigned char ibm_architecture_vec[] = {
OV3_FP | OV3_VMX | OV3_DFP,
/* option vector 4: IBM PAPR implementation */
- 2 - 2, /* length */
+ 3 - 2, /* length */
0, /* don't halt */
+ OV4_MIN_ENT_CAP, /* minimum VP entitled capacity */
/* option vector 5: PAPR/OF options */
- 13 - 2, /* length */
+ 18 - 2, /* length */
0, /* don't ignore, don't halt */
OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
OV5_DONATE_DEDICATE_CPU | OV5_MSI,
@@ -762,8 +768,13 @@ static unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 100
+#define IBM_ARCH_VEC_NRCORES_OFFSET 101
W(NR_CPUS), /* number of cores supported */
+ 0,
+ 0,
+ 0,
+ 0,
+ OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
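
A note on the two offset changes in ibm_architecture_vec[] above: option vector 4 gains the OV4_MIN_ENT_CAP byte (its length byte goes from 2 - 2 to 3 - 2), which shifts everything after it, including the W(NR_CPUS) word in option vector 5, one byte further into the array; that is why IBM_ARCH_VEC_NRCORES_OFFSET moves from 100 to 101. Option vector 5's length goes from 13 - 2 to 18 - 2 because five bytes (four reserved zeros plus the new PFO capability byte) are appended after W(NR_CPUS), which leaves the NR_CPUS offset itself untouched.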
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 8d8e028893be..c10fc28b9092 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1432,40 +1432,6 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data)
#endif
}
-/*
- * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
- * we mark them as obsolete now, they will be removed in a future version
- */
-static long arch_ptrace_old(struct task_struct *child, long request,
- unsigned long addr, unsigned long data)
-{
- void __user *datavp = (void __user *) data;
-
- switch (request) {
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_GPR, 0, 32 * sizeof(long),
- datavp);
-
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_GPR, 0, 32 * sizeof(long),
- datavp);
-
- case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
- return copy_regset_to_user(child, &user_ppc_native_view,
- REGSET_FPR, 0, 32 * sizeof(double),
- datavp);
-
- case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */
- return copy_regset_from_user(child, &user_ppc_native_view,
- REGSET_FPR, 0, 32 * sizeof(double),
- datavp);
- }
-
- return -EPERM;
-}
-
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
@@ -1687,14 +1653,6 @@ long arch_ptrace(struct task_struct *child, long request,
datavp);
#endif
- /* Old reverse args ptrace callss */
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */
- case PPC_PTRACE_SETFPREGS: /* Get FPRs 0 - 31. */
- ret = arch_ptrace_old(child, request, addr, data);
- break;
-
default:
ret = ptrace_request(child, request, addr, data);
break;
@@ -1710,7 +1668,7 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
- secure_computing(regs->gpr[0]);
+ secure_computing_strict(regs->gpr[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 469349d14a97..8c21658719d9 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -39,30 +39,6 @@
* in exit.c or in signal.c.
*/
-/*
- * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls,
- * we mark them as obsolete now, they will be removed in a future version
- */
-static long compat_ptrace_old(struct task_struct *child, long request,
- long addr, long data)
-{
- switch (request) {
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- return copy_regset_to_user(child,
- task_user_regset_view(current), 0,
- 0, 32 * sizeof(compat_long_t),
- compat_ptr(data));
-
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- return copy_regset_from_user(child,
- task_user_regset_view(current), 0,
- 0, 32 * sizeof(compat_long_t),
- compat_ptr(data));
- }
-
- return -EPERM;
-}
-
/* Macros to workout the correct index for the FPR in the thread struct */
#define FPRNUMBER(i) (((i) - PT_FPR0) >> 1)
#define FPRHALF(i) (((i) - PT_FPR0) & 1)
@@ -308,8 +284,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case PTRACE_SETVSRREGS:
case PTRACE_GETREGS64:
case PTRACE_SETREGS64:
- case PPC_PTRACE_GETFPREGS:
- case PPC_PTRACE_SETFPREGS:
case PTRACE_KILL:
case PTRACE_SINGLESTEP:
case PTRACE_DETACH:
@@ -322,12 +296,6 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
ret = arch_ptrace(child, request, addr, data);
break;
- /* Old reverse args ptrace callss */
- case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */
- case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */
- ret = compat_ptrace_old(child, request, addr, data);
- break;
-
default:
ret = compat_ptrace_request(child, request, addr, data);
break;
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9825f29d1faf..ec8a53fa9e8f 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -150,6 +150,9 @@ notrace void __init machine_init(u64 dt_ptr)
}
#ifdef CONFIG_BOOKE_WDT
+extern u32 booke_wdt_enabled;
+extern u32 booke_wdt_period;
+
/* Checks wdt=x and wdt_period=xx command-line option */
notrace int __init early_parse_wdt(char *p)
{
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 45eb998557f8..61f6aff25edc 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -204,10 +204,10 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka,
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
- __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
+ __get_user(new_ka->sa.sa_restorer, &act->sa_restorer) ||
+ __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
return -EFAULT;
- __get_user(new_ka->sa.sa_flags, &act->sa_flags);
- __get_user(mask, &act->sa_mask);
siginitset(&new_ka->sa.sa_mask, mask);
return 0;
}
@@ -244,17 +244,8 @@ static inline int restore_general_regs(struct pt_regs *regs,
long sys_sigsuspend(old_sigset_t mask)
{
sigset_t blocked;
-
- current->saved_sigmask = current->blocked;
-
- mask &= _BLOCKABLE;
siginitset(&blocked, mask);
- set_current_blocked(&blocked);
-
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- set_restore_sigmask();
- return -ERESTARTNOHAND;
+ return sigsuspend(&blocked);
}
long sys_sigaction(int sig, struct old_sigaction __user *act,
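
For reference, the common sigsuspend() helper that sys_sigsuspend() now calls performs essentially the sequence that was open-coded in the lines removed above. A condensed sketch, reconstructed from those removed lines rather than copied from the generic implementation itself:

/* What the removed powerpc body did, and what the generic helper now
 * centralises: park the old mask, install the new one, sleep, and
 * arrange for the saved mask to be restored on return to userspace. */
static long sigsuspend_sketch(sigset_t *set)
{
    current->saved_sigmask = current->blocked;
    set_current_blocked(set);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_restore_sigmask();
    return -ERESTARTNOHAND;
}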
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d9f94410fd7f..e4cb34322de4 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -57,27 +57,9 @@
#define DBG(fmt...)
#endif
-
-/* Store all idle threads, this can be reused instead of creating
-* a new thread. Also avoids complicated thread destroy functionality
-* for idle threads.
-*/
#ifdef CONFIG_HOTPLUG_CPU
-/*
- * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
- * removed after init for !CONFIG_HOTPLUG_CPU.
- */
-static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
-#define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x))
-#define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p))
-
/* State of each CPU during hotplug phases */
static DEFINE_PER_CPU(int, cpu_state) = { 0 };
-
-#else
-static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
-#define get_idle_for_cpu(x) (idle_thread_array[(x)])
-#define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p))
#endif
struct thread_info *secondary_ti;
@@ -429,60 +411,19 @@ int generic_check_cpu_restart(unsigned int cpu)
}
#endif
-struct create_idle {
- struct work_struct work;
- struct task_struct *idle;
- struct completion done;
- int cpu;
-};
-
-static void __cpuinit do_fork_idle(struct work_struct *work)
+static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
{
- struct create_idle *c_idle =
- container_of(work, struct create_idle, work);
-
- c_idle->idle = fork_idle(c_idle->cpu);
- complete(&c_idle->done);
-}
-
-static int __cpuinit create_idle(unsigned int cpu)
-{
- struct thread_info *ti;
- struct create_idle c_idle = {
- .cpu = cpu,
- .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
- };
- INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
-
- c_idle.idle = get_idle_for_cpu(cpu);
-
- /* We can't use kernel_thread since we must avoid to
- * reschedule the child. We use a workqueue because
- * we want to fork from a kernel thread, not whatever
- * userspace process happens to be trying to online us.
- */
- if (!c_idle.idle) {
- schedule_work(&c_idle.work);
- wait_for_completion(&c_idle.done);
- } else
- init_idle(c_idle.idle, cpu);
- if (IS_ERR(c_idle.idle)) {
- pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle));
- return PTR_ERR(c_idle.idle);
- }
- ti = task_thread_info(c_idle.idle);
+ struct thread_info *ti = task_thread_info(idle);
#ifdef CONFIG_PPC64
- paca[cpu].__current = c_idle.idle;
+ paca[cpu].__current = idle;
paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
#endif
ti->cpu = cpu;
- current_set[cpu] = ti;
-
- return 0;
+ secondary_ti = current_set[cpu] = ti;
}
-int __cpuinit __cpu_up(unsigned int cpu)
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc, c;
@@ -490,12 +431,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
(smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)))
return -EINVAL;
- /* Make sure we have an idle thread */
- rc = create_idle(cpu);
- if (rc)
- return rc;
-
- secondary_ti = current_set[cpu];
+ cpu_idle_thread_init(cpu, tidle);
/* Make sure callin-map entry is 0 (can be leftover a CPU
* hotplug
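
The new __cpu_up(cpu, tidle) signature above reflects the idle-thread consolidation this series is part of: the idle task is allocated and cached by generic code and merely handed down to the architecture, which is why the create_idle()/workqueue machinery could be deleted. Roughly, the generic caller of this era has the shape sketched below; it is paraphrased from memory and simplified, so treat the exact helper names as an assumption.

/* Approximate shape of the generic hotplug path (kernel/cpu.c together
 * with the kernel/smpboot.c idle-thread cache); error paths omitted. */
static int _cpu_up_sketch(unsigned int cpu)
{
    struct task_struct *idle;

    idle = idle_thread_get(cpu);    /* fork_idle() once, then reuse */
    if (IS_ERR(idle))
        return PTR_ERR(idle);

    return __cpu_up(cpu, idle);     /* arch hook, as changed above  */
}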
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 6aa0c663e247..158972341a2d 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
addr, regs->nip, regs->link, code);
}
- if (!arch_irq_disabled_regs(regs))
+ if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
local_irq_enable();
memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
return;
}
- local_irq_enable();
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
#ifdef CONFIG_MATH_EMULATION
/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
{
int sig, code, fixed = 0;
+ /* We restore the interrupt state now */
+ if (!arch_irq_disabled_regs(regs))
+ local_irq_enable();
+
/* we don't implement logging of alignment exceptions */
if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
fixed = fix_alignment(regs);
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 4d5a3edff49e..e830289d2e48 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -89,6 +89,16 @@ _GLOBAL(load_up_altivec)
/* restore registers and return */
blr
+_GLOBAL(giveup_altivec_notask)
+ mfmsr r3
+ andis. r4,r3,MSR_VEC@h
+ bnelr /* Already enabled? */
+ oris r3,r3,MSR_VEC@h
+ SYNC
+ MTMSRD(r3) /* enable use of VMX now */
+ isync
+ blr
+
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index a3a99901c8ec..cb87301ccd55 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -14,7 +14,9 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <linux/cpu.h>
#include <linux/types.h>
+#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -709,13 +711,26 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
struct vio_driver *viodrv = to_vio_driver(dev->driver);
unsigned long flags;
size_t size;
+ bool dma_capable = false;
+
+ /* A device requires entitlement if it has a DMA window property */
+ switch (viodev->family) {
+ case VDEVICE:
+ if (of_get_property(viodev->dev.of_node,
+ "ibm,my-dma-window", NULL))
+ dma_capable = true;
+ break;
+ case PFO:
+ dma_capable = false;
+ break;
+ default:
+ dev_warn(dev, "unknown device family: %d\n", viodev->family);
+ BUG();
+ break;
+ }
- /*
- * Check to see that device has a DMA window and configure
- * entitlement for the device.
- */
- if (of_get_property(viodev->dev.of_node,
- "ibm,my-dma-window", NULL)) {
+ /* Configure entitlement for the device. */
+ if (dma_capable) {
/* Check that the driver is CMO enabled and get desired DMA */
if (!viodrv->get_desired_dma) {
dev_err(dev, "%s: device driver does not support CMO\n",
@@ -1050,6 +1065,94 @@ static void vio_cmo_sysfs_init(void) { }
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
+
+/*
+ * Platform Facilities Option (PFO) support
+ */
+
+/**
+ * vio_h_cop_sync - Perform a synchronous PFO co-processor operation
+ *
+ * @vdev - Pointer to a struct vio_dev for device
+ * @op - Pointer to a struct vio_pfo_op for the operation parameters
+ *
+ * Calls the hypervisor to synchronously perform the PFO operation
+ * described in @op. In the case of a busy response from the hypervisor,
+ * the operation will be re-submitted indefinitely unless a non-zero timeout
+ * is specified or an error occurs. The timeout places a limit on when to
+ * stop re-submitting an operation; the total time can be exceeded if an
+ * operation is in progress.
+ *
+ * On return, op->hcall_err holds the return value of the last H_COP
+ * hcall, or 0 if the failure did not involve the hcall itself.
+ *
+ * Returns:
+ * 0 on success,
+ * -EINVAL if the h_call fails due to an invalid parameter,
+ * -E2BIG if the h_call can not be performed synchronously,
+ * -EBUSY if a timeout is specified and has elapsed,
+ * -EACCES if the memory area for data/status has been rescinded, or
+ * -EPERM if a hardware fault has been indicated
+ */
+int vio_h_cop_sync(struct vio_dev *vdev, struct vio_pfo_op *op)
+{
+ struct device *dev = &vdev->dev;
+ unsigned long deadline = 0;
+ long hret = 0;
+ int ret = 0;
+
+ if (op->timeout)
+ deadline = jiffies + msecs_to_jiffies(op->timeout);
+
+ while (true) {
+ hret = plpar_hcall_norets(H_COP, op->flags,
+ vdev->resource_id,
+ op->in, op->inlen, op->out,
+ op->outlen, op->csbcpb);
+
+ if (hret == H_SUCCESS ||
+ (hret != H_NOT_ENOUGH_RESOURCES &&
+ hret != H_BUSY && hret != H_RESOURCE) ||
+ (op->timeout && time_after(deadline, jiffies)))
+ break;
+
+ dev_dbg(dev, "%s: hcall ret(%ld), retrying.\n", __func__, hret);
+ }
+
+ switch (hret) {
+ case H_SUCCESS:
+ ret = 0;
+ break;
+ case H_OP_MODE:
+ case H_TOO_BIG:
+ ret = -E2BIG;
+ break;
+ case H_RESCINDED:
+ ret = -EACCES;
+ break;
+ case H_HARDWARE:
+ ret = -EPERM;
+ break;
+ case H_NOT_ENOUGH_RESOURCES:
+ case H_RESOURCE:
+ case H_BUSY:
+ ret = -EBUSY;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ dev_dbg(dev, "%s: Sync h_cop_op failure (ret:%d) (hret:%ld)\n",
+ __func__, ret, hret);
+
+ op->hcall_err = hret;
+ return ret;
+}
+EXPORT_SYMBOL(vio_h_cop_sync);
+
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
const unsigned char *dma_window;
@@ -1211,35 +1314,87 @@ static void __devinit vio_dev_release(struct device *dev)
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
struct vio_dev *viodev;
+ struct device_node *parent_node;
const unsigned int *unit_address;
+ const unsigned int *pfo_resid = NULL;
+ enum vio_dev_family family;
+ const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
- /* we need the 'device_type' property, in order to match with drivers */
- if (of_node->type == NULL) {
- printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
- __func__,
- of_node->name ? of_node->name : "<unknown>");
+ /*
+ * Determine if this node is under the /vdevice node or under the
+ * /ibm,platform-facilities node. This decides the device's family.
+ */
+ parent_node = of_get_parent(of_node);
+ if (parent_node) {
+ if (!strcmp(parent_node->full_name, "/ibm,platform-facilities"))
+ family = PFO;
+ else if (!strcmp(parent_node->full_name, "/vdevice"))
+ family = VDEVICE;
+ else {
+ pr_warn("%s: parent(%s) of %s not recognized.\n",
+ __func__,
+ parent_node->full_name,
+ of_node_name);
+ of_node_put(parent_node);
+ return NULL;
+ }
+ of_node_put(parent_node);
+ } else {
+ pr_warn("%s: could not determine the parent of node %s.\n",
+ __func__, of_node_name);
return NULL;
}
- unit_address = of_get_property(of_node, "reg", NULL);
- if (unit_address == NULL) {
- printk(KERN_WARNING "%s: node %s missing 'reg'\n",
- __func__,
- of_node->name ? of_node->name : "<unknown>");
- return NULL;
+ if (family == PFO) {
+ if (of_get_property(of_node, "interrupt-controller", NULL)) {
+ pr_debug("%s: Skipping the interrupt controller %s.\n",
+ __func__, of_node_name);
+ return NULL;
+ }
}
/* allocate a vio_dev for this node */
viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
- if (viodev == NULL)
+ if (viodev == NULL) {
+ pr_warn("%s: allocation failure for VIO device.\n", __func__);
return NULL;
+ }
- viodev->irq = irq_of_parse_and_map(of_node, 0);
+ /* we need the 'device_type' property, in order to match with drivers */
+ viodev->family = family;
+ if (viodev->family == VDEVICE) {
+ if (of_node->type != NULL)
+ viodev->type = of_node->type;
+ else {
+ pr_warn("%s: node %s is missing the 'device_type' "
+ "property.\n", __func__, of_node_name);
+ goto out;
+ }
+
+ unit_address = of_get_property(of_node, "reg", NULL);
+ if (unit_address == NULL) {
+ pr_warn("%s: node %s missing 'reg'\n",
+ __func__, of_node_name);
+ goto out;
+ }
+ dev_set_name(&viodev->dev, "%x", *unit_address);
+ viodev->irq = irq_of_parse_and_map(of_node, 0);
+ viodev->unit_address = *unit_address;
+ } else {
+ /* PFO devices need their resource_id for submitting COP_OPs
+ * This is an optional field for devices, but is required when
+ * performing synchronous ops */
+ pfo_resid = of_get_property(of_node, "ibm,resource-id", NULL);
+ if (pfo_resid != NULL)
+ viodev->resource_id = *pfo_resid;
+
+ unit_address = NULL;
+ dev_set_name(&viodev->dev, "%s", of_node_name);
+ viodev->type = of_node_name;
+ viodev->irq = 0;
+ }
- dev_set_name(&viodev->dev, "%x", *unit_address);
viodev->name = of_node->name;
- viodev->type = of_node->type;
- viodev->unit_address = *unit_address;
viodev->dev.of_node = of_node_get(of_node);
if (firmware_has_feature(FW_FEATURE_CMO))
@@ -1267,16 +1422,51 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
}
return viodev;
+
+out: /* Use this exit point for any return prior to device_register */
+ kfree(viodev);
+
+ return NULL;
}
EXPORT_SYMBOL(vio_register_device_node);
+/*
+ * vio_bus_scan_register_devices - Scan OF and register each child device
+ * @root_name - OF node name for the root of the subtree to search.
+ * This must be non-NULL
+ *
+ * Starting from the root node provided, register the device node for
+ * each child beneath the root.
+ */
+static void vio_bus_scan_register_devices(char *root_name)
+{
+ struct device_node *node_root, *node_child;
+
+ if (!root_name)
+ return;
+
+ node_root = of_find_node_by_name(NULL, root_name);
+ if (node_root) {
+
+ /*
+ * Create struct vio_devices for each virtual device in
+ * the device tree. Drivers will associate with them later.
+ */
+ node_child = of_get_next_child(node_root, NULL);
+ while (node_child) {
+ vio_register_device_node(node_child);
+ node_child = of_get_next_child(node_root, node_child);
+ }
+ of_node_put(node_root);
+ }
+}
+
/**
* vio_bus_init: - Initialize the virtual IO bus
*/
static int __init vio_bus_init(void)
{
int err;
- struct device_node *node_vroot;
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_sysfs_init();
@@ -1301,19 +1491,8 @@ static int __init vio_bus_init(void)
if (firmware_has_feature(FW_FEATURE_CMO))
vio_cmo_bus_init();
- node_vroot = of_find_node_by_name(NULL, "vdevice");
- if (node_vroot) {
- struct device_node *of_node;
-
- /*
- * Create struct vio_devices for each virtual device in
- * the device tree. Drivers will associate with them later.
- */
- for (of_node = node_vroot->child; of_node != NULL;
- of_node = of_node->sibling)
- vio_register_device_node(of_node);
- of_node_put(node_vroot);
- }
+ vio_bus_scan_register_devices("vdevice");
+ vio_bus_scan_register_devices("ibm,platform-facilities");
return 0;
}
@@ -1436,12 +1615,28 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
{
const uint32_t *unit_address;
char kobj_name[20];
+ struct device_node *vnode_parent;
+ const char *dev_type;
+
+ vnode_parent = of_get_parent(vnode);
+ if (!vnode_parent)
+ return NULL;
+
+ dev_type = of_get_property(vnode_parent, "device_type", NULL);
+ of_node_put(vnode_parent);
+ if (!dev_type)
+ return NULL;
/* construct the kobject name from the device node */
- unit_address = of_get_property(vnode, "reg", NULL);
- if (!unit_address)
+ if (!strcmp(dev_type, "vdevice")) {
+ unit_address = of_get_property(vnode, "reg", NULL);
+ if (!unit_address)
+ return NULL;
+ snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
+ } else if (!strcmp(dev_type, "ibm,platform-facilities"))
+ snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
+ else
return NULL;
- snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);
return vio_find_name(kobj_name);
}
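
As a usage note for the vio_h_cop_sync() helper added earlier in this file: a PFO driver fills a struct vio_pfo_op with the operation flags, the input and output buffer addresses and lengths, the CSB/CPB address and an optional timeout, then hands it to the helper together with its vio_dev. A hypothetical sketch follows; the field names come from the code in this patch, while the surrounding driver, the parameter types and the flag values are invented for illustration.

/* Hypothetical PFO driver fragment; only vio_h_cop_sync() and the
 * struct vio_pfo_op fields visible in this patch are taken as given. */
static int example_pfo_submit(struct vio_dev *vdev,
                              u64 in, u64 inlen, u64 out, u64 outlen,
                              u64 csbcpb)
{
    struct vio_pfo_op op = {
        .flags   = 0,        /* operation flags, device specific      */
        .in      = in,
        .inlen   = inlen,
        .out     = out,
        .outlen  = outlen,
        .csbcpb  = csbcpb,
        .timeout = 1000,     /* stop retrying busy responses after 1s */
    };
    int ret;

    ret = vio_h_cop_sync(vdev, &op);
    if (ret)
        dev_err(&vdev->dev, "H_COP failed: ret=%d hcall_err=%ld\n",
                ret, op.hcall_err);
    return ret;
}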
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 6f87f39a1ac2..10fc8ec9d2a8 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -194,14 +194,14 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
backwards_map = !backwards_map;
/* Uh-oh ... out of mappings. Let's flush! */
- if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
- vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
+ if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
+ vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
memset(vcpu_book3s->sid_map, 0,
sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
kvmppc_mmu_pte_flush(vcpu, 0, 0);
kvmppc_mmu_flush_segments(vcpu);
}
- map->host_vsid = vcpu_book3s->vsid_next++;
+ map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);
map->guest_vsid = gvsid;
map->valid = true;
@@ -319,9 +319,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
return -1;
vcpu3s->context_id[0] = err;
- vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1;
- vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
- vcpu3s->vsid_next = vcpu3s->vsid_first;
+ vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+ << USER_ESID_BITS) - 1;
+ vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS;
+ vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index ddc485a529f2..c3beaeef3f60 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
!(memslot->userspace_addr & (s - 1))) {
start &= ~(s - 1);
pgsize = s;
+ get_page(hpage);
+ put_page(page);
page = hpage;
}
}
@@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
err = 0;
out:
- if (got) {
- if (PageHuge(page))
- page = compound_head(page);
+ if (got)
put_page(page);
- }
return err;
up_err:
@@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
SetPageDirty(page);
out_put:
- if (page)
- put_page(page);
+ if (page) {
+ /*
+ * We drop pages[0] here, not page because page might
+ * have been set to the head page of a compound, but
+ * we have to drop the reference on the correct tail
+ * page to match the get inside gup()
+ */
+ put_page(pages[0]);
+ }
return ret;
out_unlock:
@@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
pa = *physp;
}
page = pfn_to_page(pa >> PAGE_SHIFT);
+ get_page(page);
} else {
hva = gfn_to_hva_memslot(memslot, gfn);
npages = get_user_pages_fast(hva, 1, 1, pages);
@@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
page = compound_head(page);
psize <<= compound_order(page);
}
- if (!kvm->arch.using_mmu_notifiers)
- get_page(page);
offset = gpa & (psize - 1);
if (nb_ret)
*nb_ret = psize - offset;
@@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
struct page *page = virt_to_page(va);
- page = compound_head(page);
put_page(page);
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 01294a5099dd..108d1f580177 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id)
continue;
pfn = physp[j] >> PAGE_SHIFT;
page = pfn_to_page(pfn);
- if (PageHuge(page))
- page = compound_head(page);
SetPageDirty(page);
put_page(page);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index def880aea63a..cec4daddbf31 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -463,6 +463,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
/* insert R and C bits from PTE */
rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
args[j] |= rcbits << (56 - 5);
+ hp[0] = 0;
continue;
}
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9f..6e6e9cef34a8 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,7 +197,8 @@ kvmppc_interrupt:
/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
- andi. r0,r12,0x2
+ andi. r0, r12, 0x2
+ cmpwi cr1, r0, 0
beq 1f
mfspr r3,SPRN_HSRR0
mfspr r4,SPRN_HSRR1
@@ -250,6 +251,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
beq ld_last_prev_inst
cmpwi r12, BOOK3S_INTERRUPT_ALIGNMENT
beq- ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ cmpwi r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+ beq- ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
b no_ld_last_inst
@@ -316,23 +323,17 @@ no_dcbz32_off:
* Having set up SRR0/1 with the address where we want
* to continue with relocation on (potentially in module
* space), we either just go straight there with rfi[d],
- * or we jump to an interrupt handler with bctr if there
- * is an interrupt to be handled first. In the latter
- * case, the rfi[d] at the end of the interrupt handler
- * will get us back to where we want to continue.
+ * or we jump to an interrupt handler if there is an
+ * interrupt to be handled first. In the latter case,
+ * the rfi[d] at the end of the interrupt handler will
+ * get us back to where we want to continue.
*/
- cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- beq 1f
- cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
- beq 1f
- cmpwi r12, BOOK3S_INTERRUPT_PERFMON
-1: mtctr r12
-
/* Register usage at this point:
*
* R1 = host R1
* R2 = host R2
+ * R10 = raw exit handler id
* R12 = exit handler id
* R13 = shadow vcpu (32-bit) or PACA (64-bit)
* SVCPU.* = guest *
@@ -342,12 +343,25 @@ no_dcbz32_off:
PPC_LL r6, HSTATE_HOST_MSR(r13)
PPC_LL r8, HSTATE_VMHANDLER(r13)
- /* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+ beq cr1, 1f
+ mtspr SPRN_HSRR1, r6
+ mtspr SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1: /* Restore host msr -> SRR1 */
mtsrr1 r6
/* Load highmem handler address */
mtsrr0 r8
/* RFI into the highmem handler, or jump to interrupt handler */
- beqctr
+ cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
+ beqa BOOK3S_INTERRUPT_EXTERNAL
+ cmpwi r12, BOOK3S_INTERRUPT_DECREMENTER
+ beqa BOOK3S_INTERRUPT_DECREMENTER
+ cmpwi r12, BOOK3S_INTERRUPT_PERFMON
+ beqa BOOK3S_INTERRUPT_PERFMON
+
RFI
kvmppc_handler_trampoline_exit_end:
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 773d38f90aaa..d73a59014900 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -30,7 +30,7 @@ _GLOBAL(__copy_tofrom_user_base)
dcbt 0,r4
beq .Lcopy_page_4K
andi. r6,r6,7
- PPC_MTOCRF 0x01,r5
+ PPC_MTOCRF(0x01,r5)
blt cr1,.Lshort_copy
/* Below we want to nop out the bne if we're on a CPU that has the
* CPU_FTR_UNALIGNED_LD_STD bit set and the CPU_FTR_CP_USE_DCBTZ bit
@@ -186,7 +186,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
- PPC_MTOCRF 0x01,r6 /* put #bytes to 8B bdry into cr7 */
+ PPC_MTOCRF(0x01,r6) /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
@@ -201,7 +201,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+1,3f
37: lwzx r0,r7,r4
83: stwx r0,r7,r3
-3: PPC_MTOCRF 0x01,r5
+3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index 11ce045e21fd..f4fcb0bc6563 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -19,7 +19,7 @@ _GLOBAL(memset)
rlwimi r4,r4,16,0,15
cmplw cr1,r5,r0 /* do we get that far? */
rldimi r4,r4,32,0
- PPC_MTOCRF 1,r0
+ PPC_MTOCRF(1,r0)
mr r6,r3
blt cr1,8f
beq+ 3f /* if already 8-byte aligned */
@@ -49,7 +49,7 @@ _GLOBAL(memset)
bdnz 4b
5: srwi. r0,r5,3
clrlwi r5,r5,29
- PPC_MTOCRF 1,r0
+ PPC_MTOCRF(1,r0)
beq 8f
bf 29,6f
std r4,0(r6)
@@ -65,7 +65,7 @@ _GLOBAL(memset)
std r4,0(r6)
addi r6,r6,8
8: cmpwi r5,0
- PPC_MTOCRF 1,r5
+ PPC_MTOCRF(1,r5)
beqlr+
bf 29,9f
stw r4,0(r6)
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index e178922b2c21..82fea3963e15 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -12,7 +12,7 @@
.align 7
_GLOBAL(memcpy)
std r3,48(r1) /* save destination pointer for return value */
- PPC_MTOCRF 0x01,r5
+ PPC_MTOCRF(0x01,r5)
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
@@ -154,7 +154,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
.Ldst_unaligned:
- PPC_MTOCRF 0x01,r6 # put #bytes to 8B bdry into cr7
+ PPC_MTOCRF(0x01,r6) # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
@@ -169,7 +169,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
-3: PPC_MTOCRF 0x01,r5
+3: PPC_MTOCRF(0x01,r5)
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index fb05b123218f..1a6de0a7d8eb 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -271,7 +271,8 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
unsigned long gpage_npages[MMU_PAGE_COUNT];
-static int __init do_gpage_early_setup(char *param, char *val)
+static int __init do_gpage_early_setup(char *param, char *val,
+ const char *unused)
{
static phys_addr_t size;
unsigned long npages;
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index af1ab5e9a691..5c3cf2d04e41 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -48,7 +48,13 @@
/*
* Assembly helpers from arch/powerpc/net/bpf_jit.S:
*/
-extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
+#define DECLARE_LOAD_FUNC(func) \
+ extern u8 func[], func##_negative_offset[], func##_positive_offset[]
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+DECLARE_LOAD_FUNC(sk_load_byte_msh);
#define FUNCTION_DESCR_SIZE 24
diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S
index ff4506e85cce..55ba3855a97f 100644
--- a/arch/powerpc/net/bpf_jit_64.S
+++ b/arch/powerpc/net/bpf_jit_64.S
@@ -31,14 +31,13 @@
* then branch directly to slow_path_XXX if required. (In fact, could
* load a spare GPR with the address of slow_path_generic and pass size
* as an argument, making the call site a mtlr, li and bllr.)
- *
- * Technically, the "is addr < 0" check is unnecessary & slowing down
- * the ABS path, as it's statically checked on generation.
*/
.globl sk_load_word
sk_load_word:
cmpdi r_addr, 0
- blt bpf_error
+ blt bpf_slow_path_word_neg
+ .globl sk_load_word_positive_offset
+sk_load_word_positive_offset:
/* Are we accessing past headlen? */
subi r_scratch1, r_HL, 4
cmpd r_scratch1, r_addr
@@ -51,7 +50,9 @@ sk_load_word:
.globl sk_load_half
sk_load_half:
cmpdi r_addr, 0
- blt bpf_error
+ blt bpf_slow_path_half_neg
+ .globl sk_load_half_positive_offset
+sk_load_half_positive_offset:
subi r_scratch1, r_HL, 2
cmpd r_scratch1, r_addr
blt bpf_slow_path_half
@@ -61,7 +62,9 @@ sk_load_half:
.globl sk_load_byte
sk_load_byte:
cmpdi r_addr, 0
- blt bpf_error
+ blt bpf_slow_path_byte_neg
+ .globl sk_load_byte_positive_offset
+sk_load_byte_positive_offset:
cmpd r_HL, r_addr
ble bpf_slow_path_byte
lbzx r_A, r_D, r_addr
@@ -69,22 +72,20 @@ sk_load_byte:
/*
* BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf)
- * r_addr is the offset value, already known positive
+ * r_addr is the offset value
*/
.globl sk_load_byte_msh
sk_load_byte_msh:
+ cmpdi r_addr, 0
+ blt bpf_slow_path_byte_msh_neg
+ .globl sk_load_byte_msh_positive_offset
+sk_load_byte_msh_positive_offset:
cmpd r_HL, r_addr
ble bpf_slow_path_byte_msh
lbzx r_X, r_D, r_addr
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
-bpf_error:
- /* Entered with cr0 = lt */
- li r3, 0
- /* Generated code will 'blt epilogue', returning 0. */
- blr
-
/* Call out to skb_copy_bits:
* We'll need to back up our volatile regs first; we have
* local variable space at r1+(BPF_PPC_STACK_BASIC).
@@ -136,3 +137,84 @@ bpf_slow_path_byte_msh:
lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1)
rlwinm r_X, r_X, 2, 32-4-2, 31-2
blr
+
+/* Call out to bpf_internal_load_pointer_neg_helper:
+ * We'll need to back up our volatile regs first; we have
+ * local variable space at r1+(BPF_PPC_STACK_BASIC).
+ * Allocate a new stack frame here to remain ABI-compliant in
+ * stashing LR.
+ */
+#define sk_negative_common(SIZE) \
+ mflr r0; \
+ std r0, 16(r1); \
+ /* R3 goes in parameter space of caller's frame */ \
+ std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \
+ /* R3 = r_skb, as passed */ \
+ mr r4, r_addr; \
+ li r5, SIZE; \
+ bl bpf_internal_load_pointer_neg_helper; \
+ /* R3 != 0 on success */ \
+ addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \
+ ld r0, 16(r1); \
+ ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \
+ ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \
+ mtlr r0; \
+ cmpldi r3, 0; \
+ beq bpf_error_slow; /* cr0 = EQ */ \
+ mr r_addr, r3; \
+ ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \
+ /* Great success! */
+
+bpf_slow_path_word_neg:
+ lis r_scratch1,-32 /* SKF_LL_OFF */
+ cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ .globl sk_load_word_negative_offset
+sk_load_word_negative_offset:
+ sk_negative_common(4)
+ lwz r_A, 0(r_addr)
+ blr
+
+bpf_slow_path_half_neg:
+ lis r_scratch1,-32 /* SKF_LL_OFF */
+ cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ .globl sk_load_half_negative_offset
+sk_load_half_negative_offset:
+ sk_negative_common(2)
+ lhz r_A, 0(r_addr)
+ blr
+
+bpf_slow_path_byte_neg:
+ lis r_scratch1,-32 /* SKF_LL_OFF */
+ cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ .globl sk_load_byte_negative_offset
+sk_load_byte_negative_offset:
+ sk_negative_common(1)
+ lbz r_A, 0(r_addr)
+ blr
+
+bpf_slow_path_byte_msh_neg:
+ lis r_scratch1,-32 /* SKF_LL_OFF */
+ cmpd r_addr, r_scratch1 /* addr < SKF_* */
+ blt bpf_error /* cr0 = LT */
+ .globl sk_load_byte_msh_negative_offset
+sk_load_byte_msh_negative_offset:
+ sk_negative_common(1)
+ lbz r_X, 0(r_addr)
+ rlwinm r_X, r_X, 2, 32-4-2, 31-2
+ blr
+
+bpf_error_slow:
+ /* fabricate a cr0 = lt */
+ li r_scratch1, -1
+ cmpdi r_scratch1, 0
+bpf_error:
+ /* Entered with cr0 = lt */
+ li r3, 0
+ /* Generated code will 'blt epilogue', returning 0. */
+ blr
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 73619d3aeb6c..2dc8b1484845 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
PPC_BLR();
}
+#define CHOOSE_LOAD_FUNC(K, func) \
+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
+
/* Assemble the body code between the prologue & epilogue. */
static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
struct codegen_context *ctx,
@@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
/*** Absolute loads from packet header/data ***/
case BPF_S_LD_W_ABS:
- func = sk_load_word;
+ func = CHOOSE_LOAD_FUNC(K, sk_load_word);
goto common_load;
case BPF_S_LD_H_ABS:
- func = sk_load_half;
+ func = CHOOSE_LOAD_FUNC(K, sk_load_half);
goto common_load;
case BPF_S_LD_B_ABS:
- func = sk_load_byte;
+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
common_load:
- /*
- * Load from [K]. Reference with the (negative)
- * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported.
- */
+ /* Load from [K]. */
ctx->seen |= SEEN_DATAREF;
- if ((int)K < 0)
- return -ENOTSUPP;
PPC_LI64(r_scratch1, func);
PPC_MTLR(r_scratch1);
PPC_LI32(r_addr, K);
@@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
common_load_ind:
/*
* Load from [X + K]. Negative offsets are tested for
- * in the helper functions, and result in a 'ret 0'.
+ * in the helper functions.
*/
ctx->seen |= SEEN_DATAREF | SEEN_XREG;
PPC_LI64(r_scratch1, func);
@@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
break;
case BPF_S_LDX_B_MSH:
- /*
- * x86 version drops packet (RET 0) when K<0, whereas
- * interpreter does allow K<0 (__load_pointer, special
- * ancillary data). common_load returns ENOTSUPP if K<0,
- * so we fall back to interpreter & filter works.
- */
- func = sk_load_byte_msh;
+ func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
goto common_load;
break;
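
The CHOOSE_LOAD_FUNC() macro above picks one of three entry points per classic-BPF absolute load at JIT time: the positive-offset fast path when the constant K is non-negative, the negative-offset helper when SKF_LL_OFF <= K < 0, and the plain entry otherwise (which still ends up in the error path at run time for anything below SKF_LL_OFF). A stand-alone illustration of the selection logic, with string stubs in place of the assembler entry points; SKF_LL_OFF is the standard -0x200000 from linux/filter.h, everything else here is illustrative:

#include <stdio.h>

#define SKF_LL_OFF (-0x200000)   /* standard value from <linux/filter.h> */

/* Stub "entry points" standing in for the assembler labels above. */
static const char sk_load_word[] = "sk_load_word";
static const char sk_load_word_positive_offset[] = "sk_load_word_positive_offset";
static const char sk_load_word_negative_offset[] = "sk_load_word_negative_offset";

/* Same selection logic as the JIT macro above. */
#define CHOOSE_LOAD_FUNC(K, func) \
    ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) \
                : func##_positive_offset)

int main(void)
{
    printf("K=14        -> %s\n", CHOOSE_LOAD_FUNC(14, sk_load_word));
    printf("K=-0x100000 -> %s\n", CHOOSE_LOAD_FUNC(-0x100000, sk_load_word));
    printf("K=-0x300000 -> %s\n", CHOOSE_LOAD_FUNC(-0x300000, sk_load_word));
    return 0;
}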
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 02aee03e713c..8f84bcba18da 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1299,8 +1299,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (record) {
struct perf_sample_data data;
- perf_sample_data_init(&data, ~0ULL);
- data.period = event->hw.last_period;
+ perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569c..106c53354675 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -613,8 +613,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (record) {
struct perf_sample_data data;
- perf_sample_data_init(&data, 0);
- data.period = event->hw.last_period;
+ perf_sample_data_init(&data, 0, event->hw.last_period);
if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0);
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 2e4e64abfab4..8abf6fb8f410 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -23,6 +23,8 @@ config BLUESTONE
default n
select PPC44x_SIMPLE
select APM821xx
+ select PCI_MSI
+ select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII
help
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 9fef5302adc1..67dac22b4363 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -21,6 +21,12 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = {
{ .compatible = "fsl,qe", },
{ .compatible = "fsl,cpm2", },
{ .compatible = "fsl,srio", },
+ /* So that the DMA channel nodes can be probed individually: */
+ { .compatible = "fsl,eloplus-dma", },
+ /* For the PMC driver */
+ { .compatible = "fsl,mpc8548-guts", },
+ /* Probably unnecessary? */
+ { .compatible = "gpio-leds", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 9a6f04406e0d..d208ebccb91c 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -399,12 +399,6 @@ static int __init board_fixups(void)
machine_arch_initcall(mpc8568_mds, board_fixups);
machine_arch_initcall(mpc8569_mds, board_fixups);
-static struct of_device_id mpc85xx_ids[] = {
- { .compatible = "fsl,mpc8548-guts", },
- { .compatible = "gpio-leds", },
- {},
-};
-
static int __init mpc85xx_publish_devices(void)
{
if (machine_is(mpc8568_mds))
@@ -412,10 +406,7 @@ static int __init mpc85xx_publish_devices(void)
if (machine_is(mpc8569_mds))
simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio");
- mpc85xx_common_publish_devices();
- of_platform_bus_probe(NULL, mpc85xx_ids, NULL);
-
- return 0;
+ return mpc85xx_common_publish_devices();
}
machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index e74b7cde9aee..f700c81a1321 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -460,18 +460,7 @@ static void __init p1022_ds_setup_arch(void)
pr_info("Freescale P1022 DS reference board\n");
}
-static struct of_device_id __initdata p1022_ds_ids[] = {
- /* So that the DMA channel nodes can be probed individually: */
- { .compatible = "fsl,eloplus-dma", },
- {},
-};
-
-static int __init p1022_ds_publish_devices(void)
-{
- mpc85xx_common_publish_devices();
- return of_platform_bus_probe(NULL, p1022_ds_ids, NULL);
-}
-machine_device_initcall(p1022_ds, p1022_ds_publish_devices);
+machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices);
machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier);
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 425db18580a2..61c9550819a2 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -78,6 +78,36 @@ config PPC_BOOK3E_64
endchoice
+choice
+ prompt "CPU selection"
+ depends on PPC64
+ default GENERIC_CPU
+ help
+ This will create a kernel which is optimised for a particular CPU.
+ The resulting kernel may not run on other CPUs, so use this with care.
+
+ If unsure, select Generic.
+
+config GENERIC_CPU
+ bool "Generic"
+
+config CELL_CPU
+ bool "Cell Broadband Engine"
+
+config POWER4_CPU
+ bool "POWER4"
+
+config POWER5_CPU
+ bool "POWER5"
+
+config POWER6_CPU
+ bool "POWER6"
+
+config POWER7_CPU
+ bool "POWER7"
+
+endchoice
+
config PPC_BOOK3S
def_bool y
depends on PPC_BOOK3S_32 || PPC_BOOK3S_64
@@ -86,15 +116,6 @@ config PPC_BOOK3E
def_bool y
depends on PPC_BOOK3E_64
-config POWER4_ONLY
- bool "Optimize for POWER4"
- depends on PPC64 && PPC_BOOK3S
- default n
- ---help---
- Cause the compiler to optimize for POWER4/POWER5/PPC970 processors.
- The resulting binary will not work on POWER3 or RS64 processors
- when compiled with binutils 2.15 or later.
-
config 6xx
def_bool y
depends on PPC32 && PPC_BOOK3S
@@ -258,7 +279,7 @@ config PPC_ICSWX_PID
default y
---help---
The PID register in server is used explicitly for ICSWX. In
- embedded systems PID managment is done by the system.
+ embedded systems PID management is done by the system.
config PPC_ICSWX_USE_SIGILL
bool "Should a bad CT cause a SIGILL?"
@@ -266,7 +287,7 @@ config PPC_ICSWX_USE_SIGILL
default n
---help---
Should a bad CT used for "non-record form ICSWX" cause an
- illegal intruction signal or should it be silent as
+ illegal instruction signal or should it be silent as
architected.
If in doubt, say N here.
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index d09f3e8e6867..85825b5401e5 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
- if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+ if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (rc)
return rc;
- /* We rely on being able to stash a virq in a u16 */
- BUILD_BUG_ON(NR_IRQS > 65536);
-
list_for_each_entry(entry, &dev->msi_list, list) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
- msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+ /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
if (!msic->irq_domain) {
printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index f9a48af335cb..8c6dc42ecf65 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
{
int i;
- for (i = 1; i < NR_IRQS; i++)
+ for (i = 1; i < nr_irqs; i++)
beat_destruct_irq_plug(i);
}
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index 996c5ff7824b..fc536f2971c0 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -366,11 +366,20 @@ static void kw_i2c_timeout(unsigned long data)
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
+
+ /*
+ * If the timer is pending, that means we raced with the
+ * irq, in which case we just return
+ */
+ if (timer_pending(&host->timeout_timer))
+ goto skip;
+
kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr));
if (host->state != state_idle) {
host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT;
add_timer(&host->timeout_timer);
}
+ skip:
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1494,6 +1503,7 @@ static int __init pmac_i2c_create_platform_devices(void)
if (bus->platform_dev == NULL)
return -ENOMEM;
bus->platform_dev->dev.platform_data = bus;
+ bus->platform_dev->dev.of_node = bus->busnode;
platform_device_add(bus->platform_dev);
}
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 66ad93de1d55..c4e630576ff2 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -57,9 +57,9 @@ static int max_real_irqs;
static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 476d9d9b2405..46b7f0232523 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -7,7 +7,6 @@ config PPC_PS3
select USB_OHCI_BIG_ENDIAN_MMIO
select USB_ARCH_HAS_EHCI
select USB_EHCI_BIG_ENDIAN_MMIO
- select MEMORY_HOTPLUG
select PPC_PCI_CHOICE
help
This option enables support for the Sony PS3 game console
@@ -74,7 +73,7 @@ config PS3_PS3AV
help
Include support for the PS3 AV Settings driver.
- This support is required for graphics and sound. In
+ This support is required for PS3 graphics and sound. In
general, all users will say Y or M.
config PS3_SYS_MANAGER
@@ -85,9 +84,22 @@ config PS3_SYS_MANAGER
help
Include support for the PS3 System Manager.
- This support is required for system control. In
+ This support is required for PS3 system control. In
general, all users will say Y or M.
+config PS3_REPOSITORY_WRITE
+ bool "PS3 Repository write support" if PS3_ADVANCED
+ depends on PPC_PS3
+ default n
+ help
+ Enables support for writing to the PS3 System Repository.
+
+ This support is intended for bootloaders that need to store data
+ in the repository for later boot stages.
+
+ If in doubt, say N here and reduce the size of the kernel by a
+ small amount.
+
config PS3_STORAGE
depends on PPC_PS3
tristate
@@ -122,7 +134,7 @@ config PS3_FLASH
This support is required to access the PS3 FLASH ROM, which
contains the boot loader and some boot options.
- In general, all users will say Y or M.
+ In general, PS3 OtherOS users will say Y or M.
As this driver needs a fixed buffer of 256 KiB of memory, it can
be disabled on the kernel command line using "ps3flash=off", to
@@ -156,7 +168,7 @@ config PS3GELIC_UDBG
via the Ethernet port (UDP port number 18194).
This driver uses a trivial implementation and is independent
- from the main network driver.
+ from the main PS3 gelic network driver.
If in doubt, say N here.
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index de2aea421707..0c9f643d9e2a 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/slab.h>
@@ -79,12 +78,14 @@ enum {
* @base: base address
* @size: size in bytes
* @offset: difference between base and rm.size
+ * @destroy: flag indicating the region should be destroyed on shutdown
*/
struct mem_region {
u64 base;
u64 size;
unsigned long offset;
+ int destroy;
};
/**
@@ -96,7 +97,7 @@ struct mem_region {
* The HV virtual address space (vas) allows for hotplug memory regions.
* Memory regions can be created and destroyed in the vas at runtime.
* @rm: real mode (bootmem) region
- * @r1: hotplug memory region(s)
+ * @r1: highmem region(s)
*
* ps3 addresses
* virt_addr: a cpu 'translated' effective address
@@ -222,10 +223,6 @@ void ps3_mm_vas_destroy(void)
}
}
-/*============================================================================*/
-/* memory hotplug routines */
-/*============================================================================*/
-
/**
* ps3_mm_region_create - create a memory region in the vas
* @r: pointer to a struct mem_region to accept initialized values
@@ -262,6 +259,7 @@ static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
goto zero_region;
}
+ r->destroy = 1;
r->offset = r->base - map.rm.size;
return result;
@@ -279,7 +277,14 @@ static void ps3_mm_region_destroy(struct mem_region *r)
{
int result;
+ if (!r->destroy) {
+ pr_info("%s:%d: Not destroying high region: %llxh %llxh\n",
+ __func__, __LINE__, r->base, r->size);
+ return;
+ }
+
DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
+
if (r->base) {
result = lv1_release_memory(r->base);
BUG_ON(result);
@@ -288,50 +293,36 @@ static void ps3_mm_region_destroy(struct mem_region *r)
}
}
-/**
- * ps3_mm_add_memory - hot add memory
- */
-
-static int __init ps3_mm_add_memory(void)
+static int ps3_mm_get_repository_highmem(struct mem_region *r)
{
int result;
- unsigned long start_addr;
- unsigned long start_pfn;
- unsigned long nr_pages;
-
- if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
- return -ENODEV;
- BUG_ON(!mem_init_done);
+ /* Assume a single highmem region. */
- start_addr = map.rm.size;
- start_pfn = start_addr >> PAGE_SHIFT;
- nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ result = ps3_repository_read_highmem_info(0, &r->base, &r->size);
- DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
- __func__, __LINE__, start_addr, start_pfn, nr_pages);
-
- result = add_memory(0, start_addr, map.r1.size);
+ if (result)
+ goto zero_region;
- if (result) {
- pr_err("%s:%d: add_memory failed: (%d)\n",
- __func__, __LINE__, result);
- return result;
+ if (!r->base || !r->size) {
+ result = -1;
+ goto zero_region;
}
- memblock_add(start_addr, map.r1.size);
+ r->offset = r->base - map.rm.size;
- result = online_pages(start_pfn, nr_pages);
+ DBG("%s:%d: Found high region in repository: %llxh %llxh\n",
+ __func__, __LINE__, r->base, r->size);
- if (result)
- pr_err("%s:%d: online_pages failed: (%d)\n",
- __func__, __LINE__, result);
+ return 0;
+zero_region:
+ DBG("%s:%d: No high region in repository.\n", __func__, __LINE__);
+
+ r->size = r->base = r->offset = 0;
return result;
}
-device_initcall(ps3_mm_add_memory);
-
/*============================================================================*/
/* dma routines */
/*============================================================================*/
@@ -1217,13 +1208,23 @@ void __init ps3_mm_init(void)
BUG_ON(map.rm.base);
BUG_ON(!map.rm.size);
+ /* Check if we got the highmem region from an earlier boot step */
- /* arrange to do this in ps3_mm_add_memory */
- ps3_mm_region_create(&map.r1, map.total - map.rm.size);
+ if (ps3_mm_get_repository_highmem(&map.r1))
+ ps3_mm_region_create(&map.r1, map.total - map.rm.size);
/* correct map.total for the real total amount of memory we use */
map.total = map.rm.size + map.r1.size;
+ if (!map.r1.size) {
+ DBG("%s:%d: No highmem region found\n", __func__, __LINE__);
+ } else {
+ DBG("%s:%d: Adding highmem region: %llxh %llxh\n",
+ __func__, __LINE__, map.rm.size,
+ map.total - map.rm.size);
+ memblock_add(map.rm.size, map.total - map.rm.size);
+ }
+
DBG(" <- %s:%d\n", __func__, __LINE__);
}
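
Without CONFIG_MEMORY_HOTPLUG the highmem region has to be known before the page allocator is set up, which is why the add_memory()/online_pages() initcall becomes a memblock_add() call inside ps3_mm_init(). A minimal sketch of that registration step, assuming the same layout (highmem immediately above the real-mode region); names are illustrative:

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/types.h>

static void __init demo_register_highmem(u64 rm_size, u64 r1_size)
{
	if (!r1_size)
		return;		/* no highmem region was found or created */

	/* Highmem starts right after the real-mode region. */
	memblock_add(rm_size, r1_size);
}
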
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 1a633ed0fe98..d71329a8e325 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -188,6 +188,22 @@ int ps3_repository_read_rm_size(unsigned int ppe_id, u64 *rm_size);
int ps3_repository_read_region_total(u64 *region_total);
int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size,
u64 *region_total);
+int ps3_repository_read_highmem_region_count(unsigned int *region_count);
+int ps3_repository_read_highmem_base(unsigned int region_index,
+ u64 *highmem_base);
+int ps3_repository_read_highmem_size(unsigned int region_index,
+ u64 *highmem_size);
+int ps3_repository_read_highmem_info(unsigned int region_index,
+ u64 *highmem_base, u64 *highmem_size);
+
+int ps3_repository_write_highmem_region_count(unsigned int region_count);
+int ps3_repository_write_highmem_base(unsigned int region_index,
+ u64 highmem_base);
+int ps3_repository_write_highmem_size(unsigned int region_index,
+ u64 highmem_size);
+int ps3_repository_write_highmem_info(unsigned int region_index,
+ u64 highmem_base, u64 highmem_size);
+int ps3_repository_delete_highmem_info(unsigned int region_index);
/* repository pme info */
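
An illustrative pairing of the new calls (not from the patch): a bootloader stage built with CONFIG_PS3_REPOSITORY_WRITE publishes one preallocated region, and the kernel later reads it back, as ps3_mm_get_repository_highmem() does. The demo_* wrappers are hypothetical and error handling is abbreviated.

#include <linux/types.h>

/* Writer side (bootloader stage): record one highmem region. */
static int demo_publish_highmem(u64 base, u64 size)
{
	int result = ps3_repository_write_highmem_region_count(1);

	if (result)
		return result;
	return ps3_repository_write_highmem_info(0, base, size);
}

/* Reader side (kernel): fetch region 0, if present. */
static int demo_consume_highmem(u64 *base, u64 *size)
{
	return ps3_repository_read_highmem_info(0, base, size);
}
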
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 7bdfea336f5e..9b47ba7a5de7 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -779,6 +779,72 @@ int ps3_repository_read_mm_info(u64 *rm_base, u64 *rm_size, u64 *region_total)
}
/**
+ * ps3_repository_read_highmem_region_count - Read the number of highmem regions
+ *
+ * Bootloaders must arrange the repository nodes such that regions are indexed
+ * with a region_index from 0 to region_count-1.
+ */
+
+int ps3_repository_read_highmem_region_count(unsigned int *region_count)
+{
+ int result;
+ u64 v1 = 0;
+
+ result = read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("highmem", 0),
+ make_field("region", 0),
+ make_field("count", 0),
+ 0,
+ &v1, NULL);
+ *region_count = v1;
+ return result;
+}
+
+
+int ps3_repository_read_highmem_base(unsigned int region_index,
+ u64 *highmem_base)
+{
+ return read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("base", 0),
+ 0,
+ highmem_base, NULL);
+}
+
+int ps3_repository_read_highmem_size(unsigned int region_index,
+ u64 *highmem_size)
+{
+ return read_node(PS3_LPAR_ID_CURRENT,
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("size", 0),
+ 0,
+ highmem_size, NULL);
+}
+
+/**
+ * ps3_repository_read_highmem_info - Read high memory region info
+ * @region_index: Region index, {0,..,region_count-1}.
+ * @highmem_base: High memory base address.
+ * @highmem_size: High memory size.
+ *
+ * Bootloaders that preallocate highmem regions must place the
+ * region info into the repository at these well-known nodes.
+ */
+
+int ps3_repository_read_highmem_info(unsigned int region_index,
+ u64 *highmem_base, u64 *highmem_size)
+{
+ int result;
+
+ *highmem_base = 0;
+ result = ps3_repository_read_highmem_base(region_index, highmem_base);
+ return result ? result
+ : ps3_repository_read_highmem_size(region_index, highmem_size);
+}
+
+/**
* ps3_repository_read_num_spu_reserved - Number of physical spus reserved.
* @num_spu: Number of physical spus.
*/
@@ -1002,6 +1068,138 @@ int ps3_repository_read_lpm_privileges(unsigned int be_index, u64 *lpar,
lpar, rights);
}
+#if defined(CONFIG_PS3_REPOSITORY_WRITE)
+
+static int create_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
+{
+ int result;
+
+ dump_node(0, n1, n2, n3, n4, v1, v2);
+
+ result = lv1_create_repository_node(n1, n2, n3, n4, v1, v2);
+
+ if (result) {
+ pr_devel("%s:%d: lv1_create_repository_node failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int delete_node(u64 n1, u64 n2, u64 n3, u64 n4)
+{
+ int result;
+
+ dump_node(0, n1, n2, n3, n4, 0, 0);
+
+ result = lv1_delete_repository_node(n1, n2, n3, n4);
+
+ if (result) {
+ pr_devel("%s:%d: lv1_delete_repository_node failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+static int write_node(u64 n1, u64 n2, u64 n3, u64 n4, u64 v1, u64 v2)
+{
+ int result;
+
+ result = create_node(n1, n2, n3, n4, v1, v2);
+
+ if (!result)
+ return 0;
+
+ result = lv1_write_repository_node(n1, n2, n3, n4, v1, v2);
+
+ if (result) {
+ pr_devel("%s:%d: lv1_write_repository_node failed: %s\n",
+ __func__, __LINE__, ps3_result(result));
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+int ps3_repository_write_highmem_region_count(unsigned int region_count)
+{
+ int result;
+ u64 v1 = (u64)region_count;
+
+ result = write_node(
+ make_first_field("highmem", 0),
+ make_field("region", 0),
+ make_field("count", 0),
+ 0,
+ v1, 0);
+ return result;
+}
+
+int ps3_repository_write_highmem_base(unsigned int region_index,
+ u64 highmem_base)
+{
+ return write_node(
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("base", 0),
+ 0,
+ highmem_base, 0);
+}
+
+int ps3_repository_write_highmem_size(unsigned int region_index,
+ u64 highmem_size)
+{
+ return write_node(
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("size", 0),
+ 0,
+ highmem_size, 0);
+}
+
+int ps3_repository_write_highmem_info(unsigned int region_index,
+ u64 highmem_base, u64 highmem_size)
+{
+ int result;
+
+ result = ps3_repository_write_highmem_base(region_index, highmem_base);
+ return result ? result
+ : ps3_repository_write_highmem_size(region_index, highmem_size);
+}
+
+static int ps3_repository_delete_highmem_base(unsigned int region_index)
+{
+ return delete_node(
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("base", 0),
+ 0);
+}
+
+static int ps3_repository_delete_highmem_size(unsigned int region_index)
+{
+ return delete_node(
+ make_first_field("highmem", 0),
+ make_field("region", region_index),
+ make_field("size", 0),
+ 0);
+}
+
+int ps3_repository_delete_highmem_info(unsigned int region_index)
+{
+ int result;
+
+ result = ps3_repository_delete_highmem_base(region_index);
+ result += ps3_repository_delete_highmem_size(region_index);
+
+ return result ? -1 : 0;
+}
+
+#endif /* defined(CONFIG_PS3_REPOSITORY_WRITE) */
+
#if defined(DEBUG)
int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index aadbe4f6d537..837cf49357ed 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -30,9 +30,9 @@ config PPC_SPLPAR
two or more partitions.
config EEH
- bool "PCI Extended Error Handling (EEH)" if EXPERT
+ bool
depends on PPC_PSERIES && PCI
- default y if !EXPERT
+ default y
config PSERIES_MSI
bool
@@ -67,7 +67,7 @@ config IO_EVENT_IRQ
This option will only enable the IO event platform code. You
will still need to enable or compile the actual drivers
- that use this infrastruture to handle IO event interrupts.
+ that use this infrastructure to handle IO event interrupts.
Say Y if you are unsure.
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 309d38ef7322..ecd394cf34e6 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -489,7 +489,7 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* a stack trace will help the device-driver authors figure
* out what happened. So print that out.
*/
- dump_stack();
+ WARN(1, "EEH: failure detected\n");
return 1;
dn_unlock:
@@ -1076,7 +1076,7 @@ static void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Adding device %s\n", pci_name(dev));
dn = pci_device_to_OF_node(dev);
- edev = pci_dev_to_eeh_dev(dev);
+ edev = of_node_to_eeh_dev(dn);
if (edev->pdev == dev) {
pr_debug("EEH: Already referenced !\n");
return;
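
WARN() folds the dump_stack() backtrace and the message into one report and also taints the kernel, which makes the event easier to spot in logs. A one-line sketch with an illustrative condition:

#include <linux/kernel.h>
#include <linux/types.h>

static void demo_report_eeh_failure(bool frozen)
{
	/* Prints the message plus a backtrace when frozen is true. */
	WARN(frozen, "EEH: failure detected\n");
}
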
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index 342797fc0f9c..13e8cc43adf7 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -22,12 +22,12 @@ static inline long poll_pending(void)
static inline u8 get_cede_latency_hint(void)
{
- return get_lppaca()->gpr5_dword.fields.cede_latency_hint;
+ return get_lppaca()->cede_latency_hint;
}
static inline void set_cede_latency_hint(u8 latency_hint)
{
- get_lppaca()->gpr5_dword.fields.cede_latency_hint = latency_hint;
+ get_lppaca()->cede_latency_hint = latency_hint;
}
static inline long cede_processor(void)
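
With the hint now a plain byte in the lppaca, the usual pattern is to raise the hint, cede, and restore the previous value, much like the extended cede helper elsewhere in this header. A hedged sketch (the function name is illustrative):

#include <linux/types.h>

static inline long demo_cede_with_hint(u8 latency_hint)
{
	long rc;
	u8 old_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);
	rc = cede_processor();
	set_cede_latency_hint(old_hint);

	return rc;
}
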
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 168651acdd83..7b3bf76ef834 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -103,11 +103,13 @@ int pSeries_reconfig_notifier_register(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
}
+EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_register);
void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
{
blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
}
+EXPORT_SYMBOL_GPL(pSeries_reconfig_notifier_unregister);
int pSeries_reconfig_notify(unsigned long action, void *p)
{
@@ -426,6 +428,7 @@ static int do_remove_property(char *buf, size_t bufsize)
static int do_update_property(char *buf, size_t bufsize)
{
struct device_node *np;
+ struct pSeries_reconfig_prop_update upd_value;
unsigned char *value;
char *name, *end, *next_prop;
int rc, length;
@@ -454,6 +457,10 @@ static int do_update_property(char *buf, size_t bufsize)
return -ENODEV;
}
+ upd_value.node = np;
+ upd_value.property = newprop;
+ pSeries_reconfig_notify(PSERIES_UPDATE_PROPERTY, &upd_value);
+
rc = prom_update_property(np, newprop, oldprop);
if (rc)
return rc;
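
The two EXPORT_SYMBOL_GPL() additions let modules subscribe to reconfiguration events, including the new PSERIES_UPDATE_PROPERTY notification raised just above. A hedged sketch of such a consumer; the demo_* names are hypothetical, and the struct fields follow the usage in this hunk.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <asm/pSeries_reconfig.h>

static int demo_reconfig_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	if (action == PSERIES_UPDATE_PROPERTY) {
		struct pSeries_reconfig_prop_update *upd = data;

		pr_debug("property %s updated on %s\n",
			 upd->property->name, upd->node->full_name);
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_reconfig_nb = {
	.notifier_call = demo_reconfig_notify,
};

static int __init demo_reconfig_init(void)
{
	return pSeries_reconfig_notifier_register(&demo_reconfig_nb);
}
module_init(demo_reconfig_init);
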
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index d3be961e2ae7..10386b676d87 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -51,8 +51,7 @@
static intctl_cpm2_t __iomem *cpm2_intctl;
static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
static const u_char irq_to_siureg[] = {
1, 1, 1, 1, 1, 1, 1, 1,
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index d5f5416be310..b724622c3a0b 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -18,69 +18,45 @@
extern int cpm_get_irq(struct pt_regs *regs);
static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
static sysconf8xx_t __iomem *siu_reg;
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+ return 0x80000000 >> irqd_to_hwirq(d);
+}
static void mpc8xx_unmask_irq(struct irq_data *d)
{
- int bit, word;
- unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
- bit = irq_nr & 0x1f;
- word = irq_nr >> 5;
-
- ppc_cached_irq_mask[word] |= (1 << (31-bit));
- out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+ mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+ out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static void mpc8xx_mask_irq(struct irq_data *d)
{
- int bit, word;
- unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
- bit = irq_nr & 0x1f;
- word = irq_nr >> 5;
-
- ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
- out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+ mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+ out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static void mpc8xx_ack(struct irq_data *d)
{
- int bit;
- unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
- bit = irq_nr & 0x1f;
- out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+ out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
}
static void mpc8xx_end_irq(struct irq_data *d)
{
- int bit, word;
- unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
- bit = irq_nr & 0x1f;
- word = irq_nr >> 5;
-
- ppc_cached_irq_mask[word] |= (1 << (31-bit));
- out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+ mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+ out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
}
static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
- if (flow_type & IRQ_TYPE_EDGE_FALLING) {
- irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+ /* only external IRQ senses are programmable */
+ if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
unsigned int siel = in_be32(&siu_reg->sc_siel);
-
- /* only external IRQ senses are programmable */
- if ((hw & 1) == 0) {
- siel |= (0x80000000 >> hw);
- out_be32(&siu_reg->sc_siel, siel);
- __irq_set_handler_locked(d->irq, handle_edge_irq);
- }
+ siel |= mpc8xx_irqd_to_bit(d);
+ out_be32(&siu_reg->sc_siel, siel);
+ __irq_set_handler_locked(d->irq, handle_edge_irq);
}
return 0;
}
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
IRQ_TYPE_EDGE_FALLING,
};
+ if (intspec[0] > 0x1f)
+ return 0;
+
*out_hwirq = intspec[0];
if (intsize > 1 && intspec[1] < 4)
*out_flags = map_pic_senses[intspec[1]];
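
The helper above encodes the SIU's MSB-first bit numbering, and the hwirq & 1 test reflects that only the external interrupt lines (the even hwirqs) have a programmable sense. A short sketch restating both rules with illustrative names:

#include <linux/types.h>

/* hwirq 0 is the most significant mask bit; valid for hwirq < 32. */
static inline u32 demo_hwirq_to_siu_bit(unsigned long hwirq)
{
	return 0x80000000u >> hwirq;
}

/* Only the even hwirqs (external IRQ pins) accept a sense change. */
static inline bool demo_sense_is_programmable(unsigned long hwirq)
{
	return (hwirq & 1) == 0;
}
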
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 9ac71ebd2c40..395af1347749 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -604,18 +604,14 @@ static struct mpic *mpic_find(unsigned int irq)
}
/* Determine if the linux irq is an IPI */
-static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq)
+static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src)
{
- unsigned int src = virq_to_hw(irq);
-
return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]);
}
/* Determine if the linux irq is a timer */
-static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq)
+static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src)
{
- unsigned int src = virq_to_hw(irq);
-
return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]);
}
@@ -876,21 +872,45 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
if (src >= mpic->num_sources)
return -EINVAL;
+ vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
+
+ /* We don't support "none" type */
if (flow_type == IRQ_TYPE_NONE)
- if (mpic->senses && src < mpic->senses_count)
- flow_type = mpic->senses[src];
- if (flow_type == IRQ_TYPE_NONE)
- flow_type = IRQ_TYPE_LEVEL_LOW;
+ flow_type = IRQ_TYPE_DEFAULT;
+
+ /* Default: read HW settings */
+ if (flow_type == IRQ_TYPE_DEFAULT) {
+ switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+ MPIC_INFO(VECPRI_SENSE_MASK))) {
+ case MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE):
+ flow_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
+ flow_type = IRQ_TYPE_EDGE_FALLING;
+ break;
+ case MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE):
+ flow_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ break;
+ }
+ }
+ /* Apply to irq desc */
irqd_set_trigger_type(d, flow_type);
+ /* Apply to HW */
if (mpic_is_ht_interrupt(mpic, src))
vecpri = MPIC_VECPRI_POLARITY_POSITIVE |
MPIC_VECPRI_SENSE_EDGE;
else
vecpri = mpic_type_to_vecpri(mpic, flow_type);
- vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) |
MPIC_INFO(VECPRI_SENSE_MASK));
vnew |= vecpri;
@@ -1026,7 +1046,7 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq);
/* Set default irq type */
- irq_set_irq_type(virq, IRQ_TYPE_NONE);
+ irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);
/* If the MPIC was reset, then all vectors have already been
* initialized. Otherwise, a per source lazy initialization
@@ -1417,12 +1437,6 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num,
mpic->num_sources = isu_first + mpic->isu_size;
}
-void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count)
-{
- mpic->senses = senses;
- mpic->senses_count = count;
-}
-
void __init mpic_init(struct mpic *mpic)
{
int i, cpu;
@@ -1555,12 +1569,12 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
return;
raw_spin_lock_irqsave(&mpic_lock, flags);
- if (mpic_is_ipi(mpic, irq)) {
+ if (mpic_is_ipi(mpic, src)) {
reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) &
~MPIC_VECPRI_PRIORITY_MASK;
mpic_ipi_write(src - mpic->ipi_vecs[0],
reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT));
- } else if (mpic_is_tm(mpic, irq)) {
+ } else if (mpic_is_tm(mpic, src)) {
reg = mpic_tm_read(src - mpic->timer_vecs[0]) &
~MPIC_VECPRI_PRIORITY_MASK;
mpic_tm_write(src - mpic->timer_vecs[0],
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
index 6e7fa386e76a..483d8fa72e8b 100644
--- a/arch/powerpc/sysdev/mpic_msgr.c
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -27,6 +27,7 @@
static struct mpic_msgr **mpic_msgrs;
static unsigned int mpic_msgr_count;
+static DEFINE_RAW_SPINLOCK(msgrs_lock);
static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
{
@@ -56,12 +57,11 @@ struct mpic_msgr *mpic_msgr_get(unsigned int reg_num)
if (reg_num >= mpic_msgr_count)
return ERR_PTR(-ENODEV);
- raw_spin_lock_irqsave(&msgr->lock, flags);
- if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) {
- msgr = mpic_msgrs[reg_num];
+ raw_spin_lock_irqsave(&msgrs_lock, flags);
+ msgr = mpic_msgrs[reg_num];
+ if (msgr->in_use == MSGR_FREE)
msgr->in_use = MSGR_INUSE;
- }
- raw_spin_unlock_irqrestore(&msgr->lock, flags);
+ raw_spin_unlock_irqrestore(&msgrs_lock, flags);
return msgr;
}
@@ -228,7 +228,7 @@ static __devinit int mpic_msgr_probe(struct platform_device *dev)
reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
- msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET;
+ msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET);
msgr->in_use = MSGR_FREE;
msgr->num = i;
raw_spin_lock_init(&msgr->lock);
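
With allocation now serialized by the single msgrs_lock, a caller of mpic_msgr_get() checks for the ERR_PTR returned when the register number is out of range. An illustrative caller (demo_claim_msgr is hypothetical; a matching mpic_msgr_put() release is assumed):

#include <linux/err.h>
#include <asm/mpic_msgr.h>

static int demo_claim_msgr(unsigned int reg_num, struct mpic_msgr **out)
{
	struct mpic_msgr *msgr = mpic_msgr_get(reg_num);

	if (IS_ERR(msgr))
		return PTR_ERR(msgr);

	*out = msgr;
	return 0;
}
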
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 1c2d7af17bbe..82c6702dcbab 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -28,10 +28,11 @@
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/export.h>
+#include <linux/kernel.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
-#include <boot/dcr.h>
+#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/msi_bitmap.h>
@@ -43,13 +44,14 @@
#define PEIH_FLUSH0 0x30
#define PEIH_FLUSH1 0x38
#define PEIH_CNTRST 0x48
-#define NR_MSI_IRQS 4
+
+static int msi_irqs;
struct ppc4xx_msi {
u32 msi_addr_lo;
u32 msi_addr_hi;
void __iomem *msi_regs;
- int msi_virqs[NR_MSI_IRQS];
+ int *msi_virqs;
struct msi_bitmap bitmap;
struct device_node *msi_dev;
};
@@ -61,7 +63,7 @@ static int ppc4xx_msi_init_allocator(struct platform_device *dev,
{
int err;
- err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS,
+ err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs,
dev->dev.of_node);
if (err)
return err;
@@ -83,6 +85,11 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int),
+ GFP_KERNEL);
+ if (!msi_data->msi_virqs)
+ return -ENOMEM;
+
list_for_each_entry(entry, &dev->msi_list, list) {
int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (int_no >= 0)
@@ -150,12 +157,11 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
if (!sdr_addr)
return -1;
- SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */
- SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */
-
+ mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
+ mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
- if (msi->msi_dev)
+ if (!msi->msi_dev)
return -ENODEV;
msi->msi_regs = of_iomap(msi->msi_dev, 0);
@@ -167,9 +173,12 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- msi->msi_addr_hi = 0x0;
- msi->msi_addr_lo = (u32) msi_phys;
- dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo);
+ if (!msi_virt)
+ return -ENOMEM;
+ msi->msi_addr_hi = upper_32_bits(msi_phys);
+ msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
+ dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
+ msi->msi_addr_hi, msi->msi_addr_lo);
/* Program the Interrupt handler Termination addr registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
@@ -185,6 +194,8 @@ static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
+ dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
+
return 0;
}
@@ -194,7 +205,7 @@ static int ppc4xx_of_msi_remove(struct platform_device *dev)
int i;
int virq;
- for (i = 0; i < NR_MSI_IRQS; i++) {
+ for (i = 0; i < msi_irqs; i++) {
virq = msi->msi_virqs[i];
if (virq != NO_IRQ)
irq_dispose_mapping(virq);
@@ -215,8 +226,6 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
struct resource res;
int err = 0;
- msi = &ppc4xx_msi;/*keep the msi data for further use*/
-
dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
@@ -234,6 +243,10 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
goto error_out;
}
+ msi_irqs = of_irq_count(dev->dev.of_node);
+ if (!msi_irqs)
+ return -ENODEV;
+
if (ppc4xx_setup_pcieh_hw(dev, res, msi))
goto error_out;
@@ -242,6 +255,7 @@ static int __devinit ppc4xx_msi_probe(struct platform_device *dev)
dev_err(&dev->dev, "Error allocating MSI bitmap\n");
goto error_out;
}
+ ppc4xx_msi = *msi;
ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs;
ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
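
The driver now sizes its data from the device tree instead of a compile-time NR_MSI_IRQS: of_irq_count() reports how many interrupts the node describes and the virq table is allocated to match. A small sketch of that pattern with illustrative names (kcalloc is used here for zeroed storage):

#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>

static int *demo_alloc_virq_table(struct device_node *np, int *count)
{
	*count = of_irq_count(np);
	if (!*count)
		return NULL;	/* node describes no interrupts */

	return kcalloc(*count, sizeof(int), GFP_KERNEL);
}
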
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c
index 49a3ece1c6b3..702256a1ca11 100644
--- a/arch/powerpc/sysdev/scom.c
+++ b/arch/powerpc/sysdev/scom.c
@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <asm/debug.h>
#include <asm/prom.h>
#include <asm/scom.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index ea5e204e3450..cd1d18db92c6 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
+ struct irq_desc *desc;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void)
/* Allow IPIs again... */
icp_ops->set_priority(DEFAULT_PRIORITY);
- for_each_irq(virq) {
- struct irq_desc *desc;
+ for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
@@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void)
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
- desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
- if (!desc || !desc->action)
+ if (!desc->action)
continue;
if (desc->irq_data.domain != xics_host)
continue;