Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/40x/Kconfig | 8
-rw-r--r--  arch/powerpc/platforms/40x/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/40x/hcu4.c | 61
-rw-r--r--  arch/powerpc/platforms/512x/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/82xx/km82xx.c | 4
-rw-r--r--  arch/powerpc/platforms/83xx/Kconfig | 9
-rw-r--r--  arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 58
-rw-r--r--  arch/powerpc/platforms/85xx/Kconfig | 30
-rw-r--r--  arch/powerpc/platforms/85xx/Makefile | 3
-rw-r--r--  arch/powerpc/platforms/85xx/p1022_ds.c | 11
-rw-r--r--  arch/powerpc/platforms/85xx/p2041_rdb.c (renamed from arch/powerpc/platforms/85xx/p2040_rdb.c) | 18
-rw-r--r--  arch/powerpc/platforms/85xx/p3060_qds.c | 77
-rw-r--r--  arch/powerpc/platforms/85xx/sbc8560.c | 2
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 12
-rw-r--r--  arch/powerpc/platforms/86xx/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig | 13
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 4
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 21
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 16
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 5
-rw-r--r--  arch/powerpc/platforms/powernv/opal-nvram.c | 88
-rw-r--r--  arch/powerpc/platforms/powernv/opal-rtc.c | 97
-rw-r--r--  arch/powerpc/platforms/powernv/opal-takeover.S | 140
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 101
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 322
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 234
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 427
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 48
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 16
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 196
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 182
-rw-r--r--  arch/powerpc/platforms/ps3/Kconfig | 12
-rw-r--r--  arch/powerpc/platforms/ps3/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/ps3/gelic_udbg.c | 273
-rw-r--r--  arch/powerpc/platforms/ps3/system-bus.c | 7
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/eeh.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 34
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 171
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/wsp/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 48
-rw-r--r--  arch/powerpc/platforms/wsp/ics.h | 5
-rw-r--r--  arch/powerpc/platforms/wsp/msi.c | 102
-rw-r--r--  arch/powerpc/platforms/wsp/msi.h | 19
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 4
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.h | 3
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 1133
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.h | 268
51 files changed, 4180 insertions(+), 127 deletions(-)
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index b5d87067a58b..8f9c3e245cff 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -32,14 +32,6 @@ config EP405
help
This option enables support for the EP405/EP405PC boards.
-config HCU4
- bool "Hcu4"
- depends on 40x
- default n
- select 405GPR
- help
- This option enables support for the Nestal Maschinen HCU4 board.
-
config HOTFOOT
bool "Hotfoot"
depends on 40x
diff --git a/arch/powerpc/platforms/40x/Makefile b/arch/powerpc/platforms/40x/Makefile
index 56e89004c468..88c22de0c850 100644
--- a/arch/powerpc/platforms/40x/Makefile
+++ b/arch/powerpc/platforms/40x/Makefile
@@ -1,4 +1,3 @@
-obj-$(CONFIG_HCU4) += hcu4.o
obj-$(CONFIG_WALNUT) += walnut.o
obj-$(CONFIG_XILINX_VIRTEX_GENERIC_BOARD) += virtex.o
obj-$(CONFIG_EP405) += ep405.o
diff --git a/arch/powerpc/platforms/40x/hcu4.c b/arch/powerpc/platforms/40x/hcu4.c
deleted file mode 100644
index 60b2afecab75..000000000000
--- a/arch/powerpc/platforms/40x/hcu4.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Architecture- / platform-specific boot-time initialization code for
- * IBM PowerPC 4xx based boards. Adapted from original
- * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
- * <dan@net4x.com>.
- *
- * Copyright(c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
- *
- * Rewritten and ported to the merged powerpc tree:
- * Copyright 2007 IBM Corporation
- * Josh Boyer <jwboyer@linux.vnet.ibm.com>
- *
- * 2002 (c) MontaVista, Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-#include <asm/time.h>
-#include <asm/uic.h>
-#include <asm/ppc4xx.h>
-
-static __initdata struct of_device_id hcu4_of_bus[] = {
- { .compatible = "ibm,plb3", },
- { .compatible = "ibm,opb", },
- { .compatible = "ibm,ebc", },
- {},
-};
-
-static int __init hcu4_device_probe(void)
-{
- of_platform_bus_probe(NULL, hcu4_of_bus, NULL);
- return 0;
-}
-machine_device_initcall(hcu4, hcu4_device_probe);
-
-static int __init hcu4_probe(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "netstal,hcu4"))
- return 0;
-
- return 1;
-}
-
-define_machine(hcu4) {
- .name = "HCU4",
- .probe = hcu4_probe,
- .progress = udbg_progress,
- .init_IRQ = uic_init_tree,
- .get_irq = uic_get_irq,
- .restart = ppc4xx_reset_system,
- .calibrate_decr = generic_calibrate_decr,
-};
diff --git a/arch/powerpc/platforms/512x/Kconfig b/arch/powerpc/platforms/512x/Kconfig
index 27b0651221d1..b3ebce1aec07 100644
--- a/arch/powerpc/platforms/512x/Kconfig
+++ b/arch/powerpc/platforms/512x/Kconfig
@@ -6,6 +6,7 @@ config PPC_MPC512x
select PPC_CLOCK
select PPC_PCI_CHOICE
select FSL_PCI if PCI
+ select ARCH_WANT_OPTIONAL_GPIOLIB
config MPC5121_ADS
bool "Freescale MPC5121E ADS"
diff --git a/arch/powerpc/platforms/82xx/km82xx.c b/arch/powerpc/platforms/82xx/km82xx.c
index 428c5e0a0e75..3661bcdc326a 100644
--- a/arch/powerpc/platforms/82xx/km82xx.c
+++ b/arch/powerpc/platforms/82xx/km82xx.c
@@ -49,6 +49,9 @@ struct cpm_pin {
};
static __initdata struct cpm_pin km82xx_pins[] = {
+ /* SMC1 */
+ {2, 4, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
+ {2, 5, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY},
/* SMC2 */
{0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY},
@@ -137,6 +140,7 @@ static void __init init_ioports(void)
}
cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8);
+ cpm2_smc_clk_setup(CPM_CLK_SMC1, CPM_BRG7);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX);
cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX);
cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX);
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig
index 73f4135f3a1a..670a033264c0 100644
--- a/arch/powerpc/platforms/83xx/Kconfig
+++ b/arch/powerpc/platforms/83xx/Kconfig
@@ -114,18 +114,21 @@ config KMETER1
endif
-# used for usb
+# used for usb & gpio
config PPC_MPC831x
bool
+ select ARCH_WANT_OPTIONAL_GPIOLIB
# used for math-emu
config PPC_MPC832x
bool
-# used for usb
+# used for usb & gpio
config PPC_MPC834x
bool
+ select ARCH_WANT_OPTIONAL_GPIOLIB
-# used for usb
+# used for usb & gpio
config PPC_MPC837x
bool
+ select ARCH_WANT_OPTIONAL_GPIOLIB
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 70798ac911ef..ef6537b8ed33 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -21,6 +21,8 @@
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/slab.h>
+#include <linux/kthread.h>
+#include <linux/reboot.h>
#include <asm/prom.h>
#include <asm/machdep.h>
@@ -30,6 +32,7 @@
*/
#define MCU_REG_CTRL 0x20
#define MCU_CTRL_POFF 0x40
+#define MCU_CTRL_BTN 0x80
#define MCU_NUM_GPIO 2
@@ -42,13 +45,55 @@ struct mcu {
static struct mcu *glob_mcu;
+struct task_struct *shutdown_thread;
+static int shutdown_thread_fn(void *data)
+{
+ int ret;
+ struct mcu *mcu = glob_mcu;
+
+ while (!kthread_should_stop()) {
+ ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL);
+ if (ret < 0)
+ pr_err("MCU status reg read failed.\n");
+ mcu->reg_ctrl = ret;
+
+
+ if (mcu->reg_ctrl & MCU_CTRL_BTN) {
+ i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
+ mcu->reg_ctrl & ~MCU_CTRL_BTN);
+
+ ctrl_alt_del();
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
+ return 0;
+}
+
+static ssize_t show_status(struct device *d,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mcu *mcu = glob_mcu;
+
+ ret = i2c_smbus_read_byte_data(mcu->client, MCU_REG_CTRL);
+ if (ret < 0)
+ return -ENODEV;
+ mcu->reg_ctrl = ret;
+
+ return sprintf(buf, "%02x\n", ret);
+}
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+
static void mcu_power_off(void)
{
struct mcu *mcu = glob_mcu;
pr_info("Sending power-off request to the MCU...\n");
mutex_lock(&mcu->lock);
- i2c_smbus_write_byte_data(glob_mcu->client, MCU_REG_CTRL,
+ i2c_smbus_write_byte_data(mcu->client, MCU_REG_CTRL,
mcu->reg_ctrl | MCU_CTRL_POFF);
mutex_unlock(&mcu->lock);
}
@@ -130,6 +175,13 @@ static int __devinit mcu_probe(struct i2c_client *client,
dev_info(&client->dev, "will provide power-off service\n");
}
+ if (device_create_file(&client->dev, &dev_attr_status))
+ dev_err(&client->dev,
+ "couldn't create device file for status\n");
+
+ shutdown_thread = kthread_run(shutdown_thread_fn, NULL,
+ "mcu-i2c-shdn");
+
return 0;
err:
kfree(mcu);
@@ -141,6 +193,10 @@ static int __devexit mcu_remove(struct i2c_client *client)
struct mcu *mcu = i2c_get_clientdata(client);
int ret;
+ kthread_stop(shutdown_thread);
+
+ device_remove_file(&client->dev, &dev_attr_status);
+
if (glob_mcu == mcu) {
ppc_md.power_off = NULL;
glob_mcu = NULL;
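
The shutdown_thread_fn() added above follows a common "poll an I2C register from a kthread" pattern: read a control byte once a second, react to the button bit, acknowledge it, and sleep again. A minimal, self-contained sketch of that pattern is shown below; the names, register offset and one-second period are illustrative assumptions rather than part of this patch.

/* Sketch only: poll a control register once a second and request a
 * clean reboot when the "button pressed" bit is seen. */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/i2c.h>
#include <linux/reboot.h>

#define DEMO_REG_CTRL	0x20	/* same layout as MCU_REG_CTRL above */
#define DEMO_CTRL_BTN	0x80

static int demo_poll_fn(void *data)
{
	struct i2c_client *client = data;
	int ret;

	while (!kthread_should_stop()) {
		ret = i2c_smbus_read_byte_data(client, DEMO_REG_CTRL);
		if (ret >= 0 && (ret & DEMO_CTRL_BTN)) {
			/* acknowledge the button, then ask init to reboot */
			i2c_smbus_write_byte_data(client, DEMO_REG_CTRL,
						  ret & ~DEMO_CTRL_BTN);
			ctrl_alt_del();
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
	}
	return 0;
}
/* started from probe() with: kthread_run(demo_poll_fn, client, "demo-poll"); */
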
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index 12f5932dadc9..45023e26aea3 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -171,17 +171,18 @@ config SBC8560
help
This option enables support for the Wind River SBC8560 board
-config P2040_RDB
- bool "Freescale P2040 RDB"
+config P2041_RDB
+ bool "Freescale P2041 RDB"
select DEFAULT_UIMAGE
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
- This option enables support for the P2040 RDB board
+ This option enables support for the P2041 RDB board
config P3041_DS
bool "Freescale P3041 DS"
@@ -189,19 +190,33 @@ config P3041_DS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
This option enables support for the P3041 DS board
+config P3060_QDS
+ bool "Freescale P3060 QDS"
+ select DEFAULT_UIMAGE
+ select PPC_E500MC
+ select PHYS_64BIT
+ select SWIOTLB
+ select MPC8xxx_GPIO
+ select HAS_RAPIDIO
+ select PPC_EPAPR_HV_PIC
+ help
+ This option enables support for the P3060 QDS board
+
config P4080_DS
bool "Freescale P4080 DS"
select DEFAULT_UIMAGE
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
@@ -216,7 +231,8 @@ config P5020_DS
select PPC_E500MC
select PHYS_64BIT
select SWIOTLB
- select MPC8xxx_GPIO
+ select ARCH_REQUIRE_GPIOLIB
+ select GPIO_MPC8XXX
select HAS_RAPIDIO
select PPC_EPAPR_HV_PIC
help
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index a971b32c5c0a..bc5acb95917a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -13,8 +13,9 @@ obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o
obj-$(CONFIG_P1010_RDB) += p1010rdb.o
obj-$(CONFIG_P1022_DS) += p1022_ds.o
obj-$(CONFIG_P1023_RDS) += p1023_rds.o
-obj-$(CONFIG_P2040_RDB) += p2040_rdb.o corenet_ds.o
+obj-$(CONFIG_P2041_RDB) += p2041_rdb.o corenet_ds.o
obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o
+obj-$(CONFIG_P3060_QDS) += p3060_qds.o corenet_ds.o
obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o
obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o
obj-$(CONFIG_STX_GP3) += stx_gp3.o
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index c01c7277888c..fda15716fada 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -129,17 +129,20 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
*/
static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
{
- struct device_node *pixis_node;
+ struct device_node *np;
void __iomem *pixis;
u8 __iomem *brdcfg1;
- pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
- if (!pixis_node) {
+ np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
+ if (!np)
+ /* older device trees used "fsl,p1022ds-pixis" */
+ np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis");
+ if (!np) {
pr_err("p1022ds: missing ngPIXIS node\n");
return;
}
- pixis = of_iomap(pixis_node, 0);
+ pixis = of_iomap(np, 0);
if (!pixis) {
pr_err("p1022ds: could not map ngPIXIS registers\n");
return;
diff --git a/arch/powerpc/platforms/85xx/p2040_rdb.c b/arch/powerpc/platforms/85xx/p2041_rdb.c
index 32b56ac73dfb..eda6ed5683e1 100644
--- a/arch/powerpc/platforms/85xx/p2040_rdb.c
+++ b/arch/powerpc/platforms/85xx/p2041_rdb.c
@@ -1,5 +1,5 @@
/*
- * P2040 RDB Setup
+ * P2041 RDB Setup
*
* Copyright 2011 Freescale Semiconductor Inc.
*
@@ -35,18 +35,18 @@
/*
* Called very early, device-tree isn't unflattened
*/
-static int __init p2040_rdb_probe(void)
+static int __init p2041_rdb_probe(void)
{
unsigned long root = of_get_flat_dt_root();
#ifdef CONFIG_SMP
extern struct smp_ops_t smp_85xx_ops;
#endif
- if (of_flat_dt_is_compatible(root, "fsl,P2040RDB"))
+ if (of_flat_dt_is_compatible(root, "fsl,P2041RDB"))
return 1;
/* Check if we're running under the Freescale hypervisor */
- if (of_flat_dt_is_compatible(root, "fsl,P2040RDB-hv")) {
+ if (of_flat_dt_is_compatible(root, "fsl,P2041RDB-hv")) {
ppc_md.init_IRQ = ehv_pic_init;
ppc_md.get_irq = ehv_pic_get_irq;
ppc_md.restart = fsl_hv_restart;
@@ -66,9 +66,9 @@ static int __init p2040_rdb_probe(void)
return 0;
}
-define_machine(p2040_rdb) {
- .name = "P2040 RDB",
- .probe = p2040_rdb_probe,
+define_machine(p2041_rdb) {
+ .name = "P2041 RDB",
+ .probe = p2041_rdb_probe,
.setup_arch = corenet_ds_setup_arch,
.init_IRQ = corenet_ds_pic_init,
#ifdef CONFIG_PCI
@@ -81,8 +81,8 @@ define_machine(p2040_rdb) {
.power_save = e500_idle,
};
-machine_device_initcall(p2040_rdb, corenet_ds_publish_devices);
+machine_device_initcall(p2041_rdb, corenet_ds_publish_devices);
#ifdef CONFIG_SWIOTLB
-machine_arch_initcall(p2040_rdb, swiotlb_setup_bus_notifier);
+machine_arch_initcall(p2041_rdb, swiotlb_setup_bus_notifier);
#endif
diff --git a/arch/powerpc/platforms/85xx/p3060_qds.c b/arch/powerpc/platforms/85xx/p3060_qds.c
new file mode 100644
index 000000000000..01dcf44871e9
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/p3060_qds.c
@@ -0,0 +1,77 @@
+/*
+ * P3060 QDS Setup
+ *
+ * Copyright 2011 Freescale Semiconductor Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/phy.h>
+#include <asm/machdep.h>
+#include <asm/udbg.h>
+#include <asm/mpic.h>
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <sysdev/fsl_pci.h>
+#include <asm/ehv_pic.h>
+#include "corenet_ds.h"
+
+/*
+ * Called very early, device-tree isn't unflattened
+ */
+static int __init p3060_qds_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+#ifdef CONFIG_SMP
+ extern struct smp_ops_t smp_85xx_ops;
+#endif
+
+ if (of_flat_dt_is_compatible(root, "fsl,P3060QDS"))
+ return 1;
+
+ /* Check if we're running under the Freescale hypervisor */
+ if (of_flat_dt_is_compatible(root, "fsl,P3060QDS-hv")) {
+ ppc_md.init_IRQ = ehv_pic_init;
+ ppc_md.get_irq = ehv_pic_get_irq;
+ ppc_md.restart = fsl_hv_restart;
+ ppc_md.power_off = fsl_hv_halt;
+ ppc_md.halt = fsl_hv_halt;
+#ifdef CONFIG_SMP
+ /*
+ * Disable the timebase sync operations because we can't write
+ * to the timebase registers under the hypervisor.
+ */
+ smp_85xx_ops.give_timebase = NULL;
+ smp_85xx_ops.take_timebase = NULL;
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+define_machine(p3060_qds) {
+ .name = "P3060 QDS",
+ .probe = p3060_qds_probe,
+ .setup_arch = corenet_ds_setup_arch,
+ .init_IRQ = corenet_ds_pic_init,
+#ifdef CONFIG_PCI
+ .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
+#endif
+ .get_irq = mpic_get_coreint_irq,
+ .restart = fsl_rstcr_restart,
+ .calibrate_decr = generic_calibrate_decr,
+ .progress = udbg_progress,
+ .power_save = e500_idle,
+};
+
+machine_device_initcall(p3060_qds, declare_of_platform_devices);
+
+#ifdef CONFIG_SWIOTLB
+machine_arch_initcall(p3060_qds, swiotlb_setup_bus_notifier);
+#endif
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 09ced7221750..cebd786dc334 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -283,7 +283,7 @@ static int __init sbc8560_bdrstcr_init(void)
of_address_to_resource(np, 0, &res);
- printk(KERN_INFO "sbc8560: Found BRSTCR at i/o 0x%x\n", res.start);
+ printk(KERN_INFO "sbc8560: Found BRSTCR at %pR\n", &res);
brstcr = ioremap(res.start, resource_size(&res));
if(!brstcr)
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 5b9b901f6443..2df4785ffd4e 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -48,10 +48,11 @@ smp_85xx_kick_cpu(int nr)
const u64 *cpu_rel_addr;
__iomem u32 *bptr_vaddr;
struct device_node *np;
- int n = 0;
+ int n = 0, hw_cpu = get_hard_smp_processor_id(nr);
int ioremappable;
- WARN_ON (nr < 0 || nr >= NR_CPUS);
+ WARN_ON(nr < 0 || nr >= NR_CPUS);
+ WARN_ON(hw_cpu < 0 || hw_cpu >= NR_CPUS);
pr_debug("smp_85xx_kick_cpu: kick CPU #%d\n", nr);
@@ -79,7 +80,7 @@ smp_85xx_kick_cpu(int nr)
local_irq_save(flags);
- out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr);
+ out_be32(bptr_vaddr + BOOT_ENTRY_PIR, hw_cpu);
#ifdef CONFIG_PPC32
out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start));
@@ -88,7 +89,7 @@ smp_85xx_kick_cpu(int nr)
(ulong)(bptr_vaddr + SIZE_BOOT_ENTRY));
/* Wait a bit for the CPU to ack. */
- while ((__secondary_hold_acknowledge != nr) && (++n < 1000))
+ while ((__secondary_hold_acknowledge != hw_cpu) && (++n < 1000))
mdelay(1);
#else
smp_generic_kick_cpu(nr);
@@ -206,7 +207,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image)
if ( !timeout )
printk(KERN_ERR "Unable to bring down secondary cpu(s)");
- for (i = 0; i < num_cpus; i++)
+ for_each_online_cpu(i)
{
if ( i == smp_processor_id() ) continue;
mpic_reset_core(i);
@@ -243,6 +244,7 @@ void __init mpc85xx_smp_init(void)
* If left NULL, .message_pass defaults to
* smp_muxed_ipi_message_pass
*/
+ smp_85xx_ops.message_pass = NULL;
smp_85xx_ops.cause_ipi = doorbell_cause_ipi;
}
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index a0b5638c5dc8..8d6599d54ea6 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -4,6 +4,7 @@ menuconfig PPC_86xx
depends on 6xx
select FSL_SOC
select ALTIVEC
+ select ARCH_WANT_OPTIONAL_GPIOLIB
help
The Freescale E600 SoCs have 74xx cores.
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index b9ba86191aed..e4588721ef34 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -1,5 +1,6 @@
menu "Platform support"
+source "arch/powerpc/platforms/powernv/Kconfig"
source "arch/powerpc/platforms/pseries/Kconfig"
source "arch/powerpc/platforms/iseries/Kconfig"
source "arch/powerpc/platforms/chrp/Kconfig"
@@ -333,16 +334,6 @@ config OF_RTC
source "arch/powerpc/sysdev/bestcomm/Kconfig"
-config MPC8xxx_GPIO
- bool "MPC512x/MPC8xxx GPIO support"
- depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \
- FSL_SOC_BOOKE || PPC_86xx
- select GENERIC_GPIO
- select ARCH_REQUIRE_GPIOLIB
- help
- Say Y here if you're going to use hardware that connects to the
- MPC512x/831x/834x/837x/8572/8610 GPIOs.
-
config SIMPLE_GPIO
bool "Support for simple, memory-mapped GPIO controllers"
depends on PPC
@@ -355,7 +346,7 @@ config SIMPLE_GPIO
on-board peripherals.
config MCU_MPC8349EMITX
- tristate "MPC8349E-mITX MCU driver"
+ bool "MPC8349E-mITX MCU driver"
depends on I2C && PPC_83xx
select GENERIC_GPIO
select ARCH_REQUIRE_GPIOLIB
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index e06e39589a09..a85990c886e9 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -69,6 +69,7 @@ config PPC_BOOK3S_64
bool "Server processors"
select PPC_FPU
select PPC_HAVE_PMU_SUPPORT
+ select SYS_SUPPORTS_HUGETLBFS
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -173,6 +174,7 @@ config BOOKE
config FSL_BOOKE
bool
depends on (E200 || E500) && PPC32
+ select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT
default y
# this is for common code between PPC32 & PPC64 FSL BOOKE
@@ -296,7 +298,7 @@ config PPC_BOOK3E_MMU
config PPC_MM_SLICES
bool
- default y if HUGETLB_PAGE || (PPC_STD_MMU_64 && PPC_64K_PAGES)
+ default y if (PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES)
default n
config VIRT_CPU_ACCOUNTING
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 73e2116cfeed..2635a22bade2 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_PPC_82xx) += 82xx/
obj-$(CONFIG_PPC_83xx) += 83xx/
obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
obj-$(CONFIG_PPC_86xx) += 86xx/
+obj-$(CONFIG_PPC_POWERNV) += powernv/
obj-$(CONFIG_PPC_PSERIES) += pseries/
obj-$(CONFIG_PPC_ISERIES) += iseries/
obj-$(CONFIG_PPC_MAPLE) += maple/
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index 26a067122a54..fc46fcac3921 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -1159,6 +1159,26 @@ static int __init setup_iommu_fixed(char *str)
}
__setup("iommu_fixed=", setup_iommu_fixed);
+static u64 cell_dma_get_required_mask(struct device *dev)
+{
+ struct dma_map_ops *dma_ops;
+
+ if (!dev->dma_mask)
+ return 0;
+
+ if (!iommu_fixed_disabled &&
+ cell_iommu_get_fixed_address(dev) != OF_BAD_ADDR)
+ return DMA_BIT_MASK(64);
+
+ dma_ops = get_dma_ops(dev);
+ if (dma_ops->get_required_mask)
+ return dma_ops->get_required_mask(dev);
+
+ WARN_ONCE(1, "no get_required_mask in %p ops", dma_ops);
+
+ return DMA_BIT_MASK(64);
+}
+
static int __init cell_iommu_init(void)
{
struct device_node *np;
@@ -1175,6 +1195,7 @@ static int __init cell_iommu_init(void)
/* Setup various ppc_md. callbacks */
ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
ppc_md.tce_build = tce_build_cell;
ppc_md.tce_free = tce_free_cell;
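
The new cell_dma_get_required_mask() hook feeds the generic dma_get_required_mask() API. A hedged sketch of how a driver typically consumes that value follows; the function name and the 32-bit fallback policy are assumptions for illustration, not something this patch adds.

#include <linux/dma-mapping.h>

/* Sketch: pick the widest useful DMA mask based on what the platform
 * reports as required. */
static int demo_setup_dma(struct device *dev)
{
	u64 required = dma_get_required_mask(dev);

	if (required > DMA_BIT_MASK(32) &&
	    dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
		return 0;			/* 64-bit DMA is usable */

	return dma_set_mask(dev, DMA_BIT_MASK(32));
}
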
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
new file mode 100644
index 000000000000..74fea5c21839
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -0,0 +1,16 @@
+config PPC_POWERNV
+ depends on PPC64 && PPC_BOOK3S
+ bool "IBM PowerNV (Non-Virtualized) platform support"
+ select PPC_NATIVE
+ select PPC_XICS
+ select PPC_ICP_NATIVE
+ select PPC_P7_NAP
+ select PPC_PCI_CHOICE if EMBEDDED
+ default y
+
+config PPC_POWERNV_RTAS
+ depends on PPC_POWERNV
+ bool "Support for RTAS based PowerNV platforms such as BML"
+ default y
+ select PPC_ICS_RTAS
+ select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
new file mode 100644
index 000000000000..31853008b418
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -0,0 +1,5 @@
+obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o
+obj-y += opal-rtc.o opal-nvram.o
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
new file mode 100644
index 000000000000..3f83e1ae26ac
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -0,0 +1,88 @@
+/*
+ * PowerNV nvram code.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+
+#include <asm/opal.h>
+#include <asm/machdep.h>
+
+static unsigned int nvram_size;
+
+static ssize_t opal_nvram_size(void)
+{
+ return nvram_size;
+}
+
+static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
+{
+ s64 rc;
+ int off;
+
+ if (*index >= nvram_size)
+ return 0;
+ off = *index;
+ if ((off + count) > nvram_size)
+ count = nvram_size - off;
+ rc = opal_read_nvram(__pa(buf), count, off);
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+ *index += count;
+ return count;
+}
+
+static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
+{
+ s64 rc = OPAL_BUSY;
+ int off;
+
+ if (*index >= nvram_size)
+ return 0;
+ off = *index;
+ if ((off + count) > nvram_size)
+ count = nvram_size - off;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_write_nvram(__pa(buf), count, off);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ }
+ *index += count;
+ return count;
+}
+
+void __init opal_nvram_init(void)
+{
+ struct device_node *np;
+ const u32 *nbytes_p;
+
+ np = of_find_compatible_node(NULL, NULL, "ibm,opal-nvram");
+ if (np == NULL)
+ return;
+
+ nbytes_p = of_get_property(np, "#bytes", NULL);
+ if (!nbytes_p) {
+ of_node_put(np);
+ return;
+ }
+ nvram_size = *nbytes_p;
+
+ printk(KERN_INFO "OPAL nvram setup, %u bytes\n", nvram_size);
+ of_node_put(np);
+
+ ppc_md.nvram_read = opal_nvram_read;
+ ppc_md.nvram_write = opal_nvram_write;
+ ppc_md.nvram_size = opal_nvram_size;
+}
+
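
opal_nvram_write() above introduces the OPAL_BUSY/OPAL_BUSY_EVENT retry idiom that recurs throughout this series: keep retrying while firmware reports "busy", and poll for events so it can make progress in between. Written out on its own it looks like the sketch below; the helper name and zero-argument callback are illustrative assumptions.

#include <linux/types.h>
#include <asm/opal.h>

/* Sketch: retry an OPAL call until firmware stops reporting "busy". */
static s64 demo_opal_retry(s64 (*call)(void))
{
	s64 rc = OPAL_BUSY;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = call();
		if (rc == OPAL_BUSY_EVENT)
			opal_poll_events(NULL);	/* let firmware catch up */
	}
	return rc;
}
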
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
new file mode 100644
index 000000000000..2aa7641aac9b
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -0,0 +1,97 @@
+/*
+ * PowerNV Real Time Clock.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/time.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/delay.h>
+
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+{
+ tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
+ bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
+ tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
+ tm->tm_mday = bcd2bin(y_m_d & 0xff);
+ tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
+ tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
+ tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
+
+ GregorianDay(tm);
+}
+
+unsigned long __init opal_get_boot_time(void)
+{
+ struct rtc_time tm;
+ u32 y_m_d;
+ u64 h_m_s_ms;
+ long rc = OPAL_BUSY;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
+ if (rc != OPAL_SUCCESS)
+ return 0;
+ opal_to_tm(y_m_d, h_m_s_ms, &tm);
+ return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+}
+
+void opal_get_rtc_time(struct rtc_time *tm)
+{
+ long rc = OPAL_BUSY;
+ u32 y_m_d;
+ u64 h_m_s_ms;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_rtc_read(&y_m_d, &h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
+ if (rc != OPAL_SUCCESS)
+ return;
+ opal_to_tm(y_m_d, h_m_s_ms, tm);
+}
+
+int opal_set_rtc_time(struct rtc_time *tm)
+{
+ long rc = OPAL_BUSY;
+ u32 y_m_d = 0;
+ u64 h_m_s_ms = 0;
+
+ y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
+ y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
+ y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
+ y_m_d |= ((u32)bin2bcd(tm->tm_mday));
+
+ h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
+ h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
+ h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_rtc_write(y_m_d, h_m_s_ms);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
+ return rc == OPAL_SUCCESS ? 0 : -EIO;
+}
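
A worked example of the BCD packing performed by opal_set_rtc_time() above, using an arbitrary timestamp (2011-10-30 23:59:58) chosen purely for illustration:

/*
 *   tm = { .tm_year = 111, .tm_mon = 9, .tm_mday = 30,
 *          .tm_hour = 23, .tm_min = 59, .tm_sec = 58 }
 *
 *   y_m_d    = 0x20111030           (century 20, year 11, month 10, day 30)
 *   h_m_s_ms = 0x2359580000000000   (hour 23, minute 59, second 58, ms unused)
 *
 * opal_to_tm() reverses this with bcd2bin() on the same byte positions.
 */
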
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
new file mode 100644
index 000000000000..77b48b2b9309
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-takeover.S
@@ -0,0 +1,140 @@
+/*
+ * PowerNV OPAL takeover assembly code, for use by prom_init.c
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/hvcall.h>
+#include <asm/asm-offsets.h>
+#include <asm/opal.h>
+
+#define STK_PARAM(i) (48 + ((i)-3)*8)
+
+#define H_HAL_TAKEOVER 0x5124
+#define H_HAL_TAKEOVER_QUERY_MAGIC -1
+
+ .text
+_GLOBAL(opal_query_takeover)
+ mfcr r0
+ stw r0,8(r1)
+ std r3,STK_PARAM(r3)(r1)
+ std r4,STK_PARAM(r4)(r1)
+ li r3,H_HAL_TAKEOVER
+ li r4,H_HAL_TAKEOVER_QUERY_MAGIC
+ HVSC
+ ld r10,STK_PARAM(r3)(r1)
+ std r4,0(r10)
+ ld r10,STK_PARAM(r4)(r1)
+ std r5,0(r10)
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr
+
+_GLOBAL(opal_do_takeover)
+ mfcr r0
+ stw r0,8(r1)
+ mflr r0
+ std r0,16(r1)
+ bl __opal_do_takeover
+ ld r0,16(r1)
+ mtlr r0
+ lwz r0,8(r1)
+ mtcrf 0xff,r0
+ blr
+
+__opal_do_takeover:
+ ld r4,0(r3)
+ ld r5,0x8(r3)
+ ld r6,0x10(r3)
+ ld r7,0x18(r3)
+ ld r8,0x20(r3)
+ ld r9,0x28(r3)
+ ld r10,0x30(r3)
+ ld r11,0x38(r3)
+ li r3,H_HAL_TAKEOVER
+ HVSC
+ blr
+
+ .globl opal_secondary_entry
+opal_secondary_entry:
+ mr r31,r3
+ mfmsr r11
+ li r12,(MSR_SF | MSR_ISF)@highest
+ sldi r12,r12,48
+ or r11,r11,r12
+ mtmsrd r11
+ isync
+ mfspr r4,SPRN_PIR
+ std r4,0(r3)
+1: HMT_LOW
+ ld r4,8(r3)
+ cmpli cr0,r4,0
+ beq 1b
+ HMT_MEDIUM
+1: addi r3,r31,16
+ bl __opal_do_takeover
+ b 1b
+
+_GLOBAL(opal_enter_rtas)
+ mflr r0
+ std r0,16(r1)
+ stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
+
+ /* Because PROM is running in 32b mode, it clobbers the high order half
+ * of all registers that it saves. We therefore save those registers
+ * PROM might touch to the stack. (r0, r3-r13 are caller saved)
+ */
+ SAVE_GPR(2, r1)
+ SAVE_GPR(13, r1)
+ SAVE_8GPRS(14, r1)
+ SAVE_10GPRS(22, r1)
+ mfcr r10
+ mfmsr r11
+ std r10,_CCR(r1)
+ std r11,_MSR(r1)
+
+ /* Get the PROM entrypoint */
+ mtlr r5
+
+ /* Switch MSR to 32 bits mode
+ */
+ li r12,1
+ rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
+ andc r11,r11,r12
+ li r12,1
+ rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
+ andc r11,r11,r12
+ mtmsrd r11
+ isync
+
+ /* Enter RTAS here... */
+ blrl
+
+ /* Just make sure that r1 top 32 bits didn't get
+ * corrupt by OF
+ */
+ rldicl r1,r1,0,32
+
+ /* Restore the MSR (back to 64 bits) */
+ ld r0,_MSR(r1)
+ MTMSRD(r0)
+ isync
+
+ /* Restore other registers */
+ REST_GPR(2, r1)
+ REST_GPR(13, r1)
+ REST_8GPRS(14, r1)
+ REST_10GPRS(22, r1)
+ ld r4,_CCR(r1)
+ mtcr r4
+
+ addi r1,r1,PROM_FRAME_SIZE
+ ld r0,16(r1)
+ mtlr r0
+ blr
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
new file mode 100644
index 000000000000..4a3f46d8533e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -0,0 +1,101 @@
+/*
+ * PowerNV OPAL API wrappers
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/hvcall.h>
+#include <asm/asm-offsets.h>
+#include <asm/opal.h>
+
+/* TODO:
+ *
+ * - Trace irqs in/off (needs saving/restoring all args, argh...)
+ * - Get r11 freed up by Dave so I can have better register usage
+ */
+#define OPAL_CALL(name, token) \
+ _GLOBAL(name); \
+ mflr r0; \
+ mfcr r12; \
+ std r0,16(r1); \
+ std r12,8(r1); \
+ std r1,PACAR1(r13); \
+ li r0,0; \
+ mfmsr r12; \
+ ori r0,r0,MSR_EE; \
+ std r12,PACASAVEDMSR(r13); \
+ andc r12,r12,r0; \
+ mtmsrd r12,1; \
+ LOAD_REG_ADDR(r0,.opal_return); \
+ mtlr r0; \
+ li r0,MSR_DR|MSR_IR; \
+ andc r12,r12,r0; \
+ li r0,token; \
+ mtspr SPRN_HSRR1,r12; \
+ LOAD_REG_ADDR(r11,opal); \
+ ld r12,8(r11); \
+ ld r2,0(r11); \
+ mtspr SPRN_HSRR0,r12; \
+ hrfid
+
+_STATIC(opal_return)
+ ld r2,PACATOC(r13);
+ ld r4,8(r1);
+ ld r5,16(r1);
+ ld r6,PACASAVEDMSR(r13);
+ mtspr SPRN_SRR0,r5;
+ mtspr SPRN_SRR1,r6;
+ mtcr r4;
+ rfid
+
+OPAL_CALL(opal_console_write, OPAL_CONSOLE_WRITE);
+OPAL_CALL(opal_console_read, OPAL_CONSOLE_READ);
+OPAL_CALL(opal_console_write_buffer_space, OPAL_CONSOLE_WRITE_BUFFER_SPACE);
+OPAL_CALL(opal_rtc_read, OPAL_RTC_READ);
+OPAL_CALL(opal_rtc_write, OPAL_RTC_WRITE);
+OPAL_CALL(opal_cec_power_down, OPAL_CEC_POWER_DOWN);
+OPAL_CALL(opal_cec_reboot, OPAL_CEC_REBOOT);
+OPAL_CALL(opal_read_nvram, OPAL_READ_NVRAM);
+OPAL_CALL(opal_write_nvram, OPAL_WRITE_NVRAM);
+OPAL_CALL(opal_handle_interrupt, OPAL_HANDLE_INTERRUPT);
+OPAL_CALL(opal_poll_events, OPAL_POLL_EVENTS);
+OPAL_CALL(opal_pci_set_hub_tce_memory, OPAL_PCI_SET_HUB_TCE_MEMORY);
+OPAL_CALL(opal_pci_set_phb_tce_memory, OPAL_PCI_SET_PHB_TCE_MEMORY);
+OPAL_CALL(opal_pci_config_read_byte, OPAL_PCI_CONFIG_READ_BYTE);
+OPAL_CALL(opal_pci_config_read_half_word, OPAL_PCI_CONFIG_READ_HALF_WORD);
+OPAL_CALL(opal_pci_config_read_word, OPAL_PCI_CONFIG_READ_WORD);
+OPAL_CALL(opal_pci_config_write_byte, OPAL_PCI_CONFIG_WRITE_BYTE);
+OPAL_CALL(opal_pci_config_write_half_word, OPAL_PCI_CONFIG_WRITE_HALF_WORD);
+OPAL_CALL(opal_pci_config_write_word, OPAL_PCI_CONFIG_WRITE_WORD);
+OPAL_CALL(opal_set_xive, OPAL_SET_XIVE);
+OPAL_CALL(opal_get_xive, OPAL_GET_XIVE);
+OPAL_CALL(opal_register_exception_handler, OPAL_REGISTER_OPAL_EXCEPTION_HANDLER);
+OPAL_CALL(opal_pci_eeh_freeze_status, OPAL_PCI_EEH_FREEZE_STATUS);
+OPAL_CALL(opal_pci_eeh_freeze_clear, OPAL_PCI_EEH_FREEZE_CLEAR);
+OPAL_CALL(opal_pci_shpc, OPAL_PCI_SHPC);
+OPAL_CALL(opal_pci_phb_mmio_enable, OPAL_PCI_PHB_MMIO_ENABLE);
+OPAL_CALL(opal_pci_set_phb_mem_window, OPAL_PCI_SET_PHB_MEM_WINDOW);
+OPAL_CALL(opal_pci_map_pe_mmio_window, OPAL_PCI_MAP_PE_MMIO_WINDOW);
+OPAL_CALL(opal_pci_set_phb_table_memory, OPAL_PCI_SET_PHB_TABLE_MEMORY);
+OPAL_CALL(opal_pci_set_pe, OPAL_PCI_SET_PE);
+OPAL_CALL(opal_pci_set_peltv, OPAL_PCI_SET_PELTV);
+OPAL_CALL(opal_pci_set_mve, OPAL_PCI_SET_MVE);
+OPAL_CALL(opal_pci_set_mve_enable, OPAL_PCI_SET_MVE_ENABLE);
+OPAL_CALL(opal_pci_get_xive_reissue, OPAL_PCI_GET_XIVE_REISSUE);
+OPAL_CALL(opal_pci_set_xive_reissue, OPAL_PCI_SET_XIVE_REISSUE);
+OPAL_CALL(opal_pci_set_xive_pe, OPAL_PCI_SET_XIVE_PE);
+OPAL_CALL(opal_get_xive_source, OPAL_GET_XIVE_SOURCE);
+OPAL_CALL(opal_get_msi_32, OPAL_GET_MSI_32);
+OPAL_CALL(opal_get_msi_64, OPAL_GET_MSI_64);
+OPAL_CALL(opal_start_cpu, OPAL_START_CPU);
+OPAL_CALL(opal_query_cpu_status, OPAL_QUERY_CPU_STATUS);
+OPAL_CALL(opal_write_oppanel, OPAL_WRITE_OPPANEL);
+OPAL_CALL(opal_pci_map_pe_dma_window, OPAL_PCI_MAP_PE_DMA_WINDOW);
+OPAL_CALL(opal_pci_map_pe_dma_window_real, OPAL_PCI_MAP_PE_DMA_WINDOW_REAL);
+OPAL_CALL(opal_pci_reset, OPAL_PCI_RESET);
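
Each OPAL_CALL() line above provides the assembly entry point behind a C prototype declared in asm/opal.h. A few representative declarations, inferred from the callers elsewhere in this series, are sketched below; treat the exact signatures as assumptions rather than the authoritative header.

#include <linux/types.h>

int64_t opal_rtc_read(uint32_t *year_month_day,
		      uint64_t *hour_minute_second_millisecond);
int64_t opal_rtc_write(uint32_t year_month_day,
		       uint64_t hour_minute_second_millisecond);
int64_t opal_console_write(int64_t term_number, int64_t *length,
			   const uint8_t *buffer);
int64_t opal_poll_events(uint64_t *outstanding_event_mask);
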
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
new file mode 100644
index 000000000000..aaa0dba49471
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -0,0 +1,322 @@
+/*
+ * PowerNV OPAL high level interfaces
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <asm/opal.h>
+#include <asm/firmware.h>
+
+#include "powernv.h"
+
+struct opal {
+ u64 base;
+ u64 entry;
+} opal;
+
+static struct device_node *opal_node;
+static DEFINE_SPINLOCK(opal_write_lock);
+extern u64 opal_mc_secondary_handler[];
+
+int __init early_init_dt_scan_opal(unsigned long node,
+ const char *uname, int depth, void *data)
+{
+ const void *basep, *entryp;
+ unsigned long basesz, entrysz;
+ u64 glue;
+
+ if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
+ return 0;
+
+ basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
+ entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
+
+ if (!basep || !entryp)
+ return 1;
+
+ opal.base = of_read_number(basep, basesz/4);
+ opal.entry = of_read_number(entryp, entrysz/4);
+
+ pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%ld)\n",
+ opal.base, basep, basesz);
+ pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%ld)\n",
+ opal.entry, entryp, entrysz);
+
+ powerpc_firmware_features |= FW_FEATURE_OPAL;
+ if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
+ powerpc_firmware_features |= FW_FEATURE_OPALv2;
+ printk("OPAL V2 detected !\n");
+ } else {
+ printk("OPAL V1 detected !\n");
+ }
+
+ /* Hookup some exception handlers. We use the fwnmi area at 0x7000
+ * to provide the glue space to OPAL
+ */
+ glue = 0x7000;
+ opal_register_exception_handler(OPAL_MACHINE_CHECK_HANDLER,
+ __pa(opal_mc_secondary_handler[0]),
+ glue);
+ glue += 128;
+ opal_register_exception_handler(OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
+ 0, glue);
+ glue += 128;
+ opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
+
+ return 1;
+}
+
+int opal_get_chars(uint32_t vtermno, char *buf, int count)
+{
+ s64 len, rc;
+ u64 evt;
+
+ if (!opal.entry)
+ return -ENODEV;
+ opal_poll_events(&evt);
+ if ((evt & OPAL_EVENT_CONSOLE_INPUT) == 0)
+ return 0;
+ len = count;
+ rc = opal_console_read(vtermno, &len, buf);
+ if (rc == OPAL_SUCCESS)
+ return len;
+ return 0;
+}
+
+int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
+{
+ int written = 0;
+ s64 len, rc;
+ unsigned long flags;
+ u64 evt;
+
+ if (!opal.entry)
+ return -ENODEV;
+
+ /* We want put_chars to be atomic to avoid mangling of hvsi
+ * packets. To do that, we first test for room and return
+ * -EAGAIN if there isn't enough.
+ *
+ * Unfortunately, opal_console_write_buffer_space() doesn't
+ * appear to work on opal v1, so we just assume there is
+ * enough room and be done with it
+ */
+ spin_lock_irqsave(&opal_write_lock, flags);
+ if (firmware_has_feature(FW_FEATURE_OPALv2)) {
+ rc = opal_console_write_buffer_space(vtermno, &len);
+ if (rc || len < total_len) {
+ spin_unlock_irqrestore(&opal_write_lock, flags);
+ /* Closed -> drop characters */
+ if (rc)
+ return total_len;
+ opal_poll_events(&evt);
+ return -EAGAIN;
+ }
+ }
+
+ /* We still try to handle partial completions, though they
+ * should no longer happen.
+ */
+ rc = OPAL_BUSY;
+ while(total_len > 0 && (rc == OPAL_BUSY ||
+ rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
+ len = total_len;
+ rc = opal_console_write(vtermno, &len, data);
+ if (rc == OPAL_SUCCESS) {
+ total_len -= len;
+ data += len;
+ written += len;
+ }
+ /* This is a bit nasty but we need that for the console to
+ * flush when there aren't any interrupts. We will clean
+ * things a bit later to limit that to synchronous path
+ * such as the kernel console and xmon/udbg
+ */
+ do
+ opal_poll_events(&evt);
+ while(rc == OPAL_SUCCESS && (evt & OPAL_EVENT_CONSOLE_OUTPUT));
+ }
+ spin_unlock_irqrestore(&opal_write_lock, flags);
+ return written;
+}
+
+int opal_machine_check(struct pt_regs *regs)
+{
+ struct opal_machine_check_event *opal_evt = get_paca()->opal_mc_evt;
+ struct opal_machine_check_event evt;
+ const char *level, *sevstr, *subtype;
+ static const char *opal_mc_ue_types[] = {
+ "Indeterminate",
+ "Instruction fetch",
+ "Page table walk ifetch",
+ "Load/Store",
+ "Page table walk Load/Store",
+ };
+ static const char *opal_mc_slb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *opal_mc_erat_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+ static const char *opal_mc_tlb_types[] = {
+ "Indeterminate",
+ "Parity",
+ "Multihit",
+ };
+
+ /* Copy the event structure and release the original */
+ evt = *opal_evt;
+ opal_evt->in_use = 0;
+
+ /* Print things out */
+ if (evt.version != OpalMCE_V1) {
+ pr_err("Machine Check Exception, Unknown event version %d !\n",
+ evt.version);
+ return 0;
+ }
+ switch(evt.severity) {
+ case OpalMCE_SEV_NO_ERROR:
+ level = KERN_INFO;
+ sevstr = "Harmless";
+ break;
+ case OpalMCE_SEV_WARNING:
+ level = KERN_WARNING;
+ sevstr = "";
+ break;
+ case OpalMCE_SEV_ERROR_SYNC:
+ level = KERN_ERR;
+ sevstr = "Severe";
+ break;
+ case OpalMCE_SEV_FATAL:
+ default:
+ level = KERN_ERR;
+ sevstr = "Fatal";
+ break;
+ }
+
+ printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
+ evt.disposition == OpalMCE_DISPOSITION_RECOVERED ?
+ "Recovered" : "[Not recovered");
+ printk("%s Initiator: %s\n", level,
+ evt.initiator == OpalMCE_INITIATOR_CPU ? "CPU" : "Unknown");
+ switch(evt.error_type) {
+ case OpalMCE_ERROR_TYPE_UE:
+ subtype = evt.u.ue_error.ue_error_type <
+ ARRAY_SIZE(opal_mc_ue_types) ?
+ opal_mc_ue_types[evt.u.ue_error.ue_error_type]
+ : "Unknown";
+ printk("%s Error type: UE [%s]\n", level, subtype);
+ if (evt.u.ue_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt.u.ue_error.effective_address);
+ if (evt.u.ue_error.physical_address_provided)
+ printk("%s Physial address: %016llx\n",
+ level, evt.u.ue_error.physical_address);
+ break;
+ case OpalMCE_ERROR_TYPE_SLB:
+ subtype = evt.u.slb_error.slb_error_type <
+ ARRAY_SIZE(opal_mc_slb_types) ?
+ opal_mc_slb_types[evt.u.slb_error.slb_error_type]
+ : "Unknown";
+ printk("%s Error type: SLB [%s]\n", level, subtype);
+ if (evt.u.slb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt.u.slb_error.effective_address);
+ break;
+ case OpalMCE_ERROR_TYPE_ERAT:
+ subtype = evt.u.erat_error.erat_error_type <
+ ARRAY_SIZE(opal_mc_erat_types) ?
+ opal_mc_erat_types[evt.u.erat_error.erat_error_type]
+ : "Unknown";
+ printk("%s Error type: ERAT [%s]\n", level, subtype);
+ if (evt.u.erat_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt.u.erat_error.effective_address);
+ break;
+ case OpalMCE_ERROR_TYPE_TLB:
+ subtype = evt.u.tlb_error.tlb_error_type <
+ ARRAY_SIZE(opal_mc_tlb_types) ?
+ opal_mc_tlb_types[evt.u.tlb_error.tlb_error_type]
+ : "Unknown";
+ printk("%s Error type: TLB [%s]\n", level, subtype);
+ if (evt.u.tlb_error.effective_address_provided)
+ printk("%s Effective address: %016llx\n",
+ level, evt.u.tlb_error.effective_address);
+ break;
+ default:
+ case OpalMCE_ERROR_TYPE_UNKNOWN:
+ printk("%s Error type: Unknown\n", level);
+ break;
+ }
+ return evt.severity == OpalMCE_SEV_FATAL ? 0 : 1;
+}
+
+static irqreturn_t opal_interrupt(int irq, void *data)
+{
+ uint64_t events;
+
+ opal_handle_interrupt(virq_to_hw(irq), &events);
+
+ /* XXX TODO: Do something with the events */
+
+ return IRQ_HANDLED;
+}
+
+static int __init opal_init(void)
+{
+ struct device_node *np, *consoles;
+ const u32 *irqs;
+ int rc, i, irqlen;
+
+ opal_node = of_find_node_by_path("/ibm,opal");
+ if (!opal_node) {
+ pr_warn("opal: Node not found\n");
+ return -ENODEV;
+ }
+ if (firmware_has_feature(FW_FEATURE_OPALv2))
+ consoles = of_find_node_by_path("/ibm,opal/consoles");
+ else
+ consoles = of_node_get(opal_node);
+
+ /* Register serial ports */
+ for_each_child_of_node(consoles, np) {
+ if (strcmp(np->name, "serial"))
+ continue;
+ of_platform_device_create(np, NULL, NULL);
+ }
+ of_node_put(consoles);
+
+ /* Find all OPAL interrupts and request them */
+ irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
+ pr_debug("opal: Found %d interrupts reserved for OPAL\n",
+ irqs ? (irqlen / 4) : 0);
+ for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
+ unsigned int hwirq = be32_to_cpup(irqs);
+ unsigned int irq = irq_create_mapping(NULL, hwirq);
+ if (irq == NO_IRQ) {
+ pr_warning("opal: Failed to map irq 0x%x\n", hwirq);
+ continue;
+ }
+ rc = request_irq(irq, opal_interrupt, 0, "opal", NULL);
+ if (rc)
+ pr_warning("opal: Error %d requesting irq %d"
+ " (0x%x)\n", rc, irq, hwirq);
+ }
+ return 0;
+}
+subsys_initcall(opal_init);
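
opal_get_chars()/opal_put_chars() above are intended to be consumed by a console backend added separately (e.g. an hvc driver). A hedged sketch of such a consumer; the wrapper names and the treatment of -EAGAIN as "nothing written yet" are illustrative assumptions, not part of this patch.

#include <linux/errno.h>
#include <asm/opal.h>

static int demo_console_put(uint32_t vtermno, const char *buf, int count)
{
	int rc = opal_put_chars(vtermno, buf, count);

	/* -EAGAIN: no firmware buffer space yet, report zero progress */
	return rc == -EAGAIN ? 0 : rc;
}

static int demo_console_get(uint32_t vtermno, char *buf, int count)
{
	return opal_get_chars(vtermno, buf, count);
}
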
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
new file mode 100644
index 000000000000..4c80f7c77d56
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -0,0 +1,234 @@
+/*
+ * Support PCI/PCIe on PowerNV platforms
+ *
+ * Currently supports only P5IOC2
+ *
+ * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/opal.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+/* For now, use a fixed amount of TCE memory for each p5ioc2
+ * hub, 16M will do
+ */
+#define P5IOC2_TCE_MEMORY 0x01000000
+
+#ifdef CONFIG_PCI_MSI
+static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg)
+{
+ if (WARN_ON(!is_64))
+ return -ENXIO;
+ msg->data = hwirq - phb->msi_base;
+ msg->address_hi = 0x10000000;
+ msg->address_lo = 0;
+
+ return 0;
+}
+
+static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
+{
+ unsigned int bmap_size;
+ const __be32 *prop = of_get_property(phb->hose->dn,
+ "ibm,opal-msi-ranges", NULL);
+ if (!prop)
+ return;
+
+ /* Don't do MSI's on p5ioc2 PCI-X as they are not properly
+ * verified in HW
+ */
+ if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
+ return;
+ phb->msi_base = be32_to_cpup(prop);
+ phb->msi_count = be32_to_cpup(prop + 1);
+ bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
+ phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
+ if (!phb->msi_map) {
+ pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
+ phb->hose->global_number);
+ return;
+ }
+ phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
+ phb->msi32_support = 0;
+ pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
+ phb->msi_count, phb->msi_base);
+}
+#else
+static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
+#endif /* CONFIG_PCI_MSI */
+
+static void __devinit pnv_pci_p5ioc2_dma_dev_setup(struct pnv_phb *phb,
+ struct pci_dev *pdev)
+{
+ if (phb->p5ioc2.iommu_table.it_map == NULL)
+ iommu_init_table(&phb->p5ioc2.iommu_table, phb->hose->node);
+
+ set_iommu_table_base(&pdev->dev, &phb->p5ioc2.iommu_table);
+}
+
+static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np,
+ void *tce_mem, u64 tce_size)
+{
+ struct pnv_phb *phb;
+ const u64 *prop64;
+ u64 phb_id;
+ int64_t rc;
+ static int primary = 1;
+
+ pr_info(" Initializing p5ioc2 PHB %s\n", np->full_name);
+
+ prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
+ if (!prop64) {
+ pr_err(" Missing \"ibm,opal-phbid\" property !\n");
+ return;
+ }
+ phb_id = be64_to_cpup(prop64);
+ pr_devel(" PHB-ID : 0x%016llx\n", phb_id);
+ pr_devel(" TCE AT : 0x%016lx\n", __pa(tce_mem));
+ pr_devel(" TCE SZ : 0x%016llx\n", tce_size);
+
+ rc = opal_pci_set_phb_tce_memory(phb_id, __pa(tce_mem), tce_size);
+ if (rc != OPAL_SUCCESS) {
+ pr_err(" Failed to set TCE memory, OPAL error %lld\n", rc);
+ return;
+ }
+
+ phb = alloc_bootmem(sizeof(struct pnv_phb));
+ if (phb) {
+ memset(phb, 0, sizeof(struct pnv_phb));
+ phb->hose = pcibios_alloc_controller(np);
+ }
+ if (!phb || !phb->hose) {
+ pr_err(" Failed to allocate PCI controller\n");
+ return;
+ }
+
+ spin_lock_init(&phb->lock);
+ phb->hose->first_busno = 0;
+ phb->hose->last_busno = 0xff;
+ phb->hose->private_data = phb;
+ phb->opal_id = phb_id;
+ phb->type = PNV_PHB_P5IOC2;
+
+ phb->regs = of_iomap(np, 0);
+
+ if (phb->regs == NULL)
+ pr_err(" Failed to map registers !\n");
+ else {
+ pr_devel(" P_BUID = 0x%08x\n", in_be32(phb->regs + 0x100));
+ pr_devel(" P_IOSZ = 0x%08x\n", in_be32(phb->regs + 0x1b0));
+ pr_devel(" P_IO_ST = 0x%08x\n", in_be32(phb->regs + 0x1e0));
+ pr_devel(" P_MEM1_H = 0x%08x\n", in_be32(phb->regs + 0x1a0));
+ pr_devel(" P_MEM1_L = 0x%08x\n", in_be32(phb->regs + 0x190));
+ pr_devel(" P_MSZ1_L = 0x%08x\n", in_be32(phb->regs + 0x1c0));
+ pr_devel(" P_MEM_ST = 0x%08x\n", in_be32(phb->regs + 0x1d0));
+ pr_devel(" P_MEM2_H = 0x%08x\n", in_be32(phb->regs + 0x2c0));
+ pr_devel(" P_MEM2_L = 0x%08x\n", in_be32(phb->regs + 0x2b0));
+ pr_devel(" P_MSZ2_H = 0x%08x\n", in_be32(phb->regs + 0x2d0));
+ pr_devel(" P_MSZ2_L = 0x%08x\n", in_be32(phb->regs + 0x2e0));
+ }
+
+ /* Interpret the "ranges" property */
+ /* This also maps the I/O region and sets isa_io/mem_base */
+ pci_process_bridge_OF_ranges(phb->hose, np, primary);
+ primary = 0;
+
+ phb->hose->ops = &pnv_pci_ops;
+
+ /* Setup MSI support */
+ pnv_pci_init_p5ioc2_msis(phb);
+
+ /* Setup TCEs */
+ phb->dma_dev_setup = pnv_pci_p5ioc2_dma_dev_setup;
+ pnv_pci_setup_iommu_table(&phb->p5ioc2.iommu_table,
+ tce_mem, tce_size, 0);
+}
+
+void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
+{
+ struct device_node *phbn;
+ const u64 *prop64;
+ u64 hub_id;
+ void *tce_mem;
+ uint64_t tce_per_phb;
+ int64_t rc;
+ int phb_count = 0;
+
+ pr_info("Probing p5ioc2 IO-Hub %s\n", np->full_name);
+
+ prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
+ if (!prop64) {
+ pr_err(" Missing \"ibm,opal-hubid\" property !\n");
+ return;
+ }
+ hub_id = be64_to_cpup(prop64);
+ pr_info(" HUB-ID : 0x%016llx\n", hub_id);
+
+ /* Currently allocate 16M of TCE memory for every Hub
+ *
+ * XXX TODO: Make it chip local if possible
+ */
+ tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
+ __pa(MAX_DMA_ADDRESS));
+ if (!tce_mem) {
+ pr_err(" Failed to allocate TCE Memory !\n");
+ return;
+ }
+ pr_debug(" TCE : 0x%016lx..0x%016lx\n",
+ __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
+ rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
+ P5IOC2_TCE_MEMORY);
+ if (rc != OPAL_SUCCESS) {
+ pr_err(" Failed to allocate TCE memory, OPAL error %lld\n", rc);
+ return;
+ }
+
+ /* Count child PHBs */
+ for_each_child_of_node(np, phbn) {
+ if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
+ of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
+ phb_count++;
+ }
+
+ /* Calculate how much TCE space we can give per PHB */
+ tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
+ pr_info(" Allocating %lld MB of TCE memory per PHB\n",
+ tce_per_phb >> 20);
+
+ /* Initialize PHBs */
+ for_each_child_of_node(np, phbn) {
+ if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
+ of_device_is_compatible(phbn, "ibm,p5ioc2-pciex")) {
+ pnv_pci_init_p5ioc2_phb(phbn, tce_mem, tce_per_phb);
+ tce_mem += tce_per_phb;
+ }
+ }
+}
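
A quick worked example of the per-PHB TCE split done in pnv_pci_init_p5ioc2_hub() above, assuming a hub with three child PHBs (the count is illustrative):

/*
 *   P5IOC2_TCE_MEMORY            = 16 MB
 *   16 MB / 3 PHBs               = ~5.33 MB
 *   __rounddown_pow_of_two(...)  =  4 MB per PHB
 *
 * so each PHB receives a 4 MB, power-of-two sized slice of the bootmem block.
 */
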
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
new file mode 100644
index 000000000000..85bb66d7f933
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -0,0 +1,427 @@
+/*
+ * Support PCI/PCIe on PowerNV platforms
+ *
+ * Currently supports only P5IOC2
+ *
+ * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/msi.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/opal.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/abs_addr.h>
+
+#include "powernv.h"
+#include "pci.h"
+
+/* Delay in usec */
+#define PCI_RESET_DELAY_US 3000000
+
+#define cfg_dbg(fmt...) do { } while(0)
+//#define cfg_dbg(fmt...) printk(fmt)
+
+#ifdef CONFIG_PCI_MSI
+static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ return (phb && phb->msi_map) ? 0 : -ENODEV;
+}
+
+static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
+{
+ unsigned int id;
+
+ spin_lock(&phb->lock);
+ id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
+ if (id >= phb->msi_count && phb->msi_next)
+ id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
+ if (id >= phb->msi_count) {
+ spin_unlock(&phb->lock);
+ return 0;
+ }
+ __set_bit(id, phb->msi_map);
+ spin_unlock(&phb->lock);
+ return id + phb->msi_base;
+}
+
+static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
+{
+ unsigned int id;
+
+ if (WARN_ON(hwirq < phb->msi_base ||
+ hwirq >= (phb->msi_base + phb->msi_count)))
+ return;
+ id = hwirq - phb->msi_base;
+ spin_lock(&phb->lock);
+ __clear_bit(id, phb->msi_map);
+ spin_unlock(&phb->lock);
+}
+
+static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+ struct msi_msg msg;
+ unsigned int hwirq, virq;
+ int rc;
+
+ if (WARN_ON(!phb))
+ return -ENODEV;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
+ pr_warn("%s: Supports only 64-bit MSIs\n",
+ pci_name(pdev));
+ return -ENXIO;
+ }
+ hwirq = pnv_get_one_msi(phb);
+ if (!hwirq) {
+ pr_warn("%s: Failed to find a free MSI\n",
+ pci_name(pdev));
+ return -ENOSPC;
+ }
+ virq = irq_create_mapping(NULL, hwirq);
+ if (virq == NO_IRQ) {
+ pr_warn("%s: Failed to map MSI to linux irq\n",
+ pci_name(pdev));
+ pnv_put_msi(phb, hwirq);
+ return -ENOMEM;
+ }
+ rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
+ &msg);
+ if (rc) {
+ pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
+ irq_dispose_mapping(virq);
+ pnv_put_msi(phb, hwirq);
+ return rc;
+ }
+ irq_set_msi_desc(virq, entry);
+ write_msi_msg(virq, &msg);
+ }
+ return 0;
+}
+
+static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+ struct msi_desc *entry;
+
+ if (WARN_ON(!phb))
+ return;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (entry->irq == NO_IRQ)
+ continue;
+ irq_set_msi_desc(entry->irq, NULL);
+ pnv_put_msi(phb, virq_to_hw(entry->irq));
+ irq_dispose_mapping(entry->irq);
+ }
+}
+#endif /* CONFIG_PCI_MSI */
+
+static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
+ u32 bdfn)
+{
+ s64 rc;
+ u8 fstate;
+ u16 pcierr;
+ u32 pe_no;
+
+ /* Get PE# if we support IODA */
+ pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;
+
+ /* Read freeze status */
+ rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
+ NULL);
+ if (rc) {
+ pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
+ " err %lld\n", phb->hose->global_number, pe_no, rc);
+ return;
+ }
+ cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
+ bdfn, pe_no, fstate);
+ if (fstate != 0) {
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ if (rc) {
+ pr_warning("PCI %d: Failed to clear EEH freeze state"
+ " for PE#%d, err %lld\n",
+ phb->hose->global_number, pe_no, rc);
+ }
+ }
+}
+
+static int pnv_pci_read_config(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 *val)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
+ s64 rc;
+
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1: {
+ u8 v8;
+ rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
+ *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
+ break;
+ }
+ case 2: {
+ u16 v16;
+ rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
+ &v16);
+ *val = (rc == OPAL_SUCCESS) ? v16 : 0xffff;
+ break;
+ }
+ case 4: {
+ u32 v32;
+ rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
+ *val = (rc == OPAL_SUCCESS) ? v32 : 0xffffffff;
+ break;
+ }
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+ cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
+ bus->number, devfn, where, size, *val);
+
+ /* Check if the PHB got frozen due to an error (no response) */
+ pnv_pci_config_check_eeh(phb, bus, bdfn);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int pnv_pci_write_config(struct pci_bus *bus,
+ unsigned int devfn,
+ int where, int size, u32 val)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ struct pnv_phb *phb = hose->private_data;
+ u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
+
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
+ bus->number, devfn, where, size, val);
+ switch (size) {
+ case 1:
+ opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
+ break;
+ case 2:
+ opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
+ break;
+ case 4:
+ opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+ /* Check if the PHB got frozen due to an error (no response) */
+ pnv_pci_config_check_eeh(phb, bus, bdfn);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops pnv_pci_ops = {
+ .read = pnv_pci_read_config,
+ .write = pnv_pci_write_config,
+};
+
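+/*
+ * Populate TCE entries: each 64-bit TCE carries the real page number of
+ * the target page plus read/write permission bits.  With the usual 4K
+ * TCE pages, a buffer at real address 0x2000 yields rpn = 2 and an entry
+ * of (2 << TCE_RPN_SHIFT) | TCE_PCI_READ (plus TCE_PCI_WRITE unless the
+ * mapping is DMA_TO_DEVICE).
+ */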
+static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ u64 proto_tce;
+ u64 *tcep;
+ u64 rpn;
+
+ proto_tce = TCE_PCI_READ; /* Read allowed */
+
+ if (direction != DMA_TO_DEVICE)
+ proto_tce |= TCE_PCI_WRITE;
+
+ tcep = ((u64 *)tbl->it_base) + index;
+
+ while (npages--) {
+ /* can't move this out since we might cross LMB boundary */
+ rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
+ *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+
+ uaddr += TCE_PAGE_SIZE;
+ tcep++;
+ }
+ return 0;
+}
+
+static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
+{
+ u64 *tcep = ((u64 *)tbl->it_base) + index;
+
+ while (npages--)
+ *(tcep++) = 0;
+}
+
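+/*
+ * Fill in a Linux iommu_table describing a TCE table already laid out in
+ * memory.  Each TCE is 8 bytes, hence it_size = tce_size >> 3, and the
+ * window offset is expressed in IOMMU (4K) pages.  For example, a 2GB
+ * DMA window needs 2GB / 4K = 512K TCEs, i.e. a 4MB tce_mem area.
+ */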
+void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset)
+{
+ tbl->it_blocksize = 16;
+ tbl->it_base = (unsigned long)tce_mem;
+ tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
+ tbl->it_index = 0;
+ tbl->it_size = tce_size >> 3;
+ tbl->it_busno = 0;
+ tbl->it_type = TCE_PCI;
+}
+
+static struct iommu_table * __devinit
+pnv_pci_setup_bml_iommu(struct pci_controller *hose)
+{
+ struct iommu_table *tbl;
+ const __be64 *basep;
+ const __be32 *sizep;
+
+ basep = of_get_property(hose->dn, "linux,tce-base", NULL);
+ sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
+ if (basep == NULL || sizep == NULL) {
+ pr_err("PCI: %s has missing tce entries !\n", hose->dn->full_name);
+ return NULL;
+ }
+ tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
+ if (WARN_ON(!tbl))
+ return NULL;
+ pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
+ be32_to_cpup(sizep), 0);
+ iommu_init_table(tbl, hose->node);
+ return tbl;
+}
+
+static void __devinit pnv_pci_dma_fallback_setup(struct pci_controller *hose,
+ struct pci_dev *pdev)
+{
+ struct device_node *np = pci_bus_to_OF_node(hose->bus);
+ struct pci_dn *pdn;
+
+ if (np == NULL)
+ return;
+ pdn = PCI_DN(np);
+ if (!pdn->iommu_table)
+ pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
+ if (!pdn->iommu_table)
+ return;
+ set_iommu_table_base(&pdev->dev, pdn->iommu_table);
+}
+
+static void __devinit pnv_pci_dma_dev_setup(struct pci_dev *pdev)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ /* If we have no phb structure, try to set up a fallback based on
+ * the device-tree (RTAS PCI for example)
+ */
+ if (phb && phb->dma_dev_setup)
+ phb->dma_dev_setup(phb, pdev);
+ else
+ pnv_pci_dma_fallback_setup(hose, pdev);
+}
+
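+/*
+ * Before the first probe of the root bus we make sure at least
+ * PCI_RESET_DELAY_US (3s) have elapsed since firmware lifted the PCI
+ * reset: timebase ticks are converted to microseconds, and any remaining
+ * time is slept off (rounded up to whole milliseconds for msleep).
+ */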
+static int pnv_pci_probe_mode(struct pci_bus *bus)
+{
+ struct pci_controller *hose = pci_bus_to_host(bus);
+ const __be64 *tstamp;
+ u64 now, target;
+
+
+ /* We hijack this as a way to ensure we have waited long
+ * enough since the reset was lifted on the PCI bus
+ */
+ if (bus != hose->bus)
+ return PCI_PROBE_NORMAL;
+ tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
+ if (!tstamp || !*tstamp)
+ return PCI_PROBE_NORMAL;
+
+ now = mftb() / tb_ticks_per_usec;
+ target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
+ + PCI_RESET_DELAY_US;
+
+ pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
+ hose->global_number, target, now);
+
+ if (now < target)
+ msleep((target - now + 999) / 1000);
+
+ return PCI_PROBE_NORMAL;
+}
+
+void __init pnv_pci_init(void)
+{
+ struct device_node *np;
+
+ pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN);
+
+ /* We do not want to just probe */
+ pci_probe_only = 0;
+
+ /* OPAL absent, fall back to RTAS detection of PHBs */
+ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+#ifdef CONFIG_PPC_POWERNV_RTAS
+ init_pci_config_tokens();
+ find_and_init_phbs();
+#endif /* CONFIG_PPC_POWERNV_RTAS */
+ } else {
+ /* OPAL is here, do our normal stuff */
+
+ /* Look for p5ioc2 IO-Hubs */
+ for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+ pnv_pci_init_p5ioc2_hub(np);
+ }
+
+ /* Setup the linkage between OF nodes and PHBs */
+ pci_devs_phb_init();
+
+ /* Configure IOMMU DMA hooks */
+ ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
+ ppc_md.tce_build = pnv_tce_build;
+ ppc_md.tce_free = pnv_tce_free;
+ ppc_md.pci_probe_mode = pnv_pci_probe_mode;
+ set_pci_dma_ops(&dma_iommu_ops);
+
+ /* Configure MSIs */
+#ifdef CONFIG_PCI_MSI
+ ppc_md.msi_check_device = pnv_msi_check_device;
+ ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
+ ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
+#endif
+}
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
new file mode 100644
index 000000000000..d4dbc4950936
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -0,0 +1,48 @@
+#ifndef __POWERNV_PCI_H
+#define __POWERNV_PCI_H
+
+struct pci_dn;
+
+enum pnv_phb_type {
+ PNV_PHB_P5IOC2,
+ PNV_PHB_IODA1,
+ PNV_PHB_IODA2,
+};
+
+struct pnv_phb {
+ struct pci_controller *hose;
+ enum pnv_phb_type type;
+ u64 opal_id;
+ void __iomem *regs;
+ spinlock_t lock;
+
+#ifdef CONFIG_PCI_MSI
+ unsigned long *msi_map;
+ unsigned int msi_base;
+ unsigned int msi_count;
+ unsigned int msi_next;
+ unsigned int msi32_support;
+#endif
+ int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg);
+ void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+ void (*fixup_phb)(struct pci_controller *hose);
+ u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
+
+ union {
+ struct {
+ struct iommu_table iommu_table;
+ } p5ioc2;
+ };
+};
+
+extern struct pci_ops pnv_pci_ops;
+
+extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
+ void *tce_mem, u64 tce_size,
+ u64 dma_offset);
+extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
+
+
+#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
new file mode 100644
index 000000000000..8a9df7f9667e
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -0,0 +1,16 @@
+#ifndef _POWERNV_H
+#define _POWERNV_H
+
+#ifdef CONFIG_SMP
+extern void pnv_smp_init(void);
+#else
+static inline void pnv_smp_init(void) { }
+#endif
+
+#ifdef CONFIG_PCI
+extern void pnv_pci_init(void);
+#else
+static inline void pnv_pci_init(void) { }
+#endif
+
+#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
new file mode 100644
index 000000000000..467bd4ac6824
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -0,0 +1,196 @@
+/*
+ * PowerNV setup code.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#undef DEBUG
+
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/reboot.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/bug.h>
+
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/xics.h>
+#include <asm/rtas.h>
+#include <asm/opal.h>
+#include <asm/xics.h>
+
+#include "powernv.h"
+
+static void __init pnv_setup_arch(void)
+{
+ /* Initialize SMP */
+ pnv_smp_init();
+
+ /* Setup PCI */
+ pnv_pci_init();
+
+ /* Setup RTC and NVRAM callbacks */
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ opal_nvram_init();
+
+ /* Enable NAP mode */
+ powersave_nap = 1;
+
+ /* XXX PMCS */
+}
+
+static void __init pnv_init_early(void)
+{
+#ifdef CONFIG_HVC_OPAL
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ hvc_opal_init_early();
+ else
+#endif
+ add_preferred_console("hvc", 0, NULL);
+}
+
+static void __init pnv_init_IRQ(void)
+{
+ xics_init();
+
+ WARN_ON(!ppc_md.get_irq);
+}
+
+static void pnv_show_cpuinfo(struct seq_file *m)
+{
+ struct device_node *root;
+ const char *model = "";
+
+ root = of_find_node_by_path("/");
+ if (root)
+ model = of_get_property(root, "model", NULL);
+ seq_printf(m, "machine\t\t: PowerNV %s\n", model);
+ if (firmware_has_feature(FW_FEATURE_OPALv2))
+ seq_printf(m, "firmware\t: OPAL v2\n");
+ else if (firmware_has_feature(FW_FEATURE_OPAL))
+ seq_printf(m, "firmware\t: OPAL v1\n");
+ else
+ seq_printf(m, "firmware\t: BML\n");
+ of_node_put(root);
+}
+
+static void __noreturn pnv_restart(char *cmd)
+{
+ long rc = OPAL_BUSY;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_cec_reboot();
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
+ for (;;)
+ opal_poll_events(NULL);
+}
+
+static void __noreturn pnv_power_off(void)
+{
+ long rc = OPAL_BUSY;
+
+ while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ rc = opal_cec_power_down(0);
+ if (rc == OPAL_BUSY_EVENT)
+ opal_poll_events(NULL);
+ else
+ mdelay(10);
+ }
+ for (;;)
+ opal_poll_events(NULL);
+}
+
+static void __noreturn pnv_halt(void)
+{
+ pnv_power_off();
+}
+
+static void pnv_progress(char *s, unsigned short hex)
+{
+}
+
+#ifdef CONFIG_KEXEC
+static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
+{
+ xics_kexec_teardown_cpu(secondary);
+}
+#endif /* CONFIG_KEXEC */
+
+static void __init pnv_setup_machdep_opal(void)
+{
+ ppc_md.get_boot_time = opal_get_boot_time;
+ ppc_md.get_rtc_time = opal_get_rtc_time;
+ ppc_md.set_rtc_time = opal_set_rtc_time;
+ ppc_md.restart = pnv_restart;
+ ppc_md.power_off = pnv_power_off;
+ ppc_md.halt = pnv_halt;
+ ppc_md.machine_check_exception = opal_machine_check;
+}
+
+#ifdef CONFIG_PPC_POWERNV_RTAS
+static void __init pnv_setup_machdep_rtas(void)
+{
+ if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
+ ppc_md.get_boot_time = rtas_get_boot_time;
+ ppc_md.get_rtc_time = rtas_get_rtc_time;
+ ppc_md.set_rtc_time = rtas_set_rtc_time;
+ }
+ ppc_md.restart = rtas_restart;
+ ppc_md.power_off = rtas_power_off;
+ ppc_md.halt = rtas_halt;
+}
+#endif /* CONFIG_PPC_POWERNV_RTAS */
+
+static int __init pnv_probe(void)
+{
+ unsigned long root = of_get_flat_dt_root();
+
+ if (!of_flat_dt_is_compatible(root, "ibm,powernv"))
+ return 0;
+
+ hpte_init_native();
+
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ pnv_setup_machdep_opal();
+#ifdef CONFIG_PPC_POWERNV_RTAS
+ else if (rtas.base)
+ pnv_setup_machdep_rtas();
+#endif /* CONFIG_PPC_POWERNV_RTAS */
+
+ pr_debug("PowerNV detected !\n");
+
+ return 1;
+}
+
+define_machine(powernv) {
+ .name = "PowerNV",
+ .probe = pnv_probe,
+ .init_early = pnv_init_early,
+ .setup_arch = pnv_setup_arch,
+ .init_IRQ = pnv_init_IRQ,
+ .show_cpuinfo = pnv_show_cpuinfo,
+ .progress = pnv_progress,
+ .power_save = power7_idle,
+ .calibrate_decr = generic_calibrate_decr,
+#ifdef CONFIG_KEXEC
+ .kexec_cpu_down = pnv_kexec_cpu_down,
+#endif
+};
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
new file mode 100644
index 000000000000..e87736685243
--- /dev/null
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -0,0 +1,182 @@
+/*
+ * SMP support for PowerNV machines.
+ *
+ * Copyright 2011 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+
+#include <asm/irq.h>
+#include <asm/smp.h>
+#include <asm/paca.h>
+#include <asm/machdep.h>
+#include <asm/cputable.h>
+#include <asm/firmware.h>
+#include <asm/system.h>
+#include <asm/rtas.h>
+#include <asm/vdso_datapage.h>
+#include <asm/cputhreads.h>
+#include <asm/xics.h>
+#include <asm/opal.h>
+
+#include "powernv.h"
+
+#ifdef DEBUG
+#include <asm/udbg.h>
+#define DBG(fmt...) udbg_printf(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
+static void __cpuinit pnv_smp_setup_cpu(int cpu)
+{
+ if (cpu != boot_cpuid)
+ xics_setup_cpu();
+}
+
+static int pnv_smp_cpu_bootable(unsigned int nr)
+{
+ /* Special case - we inhibit secondary thread startup
+ * during boot if the user requests it.
+ */
+ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+ if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ return 0;
+ if (smt_enabled_at_boot
+ && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
+ return 0;
+ }
+
+ return 1;
+}
+
+int __devinit pnv_smp_kick_cpu(int nr)
+{
+ unsigned int pcpu = get_hard_smp_processor_id(nr);
+ unsigned long start_here = __pa(*((unsigned long *)
+ generic_secondary_smp_init));
+ long rc;
+
+ BUG_ON(nr < 0 || nr >= NR_CPUS);
+
+ /* On OPAL v2 the CPUs are still spinning inside OPAL itself,
+ * get them back now
+ */
+ if (firmware_has_feature(FW_FEATURE_OPALv2)) {
+ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
+ rc = opal_start_cpu(pcpu, start_here);
+ if (rc != OPAL_SUCCESS)
+ pr_warn("OPAL Error %ld starting CPU %d\n",
+ rc, nr);
+ }
+ return smp_generic_kick_cpu(nr);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int pnv_smp_cpu_disable(void)
+{
+ int cpu = smp_processor_id();
+
+ /* This is identical to pSeries... might consolidate by
+ * moving migrate_irqs_away to a ppc_md with default to
+ * the generic fixup_irqs. --BenH.
+ */
+ set_cpu_online(cpu, false);
+ vdso_data->processorCount--;
+ if (cpu == boot_cpuid)
+ boot_cpuid = cpumask_any(cpu_online_mask);
+ xics_migrate_irqs_away();
+ return 0;
+}
+
+static void pnv_smp_cpu_kill_self(void)
+{
+ unsigned int cpu;
+
+ /* If powersave_nap is enabled, use NAP mode, else just
+ * spin aimlessly
+ */
+ if (!powersave_nap) {
+ generic_mach_cpu_die();
+ return;
+ }
+
+ /* Standard hot unplug procedure */
+ local_irq_disable();
+ idle_task_exit();
+ current->active_mm = NULL; /* for sanity */
+ cpu = smp_processor_id();
+ DBG("CPU%d offline\n", cpu);
+ generic_set_cpu_dead(cpu);
+ smp_wmb();
+
+ /* We don't want to take decrementer interrupts while we are offline,
+ * so clear LPCR:PECE1. We keep PECE2 enabled.
+ */
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
+ while (!generic_check_cpu_restart(cpu)) {
+ power7_idle();
+ if (!generic_check_cpu_restart(cpu)) {
+ DBG("CPU%d Unexpected exit while offline !\n", cpu);
+ /* We may be getting an IPI, so we re-enable
+ * interrupts to process it; it will be ignored
+ * since we aren't online (hopefully)
+ */
+ local_irq_enable();
+ local_irq_disable();
+ }
+ }
+ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
+ DBG("CPU%d coming online...\n", cpu);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static struct smp_ops_t pnv_smp_ops = {
+ .message_pass = smp_muxed_ipi_message_pass,
+ .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */
+ .probe = xics_smp_probe,
+ .kick_cpu = pnv_smp_kick_cpu,
+ .setup_cpu = pnv_smp_setup_cpu,
+ .cpu_bootable = pnv_smp_cpu_bootable,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = pnv_smp_cpu_disable,
+ .cpu_die = generic_cpu_die,
+#endif /* CONFIG_HOTPLUG_CPU */
+};
+
+/* This is called very early during platform setup_arch */
+void __init pnv_smp_init(void)
+{
+ smp_ops = &pnv_smp_ops;
+
+ /* XXX We don't yet have a proper entry point from HAL, for
+ * now we rely on kexec-style entry from BML
+ */
+
+#ifdef CONFIG_PPC_RTAS
+ /* Non-lpar has additional take/give timebase */
+ if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
+ smp_ops->give_timebase = rtas_give_timebase;
+ smp_ops->take_timebase = rtas_take_timebase;
+ }
+#endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_HOTPLUG_CPU
+ ppc_md.cpu_die = pnv_smp_cpu_kill_self;
+#endif
+}
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index dfe316b161a9..476d9d9b2405 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -148,4 +148,16 @@ config PS3_LPM
profiling support of the Cell processor with programs like
oprofile and perfmon2, then say Y or M, otherwise say N.
+config PS3GELIC_UDBG
+ bool "PS3 udbg output via UDP broadcasts on Ethernet"
+ depends on PPC_PS3
+ help
+ Enables udbg early debugging output by sending broadcast UDP
+ packets via the Ethernet port (UDP port number 18194).
+
+ This driver uses a trivial implementation and is independent
+ of the main network driver.
+
+ If in doubt, say N here.
+
endmenu
diff --git a/arch/powerpc/platforms/ps3/Makefile b/arch/powerpc/platforms/ps3/Makefile
index ac1bdf844eca..02b9e636dab7 100644
--- a/arch/powerpc/platforms/ps3/Makefile
+++ b/arch/powerpc/platforms/ps3/Makefile
@@ -2,6 +2,7 @@ obj-y += setup.o mm.o time.o hvcall.o htab.o repository.o
obj-y += interrupt.o exports.o os-area.o
obj-y += system-bus.o
+obj-$(CONFIG_PS3GELIC_UDBG) += gelic_udbg.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SPU_BASE) += spu.o
obj-y += device-init.o
diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c
new file mode 100644
index 000000000000..20b46a19a48f
--- /dev/null
+++ b/arch/powerpc/platforms/ps3/gelic_udbg.c
@@ -0,0 +1,273 @@
+/*
+ * udbg debug output routine via GELIC UDP broadcasts
+ *
+ * Copyright (C) 2007 Sony Computer Entertainment Inc.
+ * Copyright 2006, 2007 Sony Corporation
+ * Copyright (C) 2010 Hector Martin <hector@marcansoft.com>
+ * Copyright (C) 2011 Andre Heider <a.heider@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ */
+
+#include <asm/io.h>
+#include <asm/udbg.h>
+#include <asm/lv1call.h>
+
+#define GELIC_BUS_ID 1
+#define GELIC_DEVICE_ID 0
+#define GELIC_DEBUG_PORT 18194
+#define GELIC_MAX_MESSAGE_SIZE 1000
+
+#define GELIC_LV1_GET_MAC_ADDRESS 1
+#define GELIC_LV1_GET_VLAN_ID 4
+#define GELIC_LV1_VLAN_TX_ETHERNET_0 2
+
+#define GELIC_DESCR_DMA_STAT_MASK 0xf0000000
+#define GELIC_DESCR_DMA_CARDOWNED 0xa0000000
+
+#define GELIC_DESCR_TX_DMA_IKE 0x00080000
+#define GELIC_DESCR_TX_DMA_NO_CHKSUM 0x00000000
+#define GELIC_DESCR_TX_DMA_FRAME_TAIL 0x00040000
+
+#define GELIC_DESCR_DMA_CMD_NO_CHKSUM (GELIC_DESCR_DMA_CARDOWNED | \
+ GELIC_DESCR_TX_DMA_IKE | \
+ GELIC_DESCR_TX_DMA_NO_CHKSUM)
+
+static u64 bus_addr;
+
+struct gelic_descr {
+ /* as defined by the hardware */
+ __be32 buf_addr;
+ __be32 buf_size;
+ __be32 next_descr_addr;
+ __be32 dmac_cmd_status;
+ __be32 result_size;
+ __be32 valid_size; /* all zeroes for tx */
+ __be32 data_status;
+ __be32 data_error; /* all zeroes for tx */
+} __attribute__((aligned(32)));
+
+struct debug_block {
+ struct gelic_descr descr;
+ u8 pkt[1520];
+} __packed;
+
+struct ethhdr {
+ u8 dest[6];
+ u8 src[6];
+ u16 type;
+} __packed;
+
+struct vlantag {
+ u16 vlan;
+ u16 subtype;
+} __packed;
+
+struct iphdr {
+ u8 ver_len;
+ u8 dscp_ecn;
+ u16 total_length;
+ u16 ident;
+ u16 frag_off_flags;
+ u8 ttl;
+ u8 proto;
+ u16 checksum;
+ u32 src;
+ u32 dest;
+} __packed;
+
+struct udphdr {
+ u16 src;
+ u16 dest;
+ u16 len;
+ u16 checksum;
+} __packed;
+
+static __iomem struct ethhdr *h_eth;
+static __iomem struct vlantag *h_vlan;
+static __iomem struct iphdr *h_ip;
+static __iomem struct udphdr *h_udp;
+
+static __iomem char *pmsg;
+static __iomem char *pmsgc;
+
+static __iomem struct debug_block dbg __attribute__((aligned(32)));
+
+static int header_size;
+
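+/*
+ * Map a kernel buffer for device DMA: the buffer's real address range is
+ * rounded out to 4K boundaries (the "12" passed to lv1 is the page shift),
+ * and the returned bus address is adjusted by the buffer's offset within
+ * its first page.
+ */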
+static void map_dma_mem(int bus_id, int dev_id, void *start, size_t len,
+ u64 *real_bus_addr)
+{
+ s64 result;
+ u64 real_addr = ((u64)start) & 0x0fffffffffffffffUL;
+ u64 real_end = real_addr + len;
+ u64 map_start = real_addr & ~0xfff;
+ u64 map_end = (real_end + 0xfff) & ~0xfff;
+ u64 bus_addr = 0;
+
+ u64 flags = 0xf800000000000000UL;
+
+ result = lv1_allocate_device_dma_region(bus_id, dev_id,
+ map_end - map_start, 12, 0,
+ &bus_addr);
+ if (result)
+ lv1_panic(0);
+
+ result = lv1_map_device_dma_region(bus_id, dev_id, map_start,
+ bus_addr, map_end - map_start,
+ flags);
+ if (result)
+ lv1_panic(0);
+
+ *real_bus_addr = bus_addr + real_addr - map_start;
+}
+
+static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
+{
+ s64 result;
+ u64 real_bus_addr;
+
+ real_bus_addr = bus_addr & ~0xfff;
+ len += bus_addr - real_bus_addr;
+ len = (len + 0xfff) & ~0xfff;
+
+ result = lv1_unmap_device_dma_region(bus_id, dev_id, real_bus_addr,
+ len);
+ if (result)
+ return result;
+
+ return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
+}
+
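+/*
+ * Build a template broadcast packet in dbg.pkt: an Ethernet header (plus
+ * an 802.1Q tag when the hypervisor reports a VLAN id for the Ethernet
+ * port), followed by IPv4 and UDP headers with source 0.0.0.0,
+ * destination 255.255.255.255 and port 18194 on both ends.  Later sends
+ * only append the message text and fix up the length/checksum fields.
+ */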
+static void gelic_debug_init(void)
+{
+ s64 result;
+ u64 v2;
+ u64 mac;
+ u64 vlan_id;
+
+ result = lv1_open_device(GELIC_BUS_ID, GELIC_DEVICE_ID, 0);
+ if (result)
+ lv1_panic(0);
+
+ map_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID, &dbg, sizeof(dbg),
+ &bus_addr);
+
+ memset(&dbg, 0, sizeof(dbg));
+
+ dbg.descr.buf_addr = bus_addr + offsetof(struct debug_block, pkt);
+
+ wmb();
+
+ result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
+ GELIC_LV1_GET_MAC_ADDRESS, 0, 0, 0,
+ &mac, &v2);
+ if (result)
+ lv1_panic(0);
+
+ mac <<= 16;
+
+ h_eth = (struct ethhdr *)dbg.pkt;
+
+ memset(&h_eth->dest, 0xff, 6);
+ memcpy(&h_eth->src, &mac, 6);
+
+ header_size = sizeof(struct ethhdr);
+
+ result = lv1_net_control(GELIC_BUS_ID, GELIC_DEVICE_ID,
+ GELIC_LV1_GET_VLAN_ID,
+ GELIC_LV1_VLAN_TX_ETHERNET_0, 0, 0,
+ &vlan_id, &v2);
+ if (!result) {
+ h_eth->type = 0x8100;
+
+ header_size += sizeof(struct vlantag);
+ h_vlan = (struct vlantag *)(h_eth + 1);
+ h_vlan->vlan = vlan_id;
+ h_vlan->subtype = 0x0800;
+ h_ip = (struct iphdr *)(h_vlan + 1);
+ } else {
+ h_eth->type = 0x0800;
+ h_ip = (struct iphdr *)(h_eth + 1);
+ }
+
+ header_size += sizeof(struct iphdr);
+ h_ip->ver_len = 0x45;
+ h_ip->ttl = 10;
+ h_ip->proto = 0x11;
+ h_ip->src = 0x00000000;
+ h_ip->dest = 0xffffffff;
+
+ header_size += sizeof(struct udphdr);
+ h_udp = (struct udphdr *)(h_ip + 1);
+ h_udp->src = GELIC_DEBUG_PORT;
+ h_udp->dest = GELIC_DEBUG_PORT;
+
+ pmsgc = pmsg = (char *)(h_udp + 1);
+}
+
+static void gelic_debug_shutdown(void)
+{
+ if (bus_addr)
+ unmap_dma_mem(GELIC_BUS_ID, GELIC_DEVICE_ID,
+ bus_addr, sizeof(dbg));
+ lv1_close_device(GELIC_BUS_ID, GELIC_DEVICE_ID);
+}
+
+static void gelic_sendbuf(int msgsize)
+{
+ u16 *p;
+ u32 sum;
+ int i;
+
+ dbg.descr.buf_size = header_size + msgsize;
+ h_ip->total_length = msgsize + sizeof(struct udphdr) +
+ sizeof(struct iphdr);
+ h_udp->len = msgsize + sizeof(struct udphdr);
+
+ h_ip->checksum = 0;
+ sum = 0;
+ p = (u16 *)h_ip;
+ for (i = 0; i < 5; i++)
+ sum += *p++;
+ h_ip->checksum = ~(sum + (sum >> 16));
+
+ dbg.descr.dmac_cmd_status = GELIC_DESCR_DMA_CMD_NO_CHKSUM |
+ GELIC_DESCR_TX_DMA_FRAME_TAIL;
+ dbg.descr.result_size = 0;
+ dbg.descr.data_status = 0;
+
+ wmb();
+
+ lv1_net_start_tx_dma(GELIC_BUS_ID, GELIC_DEVICE_ID, bus_addr, 0);
+
+ while ((dbg.descr.dmac_cmd_status & GELIC_DESCR_DMA_STAT_MASK) ==
+ GELIC_DESCR_DMA_CARDOWNED)
+ cpu_relax();
+}
+
+static void ps3gelic_udbg_putc(char ch)
+{
+ *pmsgc++ = ch;
+ if (ch == '\n' || (pmsgc-pmsg) >= GELIC_MAX_MESSAGE_SIZE) {
+ gelic_sendbuf(pmsgc-pmsg);
+ pmsgc = pmsg;
+ }
+}
+
+void __init udbg_init_ps3gelic(void)
+{
+ gelic_debug_init();
+ udbg_putc = ps3gelic_udbg_putc;
+}
+
+void udbg_shutdown_ps3gelic(void)
+{
+ udbg_putc = NULL;
+ gelic_debug_shutdown();
+}
+EXPORT_SYMBOL(udbg_shutdown_ps3gelic);
diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
index 23083c397528..688141c76e03 100644
--- a/arch/powerpc/platforms/ps3/system-bus.c
+++ b/arch/powerpc/platforms/ps3/system-bus.c
@@ -695,12 +695,18 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
return mask >= DMA_BIT_MASK(32);
}
+static u64 ps3_dma_get_required_mask(struct device *_dev)
+{
+ return DMA_BIT_MASK(32);
+}
+
static struct dma_map_ops ps3_sb_dma_ops = {
.alloc_coherent = ps3_alloc_coherent,
.free_coherent = ps3_free_coherent,
.map_sg = ps3_sb_map_sg,
.unmap_sg = ps3_sb_unmap_sg,
.dma_supported = ps3_dma_supported,
+ .get_required_mask = ps3_dma_get_required_mask,
.map_page = ps3_sb_map_page,
.unmap_page = ps3_unmap_page,
};
@@ -711,6 +717,7 @@ static struct dma_map_ops ps3_ioc0_dma_ops = {
.map_sg = ps3_ioc0_map_sg,
.unmap_sg = ps3_ioc0_unmap_sg,
.dma_supported = ps3_dma_supported,
+ .get_required_mask = ps3_dma_get_required_mask,
.map_page = ps3_ioc0_map_page,
.unmap_page = ps3_unmap_page,
};
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 05cf4769b88c..c81f6bb9c10f 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -15,6 +15,7 @@ config PPC_PSERIES
select PPC_UDBG_16550
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
+ select ZLIB_DEFLATE
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e9be25bc571b..0f1b706506ed 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -112,6 +112,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
dlpar_free_one_cc_node(dn);
}
+#define COMPLETE 0
#define NEXT_SIBLING 1
#define NEXT_CHILD 2
#define NEXT_PROPERTY 3
@@ -158,6 +159,9 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
spin_unlock(&rtas_data_buf_lock);
switch (rc) {
+ case COMPLETE:
+ break;
+
case NEXT_SIBLING:
dn = dlpar_parse_cc_node(ccwa);
if (!dn)
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ada6e07532ec..d42f37d8a440 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1338,7 +1338,7 @@ static const struct file_operations proc_eeh_operations = {
static int __init eeh_init_proc(void)
{
if (machine_is(pseries))
- proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
+ proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}
__initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 01faab9456ca..5905a3b9f7e6 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -939,14 +939,14 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ret) {
dev_info(&dev->dev, "failed to map direct window for %s: %d\n",
dn->full_name, ret);
- goto out_clear_window;
+ goto out_free_window;
}
ret = prom_add_property(pdn, win64);
if (ret) {
dev_err(&dev->dev, "unable to add dma window property for %s: %d",
pdn->full_name, ret);
- goto out_clear_window;
+ goto out_free_window;
}
window->device = pdn;
@@ -958,6 +958,9 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dma_addr = of_read_number(&create.addr_hi, 2);
goto out_unlock;
+out_free_window:
+ kfree(window);
+
out_clear_window:
remove_ddw(pdn);
@@ -1077,12 +1080,38 @@ check_mask:
return 0;
}
+static u64 dma_get_required_mask_pSeriesLP(struct device *dev)
+{
+ if (!dev->dma_mask)
+ return 0;
+
+ if (!disable_ddw && dev_is_pci(dev)) {
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct device_node *dn;
+
+ dn = pci_device_to_OF_node(pdev);
+
+ /* search upwards for ibm,dma-window */
+ for (; dn && PCI_DN(dn) && !PCI_DN(dn)->iommu_table;
+ dn = dn->parent)
+ if (of_get_property(dn, "ibm,dma-window", NULL))
+ break;
+ /* if there is an ibm,ddw-applicable property, require 64 bits */
+ if (dn && PCI_DN(dn) &&
+ of_get_property(dn, "ibm,ddw-applicable", NULL))
+ return DMA_BIT_MASK(64);
+ }
+
+ return dma_iommu_ops.get_required_mask(dev);
+}
+
#else /* CONFIG_PCI */
#define pci_dma_bus_setup_pSeries NULL
#define pci_dma_dev_setup_pSeries NULL
#define pci_dma_bus_setup_pSeriesLP NULL
#define pci_dma_dev_setup_pSeriesLP NULL
#define dma_set_mask_pSeriesLP NULL
+#define dma_get_required_mask_pSeriesLP NULL
#endif /* !CONFIG_PCI */
static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
@@ -1186,6 +1215,7 @@ void iommu_init_early_pSeries(void)
ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
+ ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 00cc3a094885..a76b22844d18 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -18,6 +18,8 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/kmsg_dump.h>
+#include <linux/ctype.h>
+#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -78,8 +80,41 @@ static struct kmsg_dumper nvram_kmsg_dumper = {
#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
static unsigned long last_unread_rtas_event; /* timestamp */
-/* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */
-static char *oops_buf;
+/*
+ * For capturing and compressing an oops or panic report...
+ *
+ * big_oops_buf[] holds the uncompressed text we're capturing.
+ *
+ * oops_buf[] holds the compressed text, preceded by a prefix.
+ * The prefix is just a u16 holding the length of the compressed* text.
+ * (*Or uncompressed, if compression fails.) oops_buf[] gets written
+ * to NVRAM.
+ *
+ * oops_len points to the prefix. oops_data points to the compressed text.
+ *
+ * +- oops_buf
+ * | +- oops_data
+ * v v
+ * +------------+-----------------------------------------------+
+ * | length | text |
+ * | (2 bytes) | (oops_data_sz bytes) |
+ * +------------+-----------------------------------------------+
+ * ^
+ * +- oops_len
+ *
+ * We preallocate these buffers during init to avoid kmalloc during oops/panic.
+ */
+static size_t big_oops_buf_sz;
+static char *big_oops_buf, *oops_buf;
+static u16 *oops_len;
+static char *oops_data;
+static size_t oops_data_sz;
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
{
@@ -387,11 +422,44 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
sizeof(rtas_log_partition));
}
oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
+ if (!oops_buf) {
+ pr_err("nvram: No memory for %s partition\n",
+ oops_log_partition.name);
+ return;
+ }
+ oops_len = (u16*) oops_buf;
+ oops_data = oops_buf + sizeof(u16);
+ oops_data_sz = oops_log_partition.size - sizeof(u16);
+
+ /*
+ * We figure that compression (preceded by elimination of each line's
+ * <n> severity prefix) will reduce the oops/panic report to at most
+ * 45% of its original size.
+ */
+ big_oops_buf_sz = (oops_data_sz * 100) / 45;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("nvram: No memory for compression workspace; "
+ "skipping compression of %s partition data\n",
+ oops_log_partition.name);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed %s data; "
+ "skipping compression\n", oops_log_partition.name);
+ stream.workspace = NULL;
+ }
+
rc = kmsg_dump_register(&nvram_kmsg_dumper);
if (rc != 0) {
pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
kfree(oops_buf);
- return;
+ kfree(big_oops_buf);
+ kfree(stream.workspace);
}
}
@@ -473,7 +541,83 @@ static int clobbering_unread_rtas_event(void)
NVRAM_RTAS_READ_TIMEOUT);
}
-/* our kmsg_dump callback */
+/* Squeeze out each line's <n> severity prefix. */
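+/* E.g. "<6>Hello\n<3>world\n" becomes "Hello\nworld\n". */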
+static size_t elide_severities(char *buf, size_t len)
+{
+ char *in, *out, *buf_end = buf + len;
+ /* Assume a <n> at the very beginning marks the start of a line. */
+ int newline = 1;
+
+ in = out = buf;
+ while (in < buf_end) {
+ if (newline && in+3 <= buf_end &&
+ *in == '<' && isdigit(in[1]) && in[2] == '>') {
+ in += 3;
+ newline = 0;
+ } else {
+ newline = (*in == '\n');
+ *out++ = *in++;
+ }
+ }
+ return out - buf;
+}
+
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+ int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+ oops_data_sz);
+ if (zipped_len < 0) {
+ pr_err("nvram: compression failed; returned %d\n", zipped_len);
+ pr_err("nvram: logging uncompressed oops/panic report\n");
+ return -1;
+ }
+ *oops_len = (u16) zipped_len;
+ return 0;
+}
+
+/*
+ * This is our kmsg_dump callback, called after an oops or panic report
+ * has been written to the printk buffer. We want to capture as much
+ * of the printk buffer as possible. First, capture as much as we can
+ * that we think will compress sufficiently to fit in the lnx,oops-log
+ * partition. If that's too much, go back and capture uncompressed text.
+ */
static void oops_to_nvram(struct kmsg_dumper *dumper,
enum kmsg_dump_reason reason,
const char *old_msgs, unsigned long old_len,
@@ -482,6 +626,8 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
static unsigned int oops_count = 0;
static bool panicking = false;
size_t text_len;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+ int rc = -1;
switch (reason) {
case KMSG_DUMP_RESTART:
@@ -509,8 +655,19 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
if (clobbering_unread_rtas_event())
return;
- text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len,
- oops_buf, oops_log_partition.size);
+ if (big_oops_buf) {
+ text_len = capture_last_msgs(old_msgs, old_len,
+ new_msgs, new_len, big_oops_buf, big_oops_buf_sz);
+ text_len = elide_severities(big_oops_buf, text_len);
+ rc = zip_oops(text_len);
+ }
+ if (rc != 0) {
+ text_len = capture_last_msgs(old_msgs, old_len,
+ new_msgs, new_len, oops_data, oops_data_sz);
+ err_type = ERR_TYPE_KERNEL_PANIC;
+ *oops_len = (u16) text_len;
+ }
+
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count);
+ (int) (sizeof(*oops_len) + *oops_len), err_type, ++oops_count);
}
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index c3c48eb62cc1..f4fb837873fb 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -1,5 +1,12 @@
config PPC_WSP
bool
+ select PPC_A2
+ select PPC_SCOM
+ select PPC_XICS
+ select PPC_ICP_NATIVE
+ select PCI
+ select PPC_IO_WORKAROUNDS if PCI
+ select PPC_INDIRECT_PIO if PCI
default n
menu "WSP platform selection"
@@ -7,13 +14,9 @@ menu "WSP platform selection"
config PPC_PSR2
bool "PSR-2 platform"
- select PPC_A2
select GENERIC_TBSYNC
- select PPC_SCOM
select EPAPR_BOOT
select PPC_WSP
- select PPC_XICS
- select PPC_ICP_NATIVE
default y
endmenu
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
index 095be73d6cd4..a1486b436f02 100644
--- a/arch/powerpc/platforms/wsp/Makefile
+++ b/arch/powerpc/platforms/wsp/Makefile
@@ -4,3 +4,5 @@ obj-y += setup.o ics.o
obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o
obj-$(CONFIG_PPC_WSP) += scom_wsp.o
obj-$(CONFIG_SMP) += smp.o scom_smp.o
+obj-$(CONFIG_PCI) += wsp_pci.o
+obj-$(CONFIG_PCI_MSI) += msi.o
\ No newline at end of file
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index e53bd9e7b125..576874392543 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -710,3 +710,51 @@ void __init wsp_init_irq(void)
/* We need to patch our irq chip's EOI to point to the right ICP */
wsp_irq_chip.irq_eoi = icp_ops->eoi;
}
+
+#ifdef CONFIG_PCI_MSI
+static void wsp_ics_msi_unmask_irq(struct irq_data *d)
+{
+ wsp_chip_unmask_irq(d);
+ unmask_msi_irq(d);
+}
+
+static unsigned int wsp_ics_msi_startup(struct irq_data *d)
+{
+ wsp_ics_msi_unmask_irq(d);
+ return 0;
+}
+
+static void wsp_ics_msi_mask_irq(struct irq_data *d)
+{
+ mask_msi_irq(d);
+ wsp_chip_mask_irq(d);
+}
+
+/*
+ * We do it this way because we reassign the default EOI handling in
+ * wsp_init_irq() above.
+ */
+static void wsp_ics_eoi(struct irq_data *data)
+{
+ wsp_irq_chip.irq_eoi(data);
+}
+
+static struct irq_chip wsp_ics_msi = {
+ .name = "WSP ICS MSI",
+ .irq_startup = wsp_ics_msi_startup,
+ .irq_mask = wsp_ics_msi_mask_irq,
+ .irq_unmask = wsp_ics_msi_unmask_irq,
+ .irq_eoi = wsp_ics_eoi,
+ .irq_set_affinity = wsp_chip_set_affinity
+};
+
+void wsp_ics_set_msi_chip(unsigned int irq)
+{
+ irq_set_chip(irq, &wsp_ics_msi);
+}
+
+void wsp_ics_set_std_chip(unsigned int irq)
+{
+ irq_set_chip(irq, &wsp_irq_chip);
+}
+#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
index e34d53102640..07b644e0cf97 100644
--- a/arch/powerpc/platforms/wsp/ics.h
+++ b/arch/powerpc/platforms/wsp/ics.h
@@ -17,4 +17,9 @@ extern void wsp_init_irq(void);
extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
+#ifdef CONFIG_PCI_MSI
+extern void wsp_ics_set_msi_chip(unsigned int irq);
+extern void wsp_ics_set_std_chip(unsigned int irq);
+#endif /* CONFIG_PCI_MSI */
+
#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/msi.c b/arch/powerpc/platforms/wsp/msi.c
new file mode 100644
index 000000000000..380882f27add
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/msi.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2011 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include "msi.h"
+#include "ics.h"
+#include "wsp_pci.h"
+
+/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
+#define MSI_ADDR_32 0xFFFF0000ul
+#define MSI_ADDR_64 0x1000000000000000ul
+
+int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct pci_controller *phb;
+ struct msi_desc *entry;
+ struct msi_msg msg;
+ unsigned int virq;
+ int hwirq;
+
+ phb = pci_bus_to_host(dev->bus);
+ if (!phb)
+ return -ENOENT;
+
+ entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
+ if (entry->msi_attrib.is_64) {
+ msg.address_lo = 0;
+ msg.address_hi = MSI_ADDR_64 >> 32;
+ } else {
+ msg.address_lo = MSI_ADDR_32;
+ msg.address_hi = 0;
+ }
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ hwirq = wsp_ics_alloc_irq(phb->dn, 1);
+ if (hwirq < 0) {
+ dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
+ return hwirq;
+ }
+
+ virq = irq_create_mapping(NULL, hwirq);
+ if (virq == NO_IRQ) {
+ dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
+ return -1;
+ }
+
+ dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
+ hwirq, virq);
+
+ wsp_ics_set_msi_chip(virq);
+ irq_set_msi_desc(virq, entry);
+ msg.data = hwirq & XIVE_ADDR_MASK;
+ write_msi_msg(virq, &msg);
+ }
+
+ return 0;
+}
+
+void wsp_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct pci_controller *phb;
+ struct msi_desc *entry;
+ int hwirq;
+
+ phb = pci_bus_to_host(dev->bus);
+
+ dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq == NO_IRQ)
+ continue;
+
+ irq_set_msi_desc(entry->irq, NULL);
+ wsp_ics_set_std_chip(entry->irq);
+
+ hwirq = virq_to_hw(entry->irq);
+ /* In this order to avoid racing with irq_create_mapping() */
+ irq_dispose_mapping(entry->irq);
+ wsp_ics_free_irq(phb->dn, hwirq);
+ }
+}
+
+void wsp_setup_phb_msi(struct pci_controller *phb)
+{
+ /* Create a single MVE at offset 0 that matches everything */
+ out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
+ out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);
+
+ ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
+ ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
+}
diff --git a/arch/powerpc/platforms/wsp/msi.h b/arch/powerpc/platforms/wsp/msi.h
new file mode 100644
index 000000000000..0ab27b71b24d
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/msi.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2011 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __WSP_MSI_H
+#define __WSP_MSI_H
+
+#ifdef CONFIG_PCI_MSI
+extern void wsp_setup_phb_msi(struct pci_controller *phb);
+#else
+static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
+#endif
+
+#endif /* __WSP_MSI_H */
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
index 40f28916ff6c..166f2e4b4bee 100644
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ b/arch/powerpc/platforms/wsp/psr2.c
@@ -63,6 +63,10 @@ static void __init psr2_setup_arch(void)
#ifdef CONFIG_SMP
a2_setup_smp();
#endif
+#ifdef CONFIG_PCI
+ wsp_setup_pci();
+#endif
+
}
static int __init psr2_probe(void)
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
index 7c3e087fd2f2..33479818f62a 100644
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ b/arch/powerpc/platforms/wsp/wsp.h
@@ -3,6 +3,9 @@
#include <asm/wsp.h>
+/* Devtree compatible strings for major devices */
+#define PCIE_COMPATIBLE "ibm,wsp-pciex"
+
extern void wsp_setup_pci(void);
extern void scom_init_wsp(void);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
new file mode 100644
index 000000000000..e0262cd0e2d3
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -0,0 +1,1133 @@
+/*
+ * Copyright 2010 Ben Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define DEBUG
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+#include <asm/prom.h>
+#include <asm/pci-bridge.h>
+#include <asm/machdep.h>
+#include <asm/ppc-pci.h>
+#include <asm/iommu.h>
+#include <asm/io-workarounds.h>
+
+#include "wsp.h"
+#include "wsp_pci.h"
+#include "msi.h"
+
+
+/* Max number of TVTs for one table. Only 32-bit tables can use
+ * multiple TVTs and so the max currently supported is thus 8
+ * since only 2G of DMA space is supported
+ */
+#define MAX_TABLE_TVT_COUNT 8
+
+struct wsp_dma_table {
+ struct list_head link;
+ struct iommu_table table;
+ struct wsp_phb *phb;
+ struct page *tces[MAX_TABLE_TVT_COUNT];
+};
+
+/* We support DMA regions from 0...2G in 32bit space (no support for
+ * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
+ * entry) with validation enabled (though not supported by Simics
+ * just yet).
+ *
+ * To simplify things, we divide this 2G space into N regions based
+ * on the constant below which could be turned into a tunable eventually
+ *
+ * We then assign dynamically those regions to devices as they show up.
+ *
+ * We use a bitmap as an allocator for these.
+ *
+ * Tables are allocated/created dynamically as devices are discovered;
+ * multiple TVT entries are used if needed.
+ *
+ * When 64-bit DMA support is added we should simply use a separate set
+ * of larger regions (the HW supports 64 TVT entries). We can
+ * additionally create a bypass region in 64-bit space for performance,
+ * though that would have a cost in terms of security.
+ *
+ * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
+ * for all devices and bus/dev/fn validation is disabled
+ *
+ * Note that a DMA32 region cannot be smaller than 256M so the max
+ * supported here for now is 8. We don't yet support sharing regions
+ * between multiple devices so the max number of devices supported
+ * is MAX_TABLE_TVT_COUNT.
+ */
+#define NUM_DMA32_REGIONS 1
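+/*
+ * For instance, with NUM_DMA32_REGIONS = 8 each region covers
+ * 2G / 8 = 256M and needs a single TVT entry, while the default of 1
+ * gives one 2G table shared by all devices, backed by 8 TVT entries.
+ */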
+
+struct wsp_phb {
+ struct pci_controller *hose;
+
+ /* Lock controlling access to the list of dma tables.
+ * It does -not- protect against dma_* operations on
+ * those tables, those should be stopped before an entry
+ * is removed from the list.
+ *
+ * The lock is also used for error handling operations
+ */
+ spinlock_t lock;
+ struct list_head dma_tables;
+ unsigned long dma32_map;
+ unsigned long dma32_base;
+ unsigned int dma32_num_regions;
+ unsigned long dma32_region_size;
+
+ /* Debugfs stuff */
+ struct dentry *ddir;
+
+ struct list_head all;
+};
+static LIST_HEAD(wsp_phbs);
+
+//#define cfg_debug(fmt...) pr_debug(fmt)
+#define cfg_debug(fmt...)
+
+
+static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 *val)
+{
+ struct pci_controller *hose;
+ int suboff;
+ u64 addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = PCIE_REG_CA_ENABLE |
+ ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
+ ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
+ ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
+ suboff = offset & 3;
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+
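+	/*
+	 * Config accesses use a 4-byte aligned address plus byte enables:
+	 * e.g. a 1-byte read at offset 2 gives suboff = 2, a byte-enable
+	 * nibble of 0x8 >> 2 = 0x2, and the value is shifted down by
+	 * suboff * 8 = 16 bits before masking.
+	 */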
+ switch (len) {
+ case 1:
+ addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
+ >> (suboff << 3)) & 0xff;
+ cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, *val);
+ break;
+ case 2:
+ addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
+ >> (suboff << 3)) & 0xffff;
+ cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, *val);
+ break;
+ default:
+ addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
+ cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, *val);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+ int offset, int len, u32 val)
+{
+ struct pci_controller *hose;
+ int suboff;
+ u64 addr;
+
+ hose = pci_bus_to_host(bus);
+ if (hose == NULL)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ if (offset >= 0x1000)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ addr = PCIE_REG_CA_ENABLE |
+ ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
+ ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
+ ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
+ suboff = offset & 3;
+
+ /*
+ * Note: the caller has already checked that offset is
+ * suitably aligned and that len is 1, 2 or 4.
+ */
+ switch (len) {
+ case 1:
+ addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
+ val <<= suboff << 3;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
+ cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, val);
+ break;
+ case 2:
+ addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
+ val <<= suboff << 3;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
+ cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, val);
+ break;
+ default:
+ addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
+ out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
+ out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
+ cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
+ bus->number, devfn >> 3, devfn & 7,
+ offset, suboff, addr, val);
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static struct pci_ops wsp_pcie_pci_ops =
+{
+ .read = wsp_pcie_read_config,
+ .write = wsp_pcie_write_config,
+};
+
+#define TCE_SHIFT 12
+#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
+#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
+#define TCE_PCI_READ 0x1 /* read from PCI allowed */
+#define TCE_RPN_MASK 0x3fffffffffful /* 42-bit RPN (4K pages) */
+#define TCE_RPN_SHIFT 12
+
+//#define dma_debug(fmt...) pr_debug(fmt)
+#define dma_debug(fmt...)
+
+static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
+ unsigned long uaddr, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+{
+ struct wsp_dma_table *ptbl = container_of(tbl,
+ struct wsp_dma_table,
+ table);
+ u64 proto_tce;
+ u64 *tcep;
+ u64 rpn;
+
+ proto_tce = TCE_PCI_READ;
+#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
+ proto_tce |= TCE_PCI_WRITE;
+#else
+ if (direction != DMA_TO_DEVICE)
+ proto_tce |= TCE_PCI_WRITE;
+#endif
+
+ /* XXX Make this faster by factoring out the page address
+ * within a TCE table
+ */
+ while (npages--) {
+ /* We don't use it->base as the table can be scattered */
+ tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
+ tcep += (index & 0xffff);
+
+ /* can't move this out since we might cross LMB boundary */
+ rpn = __pa(uaddr) >> TCE_SHIFT;
+ *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
+
+ dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
+ tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
+
+ uaddr += TCE_PAGE_SIZE;
+ index++;
+ }
+ return 0;
+}
+
+static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
+{
+ struct wsp_dma_table *ptbl = container_of(tbl,
+ struct wsp_dma_table,
+ table);
+#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
+ struct pci_controller *hose = ptbl->phb->hose;
+#endif
+ u64 *tcep;
+
+ /* XXX Make this faster by factoring out the page address
+ * within a TCE table. Also use the line-kill option to kill multiple
+ * TCEs at once
+ */
+ while (npages--) {
+ /* We don't use it->base as the table can be scattered */
+ tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
+ tcep += (index & 0xffff);
+ dma_debug("[DMA] TCE %p cleared\n", tcep);
+ *tcep = 0;
+#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
+ /* Don't write there since it would pollute other MMIO accesses */
+ out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
+ PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
+ (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
+#endif
+ index++;
+ }
+}
+
+static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
+ unsigned int region,
+ struct pci_dev *validate)
+{
+ struct pci_controller *hose = phb->hose;
+ unsigned long size = phb->dma32_region_size;
+ unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
+ struct wsp_dma_table *tbl;
+ int tvts_per_table, i, tvt, nid;
+ unsigned long flags;
+
+ nid = of_node_to_nid(phb->hose->dn);
+
+ /* Calculate how many TVTs are needed */
+ tvts_per_table = size / 0x10000000;
+ if (tvts_per_table == 0)
+ tvts_per_table = 1;
+
+ /* Calculate the base TVT index. We know all tables have the same
+ * size so we just do a simple multiply here
+ */
+ tvt = region * tvts_per_table;
+
+ pr_debug(" Region : %d\n", region);
+ pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
+ pr_debug(" Number of TVTs : %d\n", tvts_per_table);
+ pr_debug(" Base TVT : %d\n", tvt);
+ pr_debug(" Node : %d\n", nid);
+
+ tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
+ if (!tbl)
+ return ERR_PTR(-ENOMEM);
+ tbl->phb = phb;
+
+ /* Create as many TVTs as needed, each represents 256M at most */
+ for (i = 0; i < tvts_per_table; i++) {
+ u64 tvt_data1, tvt_data0;
+
+ /* Allocate table. We always use a 4K TCE size for now, so
+ * one table is always 8 * (256M / 4K) == 512K
+ */
+ tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
+ if (tbl->tces[i] == NULL)
+ goto fail;
+ memset(page_address(tbl->tces[i]), 0, 0x80000);
+
+ pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));
+
+ /* Table size. We currently set it to be the whole 256M region */
+ tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
+ /* IO page size set to 4K */
+ tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
+ /* Shift in the address */
+ tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;
+
+ /* Validation stuff. We only fully validate bus/dev/fn for now;
+ * one day maybe we can group devices, but that isn't the case
+ * at the moment
+ */
+ if (validate) {
+ tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
+ tvt_data0 |= validate->bus->number;
+ tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
+ tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
+ << IODA_TVT1_DEVNUM_VALUE_SHIFT;
+ tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
+ tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
+ << IODA_TVT1_FUNCNUM_VALUE_SHIFT;
+ }
+
+ /* XX PE number is always 0 for now */
+
+ /* Program the values using the PHB lock */
+ spin_lock_irqsave(&phb->lock, flags);
+ out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
+ (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
+ out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
+ out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
+ spin_unlock_irqrestore(&phb->lock, flags);
+ }
+
+ /* Init bits and pieces */
+ tbl->table.it_blocksize = 16;
+ tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
+ tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
+
+ /*
+ * It's already blank but we clear it anyway.
+ * Consider an additional interface that makes clearing optional.
+ */
+ iommu_init_table(&tbl->table, nid);
+
+ list_add(&tbl->link, &phb->dma_tables);
+ return tbl;
+
+ fail:
+ pr_debug(" Failed to allocate a 256M TCE table !\n");
+ for (i = 0; i < tvts_per_table; i++)
+ if (tbl->tces[i])
+ __free_pages(tbl->tces[i], get_order(0x80000));
+ kfree(tbl);
+ return ERR_PTR(-ENOMEM);
+}
+
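+/*
+ * Per-device DMA setup hook. With a single DMA32 region every device
+ * shares one table; otherwise each device gets its own region, picked
+ * by scanning the dma32_map bitmap.
+ */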
+static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
+{
+ struct dev_archdata *archdata = &pdev->dev.archdata;
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct wsp_phb *phb = hose->private_data;
+ struct wsp_dma_table *table = NULL;
+ unsigned long flags;
+ int i;
+
+ /* Don't assign an iommu table to a bridge */
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ return;
+
+ pr_debug("%s: Setting up DMA...\n", pci_name(pdev));
+
+ spin_lock_irqsave(&phb->lock, flags);
+
+ /* If only one region, check if it already exists */
+ if (phb->dma32_num_regions == 1) {
+ spin_unlock_irqrestore(&phb->lock, flags);
+ if (list_empty(&phb->dma_tables))
+ table = wsp_pci_create_dma32_table(phb, 0, NULL);
+ else
+ table = list_first_entry(&phb->dma_tables,
+ struct wsp_dma_table,
+ link);
+ } else {
+ /* else find a free region */
+ for (i = 0; i < phb->dma32_num_regions && !table; i++) {
+ if (__test_and_set_bit(i, &phb->dma32_map))
+ continue;
+ spin_unlock_irqrestore(&phb->lock, flags);
+ table = wsp_pci_create_dma32_table(phb, i, pdev);
+ }
+ }
+
+ /* Check if we got an error */
+ if (IS_ERR(table)) {
+ pr_err("%s: Failed to create DMA table, err %ld !\n",
+ pci_name(pdev), PTR_ERR(table));
+ return;
+ }
+
+ /* Or a valid table */
+ if (table) {
+ pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
+ pci_name(pdev),
+ table->table.it_offset << IOMMU_PAGE_SHIFT,
+ (table->table.it_offset << IOMMU_PAGE_SHIFT)
+ + phb->dma32_region_size - 1);
+ archdata->dma_data.iommu_table_base = &table->table;
+ return;
+ }
+
+ /* Or no room */
+ spin_unlock_irqrestore(&phb->lock, flags);
+ pr_err("%s: Out of DMA space !\n", pci_name(pdev));
+}
+
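+/*
+ * One-time PHB configuration: program the IO and M32A windows, clear
+ * all TVT entries, enable MSIs, IO and M32A decoding, and mask the
+ * errors that fire during normal config space probing.
+ */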
+static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
+{
+ u64 val;
+ int i;
+
+#define DUMP_REG(x) \
+ pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
+
+#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS
+ /* WSP DD1 has a bogus class code by default in the PCI-E
+ * root complex's built-in P2P bridge */
+ val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
+ pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
+ out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
+ (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
+ pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
+#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
+
+#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
+ /* XXX Disable TCE caching, it doesn't work on DD1 */
+ out_be64(hose->cfg_data + 0xe50,
+ in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
+ printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
+#endif
+
+ /* Configure M32A and IO. IO is hard wired to be 1M for now */
+ out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
+ out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
+ (~(hose->io_resource.end - hose->io_resource.start)) &
+ 0x3fffffff000ul);
+ out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);
+
+ out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
+ hose->mem_resources[0].start);
+ printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
+ (~(hose->mem_resources[0].end -
+ hose->mem_resources[0].start)) & 0x3ffffff0000ul);
+ out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
+ (~(hose->mem_resources[0].end -
+ hose->mem_resources[0].start)) & 0x3ffffff0000ul);
+ out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
+ (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);
+
+ /* Clear all TVT entries
+ *
+ * XX Might get TVT count from device-tree
+ */
+ for (i = 0; i < IODA_TVT_COUNT; i++) {
+ out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
+ PCIE_REG_IODA_AD_TBL_TVT | i);
+ out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
+ out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
+ }
+
+ /* Enable 64B TCEs */
+ out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
+ in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
+ PCIE_REG_PHBC_64B_TCE_EN);
+
+ /* Enable 32 & 64-bit MSIs, IO space and M32A */
+ val = PCIE_REG_PHBC_32BIT_MSI_EN |
+ PCIE_REG_PHBC_IO_EN |
+ PCIE_REG_PHBC_64BIT_MSI_EN |
+ PCIE_REG_PHBC_M32A_EN;
+ if (iommu_is_off)
+ val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
+ pr_debug("Will write config: 0x%llx\n", val);
+ out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);
+
+ /* Enable error reporting */
+ out_be64(hose->cfg_data + 0xe00,
+ in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);
+
+ /* Mask an error that is generated when probing config space
+ *
+ * XXX Maybe we should only mask it around config space cycles, or
+ * ignore it when we know we had a config space cycle recently?
+ */
+ out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
+ out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);
+
+ /* Enable UTL errors; for now, all of them go to UTL irq 1
+ *
+ * We similarly mask one UTL error apparently caused during normal
+ * probing. We also mask the link-up error
+ */
+ out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
+ out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
+ out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
+ out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
+ out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
+ out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);
+
+ DUMP_REG(PCIE_REG_IO_BASE_ADDR);
+ DUMP_REG(PCIE_REG_IO_BASE_MASK);
+ DUMP_REG(PCIE_REG_IO_START_ADDR);
+ DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
+ DUMP_REG(PCIE_REG_M32A_BASE_MASK);
+ DUMP_REG(PCIE_REG_M32A_START_ADDR);
+ DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
+ DUMP_REG(PCIE_REG_M32B_BASE_MASK);
+ DUMP_REG(PCIE_REG_M32B_START_ADDR);
+ DUMP_REG(PCIE_REG_M64_BASE_ADDR);
+ DUMP_REG(PCIE_REG_M64_BASE_MASK);
+ DUMP_REG(PCIE_REG_M64_START_ADDR);
+ DUMP_REG(PCIE_REG_PHB_CONFIG);
+}
+
+static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
+{
+ u64 val;
+ int i;
+
+ for (i = 0; i < 10000; i++) {
+ val = in_be64(phb->hose->cfg_data + 0xe08);
+ if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
+ return;
+ udelay(1);
+ }
+ pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
+ phb->hose->global_number, port);
+}
+
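+/*
+ * Generate locked PIO accessors: including asm/io-defs.h with the
+ * macros below expands each port IO op into a wrapper that takes the
+ * PHB lock and waits for the bridge to go idle first. The _mem
+ * variants expand to nothing, so MMIO accessors are left alone.
+ */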
+#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa) \
+static ret wsp_pci_##name at \
+{ \
+ struct iowa_bus *bus; \
+ struct wsp_phb *phb; \
+ unsigned long flags; \
+ ret rval; \
+ bus = iowa_pio_find_bus(aa); \
+ WARN_ON(!bus); \
+ phb = bus->private; \
+ spin_lock_irqsave(&phb->lock, flags); \
+ wsp_pci_wait_io_idle(phb, aa); \
+ rval = __do_##name al; \
+ spin_unlock_irqrestore(&phb->lock, flags); \
+ return rval; \
+}
+
+#define DEF_PCI_AC_NORET_pio(name, at, al, aa) \
+static void wsp_pci_##name at \
+{ \
+ struct iowa_bus *bus; \
+ struct wsp_phb *phb; \
+ unsigned long flags; \
+ bus = iowa_pio_find_bus(aa); \
+ WARN_ON(!bus); \
+ phb = bus->private; \
+ spin_lock_irqsave(&phb->lock, flags); \
+ wsp_pci_wait_io_idle(phb, aa); \
+ __do_##name al; \
+ spin_unlock_irqrestore(&phb->lock, flags); \
+}
+
+#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
+#define DEF_PCI_AC_NORET_mem(name, at, al, aa)
+
+#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
+ DEF_PCI_AC_RET_##space(name, ret, at, al, aa)
+
+#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
+ DEF_PCI_AC_NORET_##space(name, at, al, aa) \
+
+
+#include <asm/io-defs.h>
+
+#undef DEF_PCI_AC_RET
+#undef DEF_PCI_AC_NORET
+
+static struct ppc_pci_io wsp_pci_iops = {
+ .inb = wsp_pci_inb,
+ .inw = wsp_pci_inw,
+ .inl = wsp_pci_inl,
+ .outb = wsp_pci_outb,
+ .outw = wsp_pci_outw,
+ .outl = wsp_pci_outl,
+ .insb = wsp_pci_insb,
+ .insw = wsp_pci_insw,
+ .insl = wsp_pci_insl,
+ .outsb = wsp_pci_outsb,
+ .outsw = wsp_pci_outsw,
+ .outsl = wsp_pci_outsl,
+};
+
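+/*
+ * Probe one PHB node: map its registers, carve the 2G DMA32 space into
+ * regions, configure the hardware and register the PIO workarounds and
+ * (optionally) MSI support.
+ */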
+static int __init wsp_setup_one_phb(struct device_node *np)
+{
+ struct pci_controller *hose;
+ struct wsp_phb *phb;
+
+ pr_info("PCI: Setting up PCIe host bridge 0x%s\n", np->full_name);
+
+ phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
+ if (!phb)
+ return -ENOMEM;
+ hose = pcibios_alloc_controller(np);
+ if (!hose) {
+ /* Can't really free the phb */
+ return -ENOMEM;
+ }
+ hose->private_data = phb;
+ phb->hose = hose;
+
+ INIT_LIST_HEAD(&phb->dma_tables);
+ spin_lock_init(&phb->lock);
+
+ /* XXX Use bus-range property ? */
+ hose->first_busno = 0;
+ hose->last_busno = 0xff;
+
+ /* We use cfg_data as the address for the whole bridge MMIO space
+ */
+ hose->cfg_data = of_iomap(hose->dn, 0);
+
+ pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);
+
+ /* Get the ranges of the device-tree */
+ pci_process_bridge_OF_ranges(hose, np, 0);
+
+ /* XXX Force re-assigning of everything for now */
+ pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
+ PCI_ENABLE_PROC_DOMAINS);
+ pci_probe_only = 0;
+
+ /* Calculate how the TCE space is divided */
+ phb->dma32_base = 0;
+ phb->dma32_num_regions = NUM_DMA32_REGIONS;
+ if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
+ pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
+ MAX_TABLE_TVT_COUNT);
+ phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
+ }
+ phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;
+
+ BUG_ON(!is_power_of_2(phb->dma32_region_size));
+
+ /* Setup config ops */
+ hose->ops = &wsp_pcie_pci_ops;
+
+ /* Configure the HW */
+ wsp_pcie_configure_hw(hose);
+
+ /* Instantiate IO workarounds */
+ iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
+#ifdef CONFIG_PCI_MSI
+ wsp_setup_phb_msi(hose);
+#endif
+
+ /* Add to global list */
+ list_add(&phb->all, &wsp_phbs);
+
+ return 0;
+}
+
+void __init wsp_setup_pci(void)
+{
+ struct device_node *np;
+ int rc;
+
+ /* Find host bridges */
+ for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
+ rc = wsp_setup_one_phb(np);
+ if (rc)
+ pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
+ np->full_name, rc);
+ }
+
+ /* Establish device-tree linkage */
+ pci_devs_phb_init();
+
+ /* Set DMA ops to use TCEs */
+ if (iommu_is_off) {
+ pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
+ set_pci_dma_ops(&dma_direct_ops);
+ } else {
+ ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
+ ppc_md.tce_build = tce_build_wsp;
+ ppc_md.tce_free = tce_free_wsp;
+ set_pci_dma_ops(&dma_iommu_ops);
+ }
+}
+
+#define err_debug(fmt...) pr_debug(fmt)
+//#define err_debug(fmt...)
+
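+/*
+ * Fallback for when the PHB node has no usable "interrupts" property:
+ * dig the error IRQ out of the child P2P bridge's interrupt-map,
+ * assuming hardware IRQs 0..4 belong to PHB 0 and 5..9 to PHB 1, with
+ * the error interrupt apparently being the last of each group.
+ */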
+static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
+{
+ const u32 *prop;
+ int hw_irq;
+
+ /* Ok, no interrupts property, let's try to find our child P2P */
+ np = of_get_next_child(np, NULL);
+ if (np == NULL)
+ return 0;
+
+ /* Grab its interrupt-map */
+ prop = of_get_property(np, "interrupt-map", NULL);
+ if (prop == NULL)
+ return 0;
+
+ /* Grab one of the interrupts in there, keep the low 4 bits */
+ hw_irq = prop[5] & 0xf;
+
+ /* 0..4 for PHB 0 and 5..9 for PHB 1 */
+ if (hw_irq < 5)
+ hw_irq = 4;
+ else
+ hw_irq = 9;
+ hw_irq |= prop[5] & ~0xf;
+
+ err_debug("PCI: Using 0x%x as error IRQ for %s\n",
+ hw_irq, np->parent->full_name);
+ return irq_create_mapping(NULL, hw_irq);
+}
+
+static const struct {
+ u32 offset;
+ const char *name;
+} wsp_pci_regs[] = {
+#define DREG(x) { PCIE_REG_##x, #x }
+#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
+ /* Architected registers except CONFIG_ and IODA
+ * to avoid side effects
+ */
+ DREG(DMA_CHAN_STATUS),
+ DREG(CPU_LOADSTORE_STATUS),
+ DREG(LOCK0),
+ DREG(LOCK1),
+ DREG(PHB_CONFIG),
+ DREG(IO_BASE_ADDR),
+ DREG(IO_BASE_MASK),
+ DREG(IO_START_ADDR),
+ DREG(M32A_BASE_ADDR),
+ DREG(M32A_BASE_MASK),
+ DREG(M32A_START_ADDR),
+ DREG(M32B_BASE_ADDR),
+ DREG(M32B_BASE_MASK),
+ DREG(M32B_START_ADDR),
+ DREG(M64_BASE_ADDR),
+ DREG(M64_BASE_MASK),
+ DREG(M64_START_ADDR),
+ DREG(TCE_KILL),
+ DREG(LOCK2),
+ DREG(PHB_GEN_CAP),
+ DREG(PHB_TCE_CAP),
+ DREG(PHB_IRQ_CAP),
+ DREG(PHB_EEH_CAP),
+ DREG(PAPR_ERR_INJ_CONTROL),
+ DREG(PAPR_ERR_INJ_ADDR),
+ DREG(PAPR_ERR_INJ_MASK),
+
+ /* UTL core regs */
+ DUTL(SYS_BUS_CONTROL),
+ DUTL(STATUS),
+ DUTL(SYS_BUS_AGENT_STATUS),
+ DUTL(SYS_BUS_AGENT_ERR_SEV),
+ DUTL(SYS_BUS_AGENT_IRQ_EN),
+ DUTL(SYS_BUS_BURST_SZ_CONF),
+ DUTL(REVISION_ID),
+ DUTL(OUT_POST_HDR_BUF_ALLOC),
+ DUTL(OUT_POST_DAT_BUF_ALLOC),
+ DUTL(IN_POST_HDR_BUF_ALLOC),
+ DUTL(IN_POST_DAT_BUF_ALLOC),
+ DUTL(OUT_NP_BUF_ALLOC),
+ DUTL(IN_NP_BUF_ALLOC),
+ DUTL(PCIE_TAGS_ALLOC),
+ DUTL(GBIF_READ_TAGS_ALLOC),
+
+ DUTL(PCIE_PORT_CONTROL),
+ DUTL(PCIE_PORT_STATUS),
+ DUTL(PCIE_PORT_ERROR_SEV),
+ DUTL(PCIE_PORT_IRQ_EN),
+ DUTL(RC_STATUS),
+ DUTL(RC_ERR_SEVERITY),
+ DUTL(RC_IRQ_EN),
+ DUTL(EP_STATUS),
+ DUTL(EP_ERR_SEVERITY),
+ DUTL(EP_ERR_IRQ_EN),
+ DUTL(PCI_PM_CTRL1),
+ DUTL(PCI_PM_CTRL2),
+
+ /* PCIe stack regs */
+ DREG(SYSTEM_CONFIG1),
+ DREG(SYSTEM_CONFIG2),
+ DREG(EP_SYSTEM_CONFIG),
+ DREG(EP_FLR),
+ DREG(EP_BAR_CONFIG),
+ DREG(LINK_CONFIG),
+ DREG(PM_CONFIG),
+ DREG(DLP_CONTROL),
+ DREG(DLP_STATUS),
+ DREG(ERR_REPORT_CONTROL),
+ DREG(SLOT_CONTROL1),
+ DREG(SLOT_CONTROL2),
+ DREG(UTL_CONFIG),
+ DREG(BUFFERS_CONFIG),
+ DREG(ERROR_INJECT),
+ DREG(SRIOV_CONFIG),
+ DREG(PF0_SRIOV_STATUS),
+ DREG(PF1_SRIOV_STATUS),
+ DREG(PORT_NUMBER),
+ DREG(POR_SYSTEM_CONFIG),
+
+ /* Internal logic regs */
+ DREG(PHB_VERSION),
+ DREG(RESET),
+ DREG(PHB_CONTROL),
+ DREG(PHB_TIMEOUT_CONTROL1),
+ DREG(PHB_QUIESCE_DMA),
+ DREG(PHB_DMA_READ_TAG_ACTV),
+ DREG(PHB_TCE_READ_TAG_ACTV),
+
+ /* FIR registers */
+ DREG(LEM_FIR_ACCUM),
+ DREG(LEM_FIR_AND_MASK),
+ DREG(LEM_FIR_OR_MASK),
+ DREG(LEM_ACTION0),
+ DREG(LEM_ACTION1),
+ DREG(LEM_ERROR_MASK),
+ DREG(LEM_ERROR_AND_MASK),
+ DREG(LEM_ERROR_OR_MASK),
+
+ /* Error traps registers */
+ DREG(PHB_ERR_STATUS),
+ DREG(PHB_ERR1_STATUS),
+ DREG(PHB_ERR_INJECT),
+ DREG(PHB_ERR_LEM_ENABLE),
+ DREG(PHB_ERR_IRQ_ENABLE),
+ DREG(PHB_ERR_FREEZE_ENABLE),
+ DREG(PHB_ERR_SIDE_ENABLE),
+ DREG(PHB_ERR_LOG_0),
+ DREG(PHB_ERR_LOG_1),
+ DREG(PHB_ERR_STATUS_MASK),
+ DREG(PHB_ERR1_STATUS_MASK),
+ DREG(MMIO_ERR_STATUS),
+ DREG(MMIO_ERR1_STATUS),
+ DREG(MMIO_ERR_INJECT),
+ DREG(MMIO_ERR_LEM_ENABLE),
+ DREG(MMIO_ERR_IRQ_ENABLE),
+ DREG(MMIO_ERR_FREEZE_ENABLE),
+ DREG(MMIO_ERR_SIDE_ENABLE),
+ DREG(MMIO_ERR_LOG_0),
+ DREG(MMIO_ERR_LOG_1),
+ DREG(MMIO_ERR_STATUS_MASK),
+ DREG(MMIO_ERR1_STATUS_MASK),
+ DREG(DMA_ERR_STATUS),
+ DREG(DMA_ERR1_STATUS),
+ DREG(DMA_ERR_INJECT),
+ DREG(DMA_ERR_LEM_ENABLE),
+ DREG(DMA_ERR_IRQ_ENABLE),
+ DREG(DMA_ERR_FREEZE_ENABLE),
+ DREG(DMA_ERR_SIDE_ENABLE),
+ DREG(DMA_ERR_LOG_0),
+ DREG(DMA_ERR_LOG_1),
+ DREG(DMA_ERR_STATUS_MASK),
+ DREG(DMA_ERR1_STATUS_MASK),
+
+ /* Debug and Trace registers */
+ DREG(PHB_DEBUG_CONTROL0),
+ DREG(PHB_DEBUG_STATUS0),
+ DREG(PHB_DEBUG_CONTROL1),
+ DREG(PHB_DEBUG_STATUS1),
+ DREG(PHB_DEBUG_CONTROL2),
+ DREG(PHB_DEBUG_STATUS2),
+ DREG(PHB_DEBUG_CONTROL3),
+ DREG(PHB_DEBUG_STATUS3),
+ DREG(PHB_DEBUG_CONTROL4),
+ DREG(PHB_DEBUG_STATUS4),
+ DREG(PHB_DEBUG_CONTROL5),
+ DREG(PHB_DEBUG_STATUS5),
+
+ /* Don't seem to exist ...
+ DREG(PHB_DEBUG_CONTROL6),
+ DREG(PHB_DEBUG_STATUS6),
+ */
+};
+
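+/*
+ * debugfs "all_regs" dump. The offsets skipped below are the LEM
+ * AND/OR mask registers (0xc08/0xc10/0xc38/0xc40), which are
+ * write-only.
+ */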
+static int wsp_pci_regs_show(struct seq_file *m, void *private)
+{
+ struct wsp_phb *phb = m->private;
+ struct pci_controller *hose = phb->hose;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
+ /* Skip write-only regs */
+ if (wsp_pci_regs[i].offset == 0xc08 ||
+ wsp_pci_regs[i].offset == 0xc10 ||
+ wsp_pci_regs[i].offset == 0xc38 ||
+ wsp_pci_regs[i].offset == 0xc40)
+ continue;
+ seq_printf(m, "0x%03x: 0x%016llx %s\n",
+ wsp_pci_regs[i].offset,
+ in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
+ wsp_pci_regs[i].name);
+ }
+ return 0;
+}
+
+static int wsp_pci_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, wsp_pci_regs_show, inode->i_private);
+}
+
+static const struct file_operations wsp_pci_regs_fops = {
+ .open = wsp_pci_regs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int wsp_pci_reg_set(void *data, u64 val)
+{
+ out_be64((void __iomem *)data, val);
+ return 0;
+}
+
+static int wsp_pci_reg_get(void *data, u64 *val)
+{
+ *val = in_be64((void __iomem *)data);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
+
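+/*
+ * Error interrupt handler: read and clear the UTL and PHB/MMIO/DMA
+ * trap registers, dump whatever was latched, and loop for as long as
+ * the traps keep reporting new errors.
+ */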
+static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
+{
+ struct wsp_phb *phb = dev_id;
+ struct pci_controller *hose = phb->hose;
+ irqreturn_t handled = IRQ_NONE;
+ struct wsp_pcie_err_log_data ed;
+
+ pr_err("PCI: Error interrupt on %s (PHB %d)\n",
+ hose->dn->full_name, hose->global_number);
+ again:
+ memset(&ed, 0, sizeof(ed));
+
+ /* Read and clear UTL errors */
+ ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
+ if (ed.utl_sys_err)
+ out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
+ ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
+ if (ed.utl_port_err)
+ out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
+ ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
+ if (ed.utl_rc_err)
+ out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);
+
+ /* Read and clear main trap errors */
+ ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
+ if (ed.phb_err) {
+ ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
+ ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
+ ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
+ out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
+ out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
+ }
+ ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
+ if (ed.mmio_err) {
+ ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
+ ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
+ ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
+ out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
+ out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
+ }
+ ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
+ if (ed.dma_err) {
+ ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
+ ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
+ ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
+ out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
+ out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
+ }
+
+ /* Now print things out */
+ if (ed.phb_err) {
+ pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
+ pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
+ pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
+ pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
+ }
+ if (ed.mmio_err) {
+ pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
+ pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
+ pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
+ pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
+ }
+ if (ed.dma_err) {
+ pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
+ pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
+ pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
+ pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
+ }
+ if (ed.utl_sys_err)
+ pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
+ if (ed.utl_port_err)
+ pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
+ if (ed.utl_rc_err)
+ pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);
+
+ /* Interrupts are caused by the error traps. If we had any error in
+ * them, loop again in case the UTL buffered something new between
+ * reading the UTL registers and reading the traps
+ */
+ if (ed.dma_err || ed.mmio_err || ed.phb_err) {
+ handled = IRQ_HANDLED;
+ goto again;
+ }
+ return handled;
+}
+
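+/*
+ * Hook up error reporting for one PHB: create the debugfs entries,
+ * find the error interrupt (from the device-tree or the fallback
+ * above), request it and enable all PHB/MMIO/DMA error interrupts.
+ */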
+static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
+{
+ struct pci_controller *hose = phb->hose;
+ int err_irq, i, rc;
+ char fname[16];
+
+ /* Create a debugfs file for that PHB */
+ sprintf(fname, "phb%d", phb->hose->global_number);
+ phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);
+
+ /* Some useful debug output */
+ if (phb->ddir) {
+ struct dentry *d = debugfs_create_dir("regs", phb->ddir);
+ char tmp[64];
+
+ for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
+ sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
+ wsp_pci_regs[i].name);
+ debugfs_create_file(tmp, 0600, d,
+ hose->cfg_data + wsp_pci_regs[i].offset,
+ &wsp_pci_reg_fops);
+ }
+ debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
+ }
+
+ /* Find the IRQ number for that PHB */
+ err_irq = irq_of_parse_and_map(hose->dn, 0);
+ if (err_irq == 0)
+ /* XXX Error IRQ missing from the device-tree */
+ err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
+ if (err_irq == 0) {
+ pr_err("PCI: Failed to fetch error interrupt for %s\n",
+ hose->dn->full_name);
+ return;
+ }
+ /* Request it */
+ rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
+ if (rc) {
+ pr_err("PCI: Failed to request interrupt for %s\n",
+ hose->dn->full_name);
+ }
+ /* Enable interrupts for all errors for now */
+ out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
+ out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
+ out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
+}
+
+/*
+ * This is called later to hookup with the error interrupt
+ */
+static int __init wsp_setup_pci_late(void)
+{
+ struct wsp_phb *phb;
+
+ list_for_each_entry(phb, &wsp_phbs, all)
+ wsp_setup_pci_err_reporting(phb);
+
+ return 0;
+}
+arch_initcall(wsp_setup_pci_late);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.h b/arch/powerpc/platforms/wsp/wsp_pci.h
new file mode 100644
index 000000000000..52e9bd95250d
--- /dev/null
+++ b/arch/powerpc/platforms/wsp/wsp_pci.h
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2010 Ben Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __WSP_PCI_H
+#define __WSP_PCI_H
+
+/* Architected registers */
+#define PCIE_REG_DMA_CHAN_STATUS 0x110
+#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
+
+#define PCIE_REG_CONFIG_DATA 0x130
+#define PCIE_REG_LOCK0 0x138
+#define PCIE_REG_CONFIG_ADDRESS 0x140
+#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
+#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
+#define PCIE_REG_CA_BUS_SHIFT (20+32)
+#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
+#define PCIE_REG_CA_DEV_SHIFT (15+32)
+#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
+#define PCIE_REG_CA_FUNC_SHIFT (12+32)
+#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
+#define PCIE_REG_CA_REG_SHIFT ( 0+32)
+#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
+#define PCIE_REG_CA_BE_SHIFT ( 28)
+#define PCIE_REG_LOCK1 0x148
+
+#define PCIE_REG_PHB_CONFIG 0x160
+#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
+#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
+#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
+#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
+#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
+#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
+#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
+#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
+#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
+#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
+
+#define PCIE_REG_IO_BASE_ADDR 0x170
+#define PCIE_REG_IO_BASE_MASK 0x178
+#define PCIE_REG_IO_START_ADDR 0x180
+
+#define PCIE_REG_M32A_BASE_ADDR 0x190
+#define PCIE_REG_M32A_BASE_MASK 0x198
+#define PCIE_REG_M32A_START_ADDR 0x1a0
+
+#define PCIE_REG_M32B_BASE_ADDR 0x1b0
+#define PCIE_REG_M32B_BASE_MASK 0x1b8
+#define PCIE_REG_M32B_START_ADDR 0x1c0
+
+#define PCIE_REG_M64_BASE_ADDR 0x1e0
+#define PCIE_REG_M64_BASE_MASK 0x1e8
+#define PCIE_REG_M64_START_ADDR 0x1f0
+
+#define PCIE_REG_TCE_KILL 0x210
+#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
+#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
+#define PCIE_REG_TCEKILL_PS_4K 0
+#define PCIE_REG_TCEKILL_PS_64K 1
+#define PCIE_REG_TCEKILL_PS_16M 2
+#define PCIE_REG_TCEKILL_PS_16G 3
+
+#define PCIE_REG_IODA_ADDR 0x220
+#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
+#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
+#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
+#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
+#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
+#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
+#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
+#define PCIE_REG_IODA_DATA0 0x228
+#define PCIE_REG_IODA_DATA1 0x230
+
+#define PCIE_REG_LOCK2 0x240
+
+#define PCIE_REG_PHB_GEN_CAP 0x250
+#define PCIE_REG_PHB_TCE_CAP 0x258
+#define PCIE_REG_PHB_IRQ_CAP 0x260
+#define PCIE_REG_PHB_EEH_CAP 0x268
+
+#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
+#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
+#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
+
+
+#define PCIE_REG_SYS_CFG1 0x600
+#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
+
+#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
+#define IODA_TVT0_TTA_SHIFT 4
+#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
+#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
+#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
+#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
+#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
+#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
+#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
+#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
+#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
+#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
+#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
+#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
+#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
+#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
+#define IODA_TVT1_PE_NUMBER_SHIFT 0
+
+#define IODA_TVT_COUNT 64
+
+/* UTL Core registers */
+#define PCIE_UTL_SYS_BUS_CONTROL 0x400
+#define PCIE_UTL_STATUS 0x408
+#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
+#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
+#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
+#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
+#define PCIE_UTL_REVISION_ID 0x448
+
+#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
+#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
+#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
+#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
+#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
+#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
+#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
+#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
+
+#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
+#define PCIE_UTL_PCIE_PORT_STATUS 0x548
+#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
+#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
+#define PCIE_UTL_RC_STATUS 0x560
+#define PCIE_UTL_RC_ERR_SEVERITY 0x568
+#define PCIE_UTL_RC_IRQ_EN 0x570
+#define PCIE_UTL_EP_STATUS 0x578
+#define PCIE_UTL_EP_ERR_SEVERITY 0x580
+#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
+
+#define PCIE_UTL_PCI_PM_CTRL1 0x590
+#define PCIE_UTL_PCI_PM_CTRL2 0x598
+
+/* PCIe stack registers */
+#define PCIE_REG_SYSTEM_CONFIG1 0x600
+#define PCIE_REG_SYSTEM_CONFIG2 0x608
+#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
+#define PCIE_REG_EP_FLR 0x620
+#define PCIE_REG_EP_BAR_CONFIG 0x628
+#define PCIE_REG_LINK_CONFIG 0x630
+#define PCIE_REG_PM_CONFIG 0x640
+#define PCIE_REG_DLP_CONTROL 0x650
+#define PCIE_REG_DLP_STATUS 0x658
+#define PCIE_REG_ERR_REPORT_CONTROL 0x660
+#define PCIE_REG_SLOT_CONTROL1 0x670
+#define PCIE_REG_SLOT_CONTROL2 0x678
+#define PCIE_REG_UTL_CONFIG 0x680
+#define PCIE_REG_BUFFERS_CONFIG 0x690
+#define PCIE_REG_ERROR_INJECT 0x698
+#define PCIE_REG_SRIOV_CONFIG 0x6a0
+#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
+#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
+#define PCIE_REG_PORT_NUMBER 0x700
+#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
+
+/* PHB internal logic registers */
+#define PCIE_REG_PHB_VERSION 0x800
+#define PCIE_REG_RESET 0x808
+#define PCIE_REG_PHB_CONTROL 0x810
+#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
+#define PCIE_REG_PHB_QUIESCE_DMA 0x888
+#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
+#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
+
+/* FIR registers */
+#define PCIE_REG_LEM_FIR_ACCUM 0xc00
+#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
+#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
+#define PCIE_REG_LEM_ACTION0 0xc18
+#define PCIE_REG_LEM_ACTION1 0xc20
+#define PCIE_REG_LEM_ERROR_MASK 0xc30
+#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
+#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
+
+/* PHB Error registers */
+#define PCIE_REG_PHB_ERR_STATUS 0xc80
+#define PCIE_REG_PHB_ERR1_STATUS 0xc88
+#define PCIE_REG_PHB_ERR_INJECT 0xc90
+#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
+#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
+#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
+#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
+#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
+#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
+#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
+#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
+
+#define PCIE_REG_MMIO_ERR_STATUS 0xd00
+#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
+#define PCIE_REG_MMIO_ERR_INJECT 0xd10
+#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
+#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
+#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
+#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
+#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
+#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
+#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
+#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
+
+#define PCIE_REG_DMA_ERR_STATUS 0xd80
+#define PCIE_REG_DMA_ERR1_STATUS 0xd88
+#define PCIE_REG_DMA_ERR_INJECT 0xd90
+#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
+#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
+#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
+#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
+#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
+#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
+#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
+#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
+
+/* Shortcuts for access to the above using the PHB definitions
+ * with an offset
+ */
+#define PCIE_REG_ERR_PHB_OFFSET 0x0
+#define PCIE_REG_ERR_MMIO_OFFSET 0x80
+#define PCIE_REG_ERR_DMA_OFFSET 0x100
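+/* e.g. PCIE_REG_DMA_ERR_STATUS == PCIE_REG_PHB_ERR_STATUS + PCIE_REG_ERR_DMA_OFFSET */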
+
+/* Debug and Trace registers */
+#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
+#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
+#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
+#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
+#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
+#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
+#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
+#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
+#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
+#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
+#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
+#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
+#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
+#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
+
+/* Snapshot of the PHB and UTL error registers, filled in by the error interrupt handler */
+struct wsp_pcie_err_log_data {
+ __u64 phb_err;
+ __u64 phb_err1;
+ __u64 phb_log0;
+ __u64 phb_log1;
+ __u64 mmio_err;
+ __u64 mmio_err1;
+ __u64 mmio_log0;
+ __u64 mmio_log1;
+ __u64 dma_err;
+ __u64 dma_err1;
+ __u64 dma_log0;
+ __u64 dma_log1;
+ __u64 utl_sys_err;
+ __u64 utl_port_err;
+ __u64 utl_rc_err;
+ __u64 unused;
+};
+
+#endif /* __WSP_PCI_H */