Diffstat (limited to 'arch/ia64/sn/kernel')
-rw-r--r--  arch/ia64/sn/kernel/io_init.c      9
-rw-r--r--  arch/ia64/sn/kernel/irq.c        142
-rw-r--r--  arch/ia64/sn/kernel/setup.c        4
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c   15
4 files changed, 98 insertions(+), 72 deletions(-)
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 5101ac462643..dc09a6a28a37 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */
*/
static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{
return 0;
}
@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info);
}
-
- /*
- * MSI currently not supported on altix. Remove this when
- * the MSI abstraction patches are integrated into the kernel
- * (sometime after 2.6.16 releases)
- */
- dev->no_msi = 1;
}
/*
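Note: the new third argument to sn_default_pci_map() widens the dma_map provider callback so callers can say what kind of mapping they need, and dropping dev->no_msi = 1 (with its stale "sometime after 2.6.16" comment) is what actually re-enables MSI on Altix. A minimal sketch of how a caller might thread the new argument through; sn_dma_map_for() is hypothetical, and the SN_DMA_MSI/SN_DMA_ADDR_PHYS flag names are assumed from the rest of this MSI series, not defined in this diff:

    /*
     * Hypothetical wrapper (sketch only).  SN_DMA_MSI and
     * SN_DMA_ADDR_PHYS are assumed flag names; dma_map is the
     * provider callback whose signature this patch extends.
     */
    static dma_addr_t sn_dma_map_for(struct sn_pcibus_provider *provider,
                                     struct pci_dev *pdev,
                                     unsigned long paddr, size_t size,
                                     int is_msi)
    {
            int type = is_msi ? SN_DMA_MSI : SN_DMA_ADDR_PHYS;

            /* A provider that cannot map (like the default above)
               returns 0, which callers must treat as failure. */
            return provider->dma_map(pdev, paddr, size, type);
    }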
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c265e02f5036..dc8e2b696713 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
+struct list_head **sn_irq_lh;
static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
- u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
+
return ret_stuff.status;
}
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
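Note: sn_intr_alloc() and sn_intr_free() lose their static inline so the MSI code in another file can call them, and the __pa() conversion now happens inside sn_intr_alloc(), so callers pass a typed struct sn_irq_info * instead of a pre-converted u64. A sketch of an out-of-file caller; sn_alloc_prom_irq() is a hypothetical name, and only sn_intr_alloc()/sn_intr_free() and struct sn_irq_info come from this patch:

    /* Hypothetical caller showing the alloc/free pairing (sketch). */
    static struct sn_irq_info *sn_alloc_prom_irq(nasid_t local_nasid,
                                                 int local_widget, int vector,
                                                 nasid_t nasid, int slice)
    {
            struct sn_irq_info *info = kmalloc(sizeof(*info), GFP_KERNEL);

            if (!info)
                    return NULL;
            if (sn_intr_alloc(local_nasid, local_widget, info,
                              vector, nasid, slice)) {
                    kfree(info);            /* SAL/PROM refused the vector */
                    return NULL;
            }
            return info;    /* pair with sn_intr_free() on teardown */
    }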
@@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq)
static void sn_irq_info_free(struct rcu_head *head);
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- int cpuid, cpuphys;
+ int vector;
+ int cpuphys;
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
- cpuid = first_cpu(mask);
- cpuphys = cpu_physical_id(cpuid);
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list) {
- u64 bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
- struct sn_pcibus_provider *pci_provider;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- break;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (u64) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- break; /* irq is not a device interrupt */
- }
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+ bridge = (u64) new_irq_info->irq_bridge;
+ if (!bridge) {
+ kfree(new_irq_info);
+ return NULL; /* irq is not a device interrupt */
+ }
- local_nasid = NASID_GET(bridge);
+ local_nasid = NASID_GET(bridge);
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
+ vector = sn_irq_info->irq_irq;
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ /* Update kernel's new_irq_info with new target info */
+ unregister_intr_pda(new_irq_info);
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- __pa(new_irq_info), irq,
- cpuid_to_nasid(cpuid),
- cpuid_to_slice(cpuid));
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- break;
- }
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
- new_irq_info->irq_cpuid = cpuid;
- register_intr_pda(new_irq_info);
+ cpuphys = nasid_slice_to_cpuid(nasid, slice);
+ new_irq_info->irq_cpuid = cpuphys;
+ register_intr_pda(new_irq_info);
- pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->target_interrupt)
- (pci_provider->target_interrupt)(new_irq_info);
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- spin_lock(&sn_irq_info_lock);
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
- spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ /*
+ * If this represents a line interrupt, target it. If it's
+ * an msi (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
+
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif
- }
+
+ return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(first_cpu(mask));
+ slice = cpuid_to_slice(first_cpu(mask));
+
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
struct hw_interrupt_type irq_type_sn = {
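Note: the per-entry body of sn_set_affinity_irq() has been factored out into sn_retarget_vector(), which takes an explicit (nasid, slice) target and returns the replacement sn_irq_info (NULL on failure, with the old entry left in place). That explicit-target form is the hook the MSI code needs, since it must retarget one vector without going through the irq affinity path; note too that target_interrupt() is now skipped when irq_int_bit < 0, because an MSI carries its destination in the message address rather than in bridge interrupt hardware. A sketch of an MSI-side caller; sn_msi_retarget() is hypothetical:

    /* Hypothetical MSI-side user of the new helper (sketch). */
    static struct sn_irq_info *sn_msi_retarget(struct sn_irq_info *info,
                                               unsigned int cpu)
    {
            /* On success the old info has been list_replace_rcu()'d
               and queued for RCU free; use only the returned one. */
            return sn_retarget_vector(info, cpuid_to_nasid(cpu),
                                      cpuid_to_slice(cpu));
    }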
@@ -202,6 +221,9 @@ void sn_irq_init(void)
int i;
irq_desc_t *base_desc = irq_desc;
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
for (i = 0; i < NR_IRQS; i++) {
if (base_desc[i].handler == &no_irq_type) {
base_desc[i].handler = &irq_type_sn;
@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+ reserve_irq_vector(sn_irq_info->irq_irq);
spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info);
@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
pci_dev_put(pci_dev);
+
}
static inline void
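Note: sn_irq_fixup() now reserves the IA-64 vector when it links an sn_irq_info, and sn_irq_unfixup() frees it only once the per-irq list is empty, so a vector shared by several devices stays reserved until the last user is gone. Together with the IA64_SN2_FIRST/LAST_DEVICE_VECTOR bounds installed in sn_irq_init(), this keeps fixed device vectors from colliding with dynamically allocated MSI vectors. The invariant in sketch form; the wrapper names are illustrative, while the calls inside are the ones used above:

    /* Sketch of the reserve/free pairing established above. */
    static void sn_irq_link(struct sn_irq_info *info)
    {
            spin_lock(&sn_irq_info_lock);
            list_add_rcu(&info->list, sn_irq_lh[info->irq_irq]);
            reserve_irq_vector(info->irq_irq);      /* mark vector in use */
            spin_unlock(&sn_irq_info_lock);
    }

    static void sn_irq_unlink(struct sn_irq_info *info)
    {
            spin_lock(&sn_irq_info_lock);
            list_del_rcu(&info->list);
            spin_unlock(&sn_irq_info_lock);
            if (list_empty(sn_irq_lh[info->irq_irq]))
                    free_irq_vector(info->irq_irq); /* last user gone */
    }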
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 30988dfbddff..93577abae36d 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -139,7 +139,7 @@ static int __init pxm_to_nasid(int pxm)
int i;
int nid;
- nid = pxm_to_nid_map[pxm];
+ nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
@@ -704,7 +704,7 @@ void __init build_cnode_tables(void)
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
- nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+ nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
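Note: the raw pxm_to_nid_map[]/nid_to_pxm_map[] arrays are replaced by the generic pxm_to_node()/node_to_pxm() accessors; the translation chain itself is unchanged. In sketch form, using this file's tables:

    /* node id <-> proximity domain <-> NASID (sketch). */
    int pxm = node_to_pxm(node);            /* logical node -> ACPI PXM */
    nasid_t nasid = pxm_to_nasid(pxm);      /* PXM -> hardware NASID */
    int back = physical_node_map[nasid];    /* NASID -> logical node */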
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0f..2862cb33026d 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
- flush_icache_range(flush_addr, flush_addr+bytes);
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
- flush_icache_range(flush_addr, flush_addr+bytes);
+ flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);
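Note: SHub1 cannot flush through an uncached (region 6) alias, so the flush address is first rebased into the cached kernel identity region (region 7); both aliases name the same physical memory, so flushing the cached one flushes the right lines. The region arithmetic in sketch form; phys stands for an arbitrary physical address, and the RGN_* macros are the ones used above:

    /* Uncached and cached kernel identity mappings of one phys addr. */
    unsigned long uc = RGN_BASE(RGN_UNCACHED) + phys;       /* region 6 */
    unsigned long c  = (uc - RGN_BASE(RGN_UNCACHED))
                       + RGN_BASE(RGN_KERNEL);              /* region 7 */
    /* (uc & RGN_BITS) isolates the region bits; the remaining bits
       carry phys, which is why the rebase above is exact. */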