author     Linus Torvalds <torvalds@g5.osdl.org>  2006-08-03 12:50:20 -0700
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-08-03 12:50:20 -0700
commit     c31ca59e25f82879644088c97fe9cffbaa292786 (patch)
tree       eb1652fb3fe83d5aa2c943ec4e072ec6d775e424 /arch/ia64
parent     fd60ae404f104f12369e654af9cf03b1f1047661 (diff)
parent     e44e41d0c832ebbda7311a1fe43584d844026357 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix show_mem for VIRTUAL_MEM_MAP+FLATMEM
  [IA64] align high endpoint of VIRTUAL_MEM_MAP
  [PATCH] Fix RAID5 + IA64 compile
  [IA64] Don't alloc empty frame in ia64_switch_mode_phys
  [IA64] Do not assume output registers to be reserved.
  [IA64] add platform check to snsc driver init
  [IA64] sparse cleanups
  [IA64] Fix breakage in simscsi.c
  [IA64] Format /proc/pal/*/version_info correctly
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/hp/sim/simscsi.c         |  4
-rw-r--r--  arch/ia64/kernel/efi.c             |  6
-rw-r--r--  arch/ia64/kernel/head.S            |  2
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c      |  2
-rw-r--r--  arch/ia64/kernel/pal.S             | 18
-rw-r--r--  arch/ia64/kernel/palinfo.c         | 34
-rw-r--r--  arch/ia64/lib/Makefile             |  2
-rw-r--r--  arch/ia64/mm/contig.c              | 16
-rw-r--r--  arch/ia64/mm/discontig.c           | 68
-rw-r--r--  arch/ia64/mm/init.c                | 55
-rw-r--r--  arch/ia64/mm/ioremap.c             |  6
-rw-r--r--  arch/ia64/sn/kernel/xpc_main.c     |  2
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c  |  6
13 files changed, 107 insertions(+), 114 deletions(-)
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index a3fe97531134..8a4f0d0d17a3 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -151,7 +151,7 @@ static void
simscsi_sg_readwrite (struct scsi_cmnd *sc, int mode, unsigned long offset)
{
int list_len = sc->use_sg;
- struct scatterlist *sl = (struct scatterlist *)sc->buffer;
+ struct scatterlist *sl = (struct scatterlist *)sc->request_buffer;
struct disk_stat stat;
struct disk_req req;
@@ -244,7 +244,7 @@ static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
if (scatterlen == 0)
memcpy(sc->request_buffer, buf, len);
- else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+ else for (slp = (struct scatterlist *)sc->request_buffer; scatterlen-- > 0 && len > 0; slp++) {
unsigned thislen = min(len, slp->length);
memcpy(page_address(slp->page) + slp->offset, buf, thislen);
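
The simscsi fix above switches from the removed sc->buffer field to sc->request_buffer when the command carries a scatter-gather list. To illustrate the copy pattern the driver uses (walk the list, copy at most the segment length into each entry), here is a minimal userspace sketch; the struct and helper names are invented stand-ins, not the kernel's scatterlist API.

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a scatter-gather segment: in the kernel this would
 * be struct scatterlist with a page pointer and offset rather than a plain
 * virtual address. */
struct sg_seg {
	char    *addr;     /* where this segment's data lives */
	unsigned length;   /* bytes available in this segment */
};

/* Copy a linear result buffer into a scatter-gather list, segment by
 * segment, mirroring the loop shape in simscsi_fillresult(). */
static void fill_sg(struct sg_seg *sg, int nsegs, const char *buf, unsigned len)
{
	for (; nsegs-- > 0 && len > 0; sg++) {
		unsigned thislen = len < sg->length ? len : sg->length;

		memcpy(sg->addr, buf, thislen);
		buf += thislen;
		len -= thislen;
	}
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct sg_seg sg[2] = { { a, sizeof(a) }, { b, sizeof(b) } };

	fill_sg(sg, 2, "HELLOSCSI", 9);
	printf("%.4s|%.5s\n", a, b);	/* prints HELL|OSCSI */
	return 0;
}
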
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index e4bfa9dafbce..bb8770a177b5 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -632,7 +632,7 @@ kern_memory_descriptor (unsigned long phys_addr)
if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
static efi_memory_desc_t *
@@ -652,7 +652,7 @@ efi_memory_descriptor (unsigned long phys_addr)
if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
- return 0;
+ return NULL;
}
u32
@@ -923,7 +923,7 @@ find_memmap_space (void)
void
efi_memmap_init(unsigned long *s, unsigned long *e)
{
- struct kern_memdesc *k, *prev = 0;
+ struct kern_memdesc *k, *prev = NULL;
u64 contig_low=0, contig_high=0;
u64 as, ae, lim;
void *efi_map_start, *efi_map_end, *p, *q;
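
The efi.c hunks are sparse cleanups: functions declared to return a pointer (efi_memory_desc_t * or struct kern_memdesc *) now say NULL instead of the integer 0 on the "not found" path. A minimal sketch of the same lookup shape is below, with an invented descriptor type standing in for the EFI memory descriptor; the point is only that pointer-returning failure paths should use NULL.

#include <stddef.h>
#include <stdio.h>

/* Invented, simplified stand-in for an EFI memory descriptor. */
struct mem_desc {
	unsigned long start;      /* first byte covered */
	unsigned long num_pages;  /* length in pages, for this sketch */
};

#define PAGE_SHIFT_SKETCH 12      /* assumed 4 KiB pages, illustration only */

/* Return the descriptor covering phys_addr, or NULL if none does --
 * the same contract as kern_memory_descriptor()/efi_memory_descriptor(). */
static const struct mem_desc *
find_descriptor(const struct mem_desc *map, size_t n, unsigned long phys_addr)
{
	for (size_t i = 0; i < n; i++) {
		const struct mem_desc *md = &map[i];

		if (phys_addr - md->start < (md->num_pages << PAGE_SHIFT_SKETCH))
			return md;
	}
	return NULL;	/* NULL, not 0: this is a pointer, not an integer */
}

int main(void)
{
	struct mem_desc map[] = { { 0x100000, 16 }, { 0x200000, 4 } };

	printf("%p\n", (void *)find_descriptor(map, 2, 0x104000)); /* found */
	printf("%p\n", (void *)find_descriptor(map, 2, 0x300000)); /* NULL */
	return 0;
}
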
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 561b8f1d3bc7..29236f0c62b5 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -853,7 +853,6 @@ END(__ia64_init_fpu)
*/
GLOBAL_ENTRY(ia64_switch_mode_phys)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
@@ -902,7 +901,6 @@ END(ia64_switch_mode_phys)
*/
GLOBAL_ENTRY(ia64_switch_mode_virt)
{
- alloc r2=ar.pfs,0,0,0,0
rsm psr.i | psr.ic // disable interrupts and interrupt collection
mov r15=ip
}
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index b7cf651ceb14..3ead20fb6f4b 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(__udivdi3);
EXPORT_SYMBOL(__moddi3);
EXPORT_SYMBOL(__umoddi3);
-#if defined(CONFIG_MD_RAID5) || defined(CONFIG_MD_RAID5_MODULE)
+#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
extern void xor_ia64_2(void);
extern void xor_ia64_3(void);
extern void xor_ia64_4(void);
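
The ia64_ksyms.c and lib/Makefile hunks track the rename of the RAID5 config symbol to CONFIG_MD_RAID456. As a reminder of the usual pattern for a tristate option (a built-in selection defines CONFIG_FOO, a modular build defines CONFIG_FOO_MODULE), here is a tiny sketch; the option name is the one from this patch, everything else is purely illustrative.

#include <stdio.h>

/* Simulate the kernel configuration: define exactly one of these, or none.
 *   CONFIG_MD_RAID456        -> driver built into the kernel image
 *   CONFIG_MD_RAID456_MODULE -> driver built as a module */
#define CONFIG_MD_RAID456

/* Export the ia64 xor helpers only when some form of RAID456 is enabled,
 * matching the guard used in ia64_ksyms.c. */
#if defined(CONFIG_MD_RAID456) || defined(CONFIG_MD_RAID456_MODULE)
#define HAVE_XOR_HELPERS 1
#else
#define HAVE_XOR_HELPERS 0
#endif

int main(void)
{
	printf("xor helpers exported: %s\n", HAVE_XOR_HELPERS ? "yes" : "no");
	return 0;
}
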
diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
index 5018c7f2e7a8..ebaf1e685f5e 100644
--- a/arch/ia64/kernel/pal.S
+++ b/arch/ia64/kernel/pal.S
@@ -217,12 +217,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
.body
;;
ld8 loc2 = [loc2] // loc2 <- entry point
- mov out0 = in0 // first argument
- mov out1 = in1 // copy arg2
- mov out2 = in2 // copy arg3
- mov out3 = in3 // copy arg3
- ;;
- mov loc3 = psr // save psr
+ mov loc3 = psr // save psr
;;
mov loc4=ar.rsc // save RSE configuration
dep.z loc2=loc2,0,61 // convert pal entry point to physical
@@ -236,18 +231,23 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked)
;;
andcm r16=loc3,r16 // removes bits to clear from psr
br.call.sptk.many rp=ia64_switch_mode_phys
-.ret6:
+
+ mov out0 = in0 // first argument
+ mov out1 = in1 // copy arg2
+ mov out2 = in2 // copy arg3
+ mov out3 = in3 // copy arg3
mov loc5 = r19
mov loc6 = r20
+
br.call.sptk.many rp=b7 // now make the call
-.ret7:
+
mov ar.rsc=0 // put RSE in enforced lazy, LE mode
mov r16=loc3 // r16= original psr
mov r19=loc5
mov r20=loc6
br.call.sptk.many rp=ia64_switch_mode_virt // return to virtual mode
-.ret8: mov psr.l = loc3 // restore init PSR
+ mov psr.l = loc3 // restore init PSR
mov ar.pfs = loc1
mov rp = loc0
;;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index ab5b52413e91..0b546e2b36ac 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -566,29 +566,23 @@ version_info(char *page)
pal_version_u_t min_ver, cur_ver;
char *p = page;
- /* The PAL_VERSION call is advertised as being able to support
- * both physical and virtual mode calls. This seems to be a documentation
- * bug rather than firmware bug. In fact, it does only support physical mode.
- * So now the code reflects this fact and the pal_version() has been updated
- * accordingly.
- */
- if (ia64_pal_version(&min_ver, &cur_ver) != 0) return 0;
+ if (ia64_pal_version(&min_ver, &cur_ver) != 0)
+ return 0;
p += sprintf(p,
"PAL_vendor : 0x%02x (min=0x%02x)\n"
- "PAL_A : %x.%x.%x (min=%x.%x.%x)\n"
- "PAL_B : %x.%x.%x (min=%x.%x.%x)\n",
- cur_ver.pal_version_s.pv_pal_vendor, min_ver.pal_version_s.pv_pal_vendor,
-
- cur_ver.pal_version_s.pv_pal_a_model>>4,
- cur_ver.pal_version_s.pv_pal_a_model&0xf, cur_ver.pal_version_s.pv_pal_a_rev,
- min_ver.pal_version_s.pv_pal_a_model>>4,
- min_ver.pal_version_s.pv_pal_a_model&0xf, min_ver.pal_version_s.pv_pal_a_rev,
-
- cur_ver.pal_version_s.pv_pal_b_model>>4,
- cur_ver.pal_version_s.pv_pal_b_model&0xf, cur_ver.pal_version_s.pv_pal_b_rev,
- min_ver.pal_version_s.pv_pal_b_model>>4,
- min_ver.pal_version_s.pv_pal_b_model&0xf, min_ver.pal_version_s.pv_pal_b_rev);
+ "PAL_A : %02x.%02x (min=%02x.%02x)\n"
+ "PAL_B : %02x.%02x (min=%02x.%02x)\n",
+ cur_ver.pal_version_s.pv_pal_vendor,
+ min_ver.pal_version_s.pv_pal_vendor,
+ cur_ver.pal_version_s.pv_pal_a_model,
+ cur_ver.pal_version_s.pv_pal_a_rev,
+ min_ver.pal_version_s.pv_pal_a_model,
+ min_ver.pal_version_s.pv_pal_a_rev,
+ cur_ver.pal_version_s.pv_pal_b_model,
+ cur_ver.pal_version_s.pv_pal_b_rev,
+ min_ver.pal_version_s.pv_pal_b_model,
+ min_ver.pal_version_s.pv_pal_b_rev);
return p - page;
}
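
The palinfo.c hunk changes /proc/pal/*/version_info to print the PAL_A/PAL_B model and revision bytes as two zero-padded hex fields instead of splitting the model byte into nibbles. A small sketch of that formatting follows; the version values are made up and the pal_version_u_t layout is reduced to plain bytes purely for illustration.

#include <stdio.h>

/* Reduced stand-in for the fields of pal_version_u_t used by version_info(). */
struct pal_version_sketch {
	unsigned char vendor;
	unsigned char pal_a_model, pal_a_rev;
	unsigned char pal_b_model, pal_b_rev;
};

int main(void)
{
	/* Made-up current/minimum version values, for illustration only. */
	struct pal_version_sketch cur = { 0x00, 0x01, 0x1f, 0x02, 0x26 };
	struct pal_version_sketch min = { 0x00, 0x01, 0x0a, 0x02, 0x11 };
	char page[256];
	char *p = page;

	/* Same %02x.%02x style as the patched version_info(). */
	p += sprintf(p,
		     "PAL_vendor : 0x%02x (min=0x%02x)\n"
		     "PAL_A      : %02x.%02x (min=%02x.%02x)\n"
		     "PAL_B      : %02x.%02x (min=%02x.%02x)\n",
		     cur.vendor, min.vendor,
		     cur.pal_a_model, cur.pal_a_rev,
		     min.pal_a_model, min.pal_a_rev,
		     cur.pal_b_model, cur.pal_b_rev,
		     min.pal_b_model, min.pal_b_rev);

	fputs(page, stdout);
	return 0;
}
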
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index d8536a2c22a9..38fa6e49e791 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -14,7 +14,7 @@ lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \
lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
lib-$(CONFIG_MCKINLEY) += copy_page_mck.o memcpy_mck.o
lib-$(CONFIG_PERFMON) += carta_random.o
-lib-$(CONFIG_MD_RAID5) += xor.o
+lib-$(CONFIG_MD_RAID456) += xor.o
AFLAGS___divdi3.o =
AFLAGS___udivdi3.o = -DUNSIGNED
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2a88cdd6d924..e004143ba86b 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -27,6 +27,7 @@
#ifdef CONFIG_VIRTUAL_MEM_MAP
static unsigned long num_dma_physpages;
+static unsigned long max_gap;
#endif
/**
@@ -45,9 +46,15 @@ show_mem (void)
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr;
- while (i-- > 0) {
- if (!pfn_valid(i))
+ for (i = 0; i < max_mapnr; i++) {
+ if (!pfn_valid(i)) {
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ if (max_gap < LARGE_GAP)
+ continue;
+ i = vmemmap_find_next_valid_pfn(0, i) - 1;
+#endif
continue;
+ }
total++;
if (PageReserved(mem_map+i))
reserved++;
@@ -234,7 +241,6 @@ paging_init (void)
unsigned long zones_size[MAX_NR_ZONES];
#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long zholes_size[MAX_NR_ZONES];
- unsigned long max_gap;
#endif
/* initialize mem_map[] */
@@ -266,7 +272,6 @@ paging_init (void)
}
}
- max_gap = 0;
efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
if (max_gap < LARGE_GAP) {
vmem_map = (struct page *) 0;
@@ -277,7 +282,8 @@ paging_init (void)
/* allocate virtual_mem_map */
- map_size = PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmalloc_end -= map_size;
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
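
The paging_init() change above aligns the high endpoint of the virtual mem_map: max_low_pfn is first rounded up to a MAX_ORDER_NR_PAGES boundary before sizing the struct page array, so buddy-allocator scans over a max-order block never index past the mapped vmem_map. A userspace sketch of the arithmetic, using assumed ia64-ish constants (16 KiB pages, 1024 pages per max-order block, a 56-byte struct page) since the real values depend on the kernel configuration:

#include <stdio.h>

/* Assumed example values -- the real ones depend on the kernel config. */
#define PAGE_SIZE_SK          (16UL * 1024)   /* 16 KiB ia64 pages */
#define MAX_ORDER_NR_PAGES_SK 1024UL          /* pages per max-order block */
#define STRUCT_PAGE_SIZE_SK   56UL            /* sizeof(struct page), assumed */

/* Kernel-style rounding helpers (a must be a power of two). */
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define PAGE_ALIGN_SK(x) ALIGN_UP((x), PAGE_SIZE_SK)

int main(void)
{
	unsigned long max_low_pfn = 262147;	/* deliberately not block-aligned */

	unsigned long old_size =
		PAGE_ALIGN_SK(max_low_pfn * STRUCT_PAGE_SIZE_SK);
	unsigned long new_size =
		PAGE_ALIGN_SK(ALIGN_UP(max_low_pfn, MAX_ORDER_NR_PAGES_SK) *
			      STRUCT_PAGE_SIZE_SK);

	printf("unaligned endpoint: map_size = %lu bytes\n", old_size);
	printf("aligned endpoint:   map_size = %lu bytes\n", new_size);
	/* The aligned size covers whole MAX_ORDER blocks of struct page,
	 * so buddy scans cannot run off the end of vmem_map. */
	return 0;
}
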
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 99bd9e30db96..d260bffa01ab 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -534,68 +534,6 @@ void __cpuinit *per_cpu_init(void)
}
#endif /* CONFIG_SMP */
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- unsigned long end_address, hole_next_pfn;
- unsigned long stop_address;
-
- end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
- end_address = PAGE_ALIGN(end_address);
-
- stop_address = (unsigned long) &vmem_map[
- pgdat->node_start_pfn + pgdat->node_spanned_pages];
-
- do {
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- pgd = pgd_offset_k(end_address);
- if (pgd_none(*pgd)) {
- end_address += PGDIR_SIZE;
- continue;
- }
-
- pud = pud_offset(pgd, end_address);
- if (pud_none(*pud)) {
- end_address += PUD_SIZE;
- continue;
- }
-
- pmd = pmd_offset(pud, end_address);
- if (pmd_none(*pmd)) {
- end_address += PMD_SIZE;
- continue;
- }
-
- pte = pte_offset_kernel(pmd, end_address);
-retry_pte:
- if (pte_none(*pte)) {
- end_address += PAGE_SIZE;
- pte++;
- if ((end_address < stop_address) &&
- (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
- goto retry_pte;
- continue;
- }
- /* Found next valid vmem_map page */
- break;
- } while (end_address < stop_address);
-
- end_address = min(end_address, stop_address);
- end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
- hole_next_pfn = end_address / sizeof(struct page);
- return hole_next_pfn - pgdat->node_start_pfn;
-}
-#else
-static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
-{
- return i + 1;
-}
-#endif
-
/**
* show_mem - give short summary of memory stats
*
@@ -625,7 +563,8 @@ void show_mem(void)
if (pfn_valid(pgdat->node_start_pfn + i))
page = pfn_to_page(pgdat->node_start_pfn + i);
else {
- i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
+ i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+ i) - 1;
continue;
}
if (PageReserved(page))
@@ -751,7 +690,8 @@ void __init paging_init(void)
efi_memmap_walk(filter_rsvd_memory, count_node_pages);
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
+ vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
vmem_map = (struct page *) vmalloc_end;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2f50c064513c..30617ccb4f7e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -415,6 +415,61 @@ ia64_mmu_init (void *my_cpu_data)
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
+int vmemmap_find_next_valid_pfn(int node, int i)
+{
+ unsigned long end_address, hole_next_pfn;
+ unsigned long stop_address;
+ pg_data_t *pgdat = NODE_DATA(node);
+
+ end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+ end_address = PAGE_ALIGN(end_address);
+
+ stop_address = (unsigned long) &vmem_map[
+ pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+ do {
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pgd = pgd_offset_k(end_address);
+ if (pgd_none(*pgd)) {
+ end_address += PGDIR_SIZE;
+ continue;
+ }
+
+ pud = pud_offset(pgd, end_address);
+ if (pud_none(*pud)) {
+ end_address += PUD_SIZE;
+ continue;
+ }
+
+ pmd = pmd_offset(pud, end_address);
+ if (pmd_none(*pmd)) {
+ end_address += PMD_SIZE;
+ continue;
+ }
+
+ pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+ if (pte_none(*pte)) {
+ end_address += PAGE_SIZE;
+ pte++;
+ if ((end_address < stop_address) &&
+ (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+ goto retry_pte;
+ continue;
+ }
+ /* Found next valid vmem_map page */
+ break;
+ } while (end_address < stop_address);
+
+ end_address = min(end_address, stop_address);
+ end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+ hole_next_pfn = end_address / sizeof(struct page);
+ return hole_next_pfn - pgdat->node_start_pfn;
+}
int __init
create_mem_map_page_table (u64 start, u64 end, void *arg)
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 07bd02b6c372..4280c074d64e 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -32,7 +32,7 @@ ioremap (unsigned long offset, unsigned long size)
*/
attr = kern_mem_attribute(offset, size);
if (attr & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
@@ -43,7 +43,7 @@ ioremap (unsigned long offset, unsigned long size)
gran_base = GRANULEROUNDDOWN(offset);
gran_size = GRANULEROUNDUP(offset + size) - gran_base;
if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
- return phys_to_virt(offset);
+ return (void __iomem *) phys_to_virt(offset);
return __ioremap(offset, size);
}
@@ -53,7 +53,7 @@ void __iomem *
ioremap_nocache (unsigned long offset, unsigned long size)
{
if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
- return 0;
+ return NULL;
return __ioremap(offset, size);
}
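
ioremap_nocache() now returns NULL (rather than 0) when asked to map write-back memory uncached, and the cacheable paths cast the phys_to_virt() result to void __iomem * to keep sparse quiet. __iomem is a sparse-only annotation that compiles away; below is a caller-side sketch of checking the NULL return, with the annotation stubbed out and a toy stand-in for the mapping function so the example builds in userspace.

#include <stdio.h>
#include <stddef.h>

/* In the kernel __iomem is a sparse address-space attribute; it expands to
 * nothing for the compiler, so we can stub it the same way here. */
#define __iomem

/* Toy stand-in for ioremap_nocache(): pretend the low 1 MiB is write-back
 * memory that must not be mapped uncached, so we refuse it with NULL. */
static void __iomem *ioremap_nocache_sketch(unsigned long offset,
					    unsigned long size)
{
	static char fake_window[64];	/* pretend MMIO window */

	if (offset + size <= 0x100000)
		return NULL;		/* WB memory: refuse, as the patch does */
	return (void __iomem *)fake_window;
}

int main(void)
{
	void __iomem *regs = ioremap_nocache_sketch(0x80000, 0x1000);

	if (!regs)
		printf("mapping refused (write-back memory)\n");

	regs = ioremap_nocache_sketch(0xf8000000UL, 0x1000);
	if (regs)
		printf("got an uncached mapping\n");
	return 0;
}
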
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index 99b123a6421a..5e8e59efb347 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -480,7 +480,7 @@ xpc_activating(void *__partid)
partid_t partid = (u64) __partid;
struct xpc_partition *part = &xpc_partitions[partid];
unsigned long irq_flags;
- struct sched_param param = { sched_priority: MAX_RT_PRIO - 1 };
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
int ret;
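
The xpc_main.c hunk replaces the old GCC-only "field: value" initializer spelling with the standard C99 designated-initializer form; both set sched_priority, but only the latter is portable and sparse-clean. A minimal illustration follows, with MAX_RT_PRIO given an assumed value and the struct renamed only so the example compiles stand-alone.

#include <stdio.h>

#define MAX_RT_PRIO 100		/* assumed value, for illustration only */

struct sched_param_sketch {
	int sched_priority;
};

int main(void)
{
	/* Old, GCC-specific spelling (what the patch removes):
	 *     struct sched_param_sketch p = { sched_priority: MAX_RT_PRIO - 1 };
	 * Standard C99 designated initializer (what the patch adds): */
	struct sched_param_sketch param = { .sched_priority = MAX_RT_PRIO - 1 };

	printf("priority = %d\n", param.sched_priority);	/* prints 99 */
	return 0;
}
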
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 17cd34284886..af7171adcd2c 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -74,7 +74,7 @@ tioce_mmr_war_pre(struct tioce_kernel *kern, void *mmr_addr)
else
mmr_war_offset = 0x158;
- readq_relaxed((void *)(mmr_base + mmr_war_offset));
+ readq_relaxed((void __iomem *)(mmr_base + mmr_war_offset));
}
}
@@ -92,8 +92,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
if (mmr_offset < 0x45000) {
if (mmr_offset == 0x100)
- readq_relaxed((void *)(mmr_base + 0x38));
- readq_relaxed((void *)(mmr_base + 0xb050));
+ readq_relaxed((void __iomem *)(mmr_base + 0x38));
+ readq_relaxed((void __iomem *)(mmr_base + 0xb050));
}
}