path: root/arch/sparc/mm/srmmu.c
Diffstat (limited to 'arch/sparc/mm/srmmu.c')
-rw-r--r--  arch/sparc/mm/srmmu.c | 41
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 23d3291a3e81..ee30462598fc 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -50,7 +50,7 @@
#include <asm/btfixup.h>
enum mbus_module srmmu_modtype;
-unsigned int hwbug_bitmask;
+static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;
@@ -60,7 +60,7 @@ extern unsigned long last_valid_pfn;
extern unsigned long page_kernel;
-pgd_t *srmmu_swapper_pg_dir;
+static pgd_t *srmmu_swapper_pg_dir;
#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
@@ -83,12 +83,12 @@ BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
char *srmmu_name;
ctxd_t *srmmu_ctx_table_phys;
-ctxd_t *srmmu_context_table;
+static ctxd_t *srmmu_context_table;
int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);
-int is_hypersparc;
+static int is_hypersparc;
/*
* In general all page table modifications should use the V8 atomic
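(The comment above, cut short by the hunk's context window, refers to the SPARC V8 atomic swap instruction: updating a live page-table word through a single swap means the hardware table walker never sees a torn store. A minimal sketch of that pattern follows; the helper names are invented for illustration and are not necessarily the ones this file uses.)

/* Illustrative only: atomically exchange a page-table word using the
 * SPARC V8 "swap" instruction so the MMU never observes a partial update.
 */
static inline unsigned long pt_swap(volatile unsigned long *addr,
				    unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (value)
			     : "0" (value), "r" (addr));
	return value;			/* previous contents of *addr */
}

static inline void pt_set_entry(unsigned long *entry, unsigned long val)
{
	pt_swap(entry, val);		/* old value is simply discarded */
}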
@@ -112,11 +112,11 @@ static inline int srmmu_device_memory(unsigned long x)
return ((x & 0xF0000000) != 0);
}
-int srmmu_cache_pagetables;
+static int srmmu_cache_pagetables;
/* these will be initialized in srmmu_nocache_calcsize() */
-unsigned long srmmu_nocache_size;
-unsigned long srmmu_nocache_end;
+static unsigned long srmmu_nocache_size;
+static unsigned long srmmu_nocache_end;
/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
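(The arithmetic behind the "1 bit <=> 256 bytes of nocache <=> 64 PTEs" comment, assuming the usual sparc32 configuration of 4 KB pages, i.e. PAGE_SHIFT == 12, and 4-byte PTEs; the macro names below are illustrative only, not taken from the file.)

/* Worked example, not code from the file:
 *   SRMMU_NOCACHE_BITMAP_SHIFT = PAGE_SHIFT - 4 = 12 - 4 = 8
 *   bytes covered per bitmap bit = 1 << 8 = 256
 *   PTEs covered per bitmap bit  = 256 / sizeof(pte_t) = 256 / 4 = 64
 */
#define EXAMPLE_NOCACHE_SHIFT	(12 - 4)			/* 8   */
#define EXAMPLE_CHUNK_BYTES	(1UL << EXAMPLE_NOCACHE_SHIFT)	/* 256 */
#define EXAMPLE_PTES_PER_BIT	(EXAMPLE_CHUNK_BYTES / 4)	/* 64  */

(This is also why __srmmu_get_nocache() can turn a bitmap offset straight into a virtual address with offset << SRMMU_NOCACHE_BITMAP_SHIFT, as the next hunk shows.)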
@@ -324,7 +324,7 @@ static unsigned long __srmmu_get_nocache(int size, int align)
return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
-unsigned inline long srmmu_get_nocache(int size, int align)
+static unsigned long srmmu_get_nocache(int size, int align)
{
unsigned long tmp;
@@ -336,7 +336,7 @@ unsigned inline long srmmu_get_nocache(int size, int align)
return tmp;
}
-void srmmu_free_nocache(unsigned long vaddr, int size)
+static void srmmu_free_nocache(unsigned long vaddr, int size)
{
int offset;
@@ -369,7 +369,8 @@ void srmmu_free_nocache(unsigned long vaddr, int size)
bit_map_clear(&srmmu_nocache_map, offset, size);
}
-void srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end);
+static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
+ unsigned long end);
extern unsigned long probe_memory(void); /* in fault.c */
@@ -377,7 +378,7 @@ extern unsigned long probe_memory(void); /* in fault.c */
* Reserve nocache dynamically proportionally to the amount of
* system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
*/
-void srmmu_nocache_calcsize(void)
+static void srmmu_nocache_calcsize(void)
{
unsigned long sysmemavail = probe_memory() / 1024;
int srmmu_nocache_npages;
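(A minimal sketch of the sizing policy the comment above describes, i.e. reserving a nocache slice proportional to system RAM; the ratio, bounds and helper name are assumptions for illustration and do not reproduce the real function.)

/* Hypothetical: scale the nocache pool with available RAM and clamp it to
 * a sane window. The real srmmu_nocache_calcsize() uses its own ratio and
 * limits.
 */
static unsigned long example_nocache_bytes(unsigned long sysmem_kb)
{
	unsigned long pages = sysmem_kb / 256;	/* illustrative ratio    */

	if (pages < 256)			/* illustrative floor    */
		pages = 256;
	if (pages > 1280)			/* illustrative ceiling  */
		pages = 1280;

	return pages * 4096;			/* assumes 4 KB pages    */
}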
@@ -398,7 +399,7 @@ void srmmu_nocache_calcsize(void)
srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
-void __init srmmu_nocache_init(void)
+static void __init srmmu_nocache_init(void)
{
unsigned int bitmap_bits;
pgd_t *pgd;
@@ -645,7 +646,7 @@ static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
* mappings on the kernel stack without any special code as we did
* need on the sun4c.
*/
-struct thread_info *srmmu_alloc_thread_info(void)
+static struct thread_info *srmmu_alloc_thread_info(void)
{
struct thread_info *ret;
@@ -1045,13 +1046,14 @@ extern void hypersparc_setup_blockops(void);
* around 8mb mapped for us.
*/
-void __init early_pgtable_allocfail(char *type)
+static void __init early_pgtable_allocfail(char *type)
{
prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
prom_halt();
}
-void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
+ unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -1081,7 +1083,8 @@ void __init srmmu_early_allocate_ptable_skeleton(unsigned long start, unsigned l
}
}
-void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long end)
+static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
+ unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -1116,7 +1119,8 @@ void __init srmmu_allocate_ptable_skeleton(unsigned long start, unsigned long en
* looking at the prom's page table directly which is what most
* other OS's do. Yuck... this is much better.
*/
-void __init srmmu_inherit_prom_mappings(unsigned long start,unsigned long end)
+static void __init srmmu_inherit_prom_mappings(unsigned long start,
+ unsigned long end)
{
pgd_t *pgdp;
pmd_t *pmdp;
@@ -1348,8 +1352,7 @@ void __init srmmu_paging_init(void)
zones_size[ZONE_HIGHMEM] = npages;
zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
- free_area_init_node(0, &contig_page_data, zones_size,
- pfn_base, zholes_size);
+ free_area_init_node(0, zones_size, pfn_base, zholes_size);
}
}
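(To make the last hunk easier to read out of context: free_area_init_node() receives arrays indexed by zone, where the "size" entry is the zone's full page span and the "holes" entry is the portion of that span with no memory behind it. A small sketch with invented numbers follows; ZONE_HIGHMEM comes from the kernel headers, and calc_highpages() in the real code counts the highmem pages actually present.)

/* Illustration only: how the HIGHMEM bookkeeping above fits together. */
static void example_highmem_zone_setup(unsigned long *zones_size,
				       unsigned long *zholes_size)
{
	unsigned long npages  = 0x8000;	/* invented: highmem page span  */
	unsigned long present = 0x6000;	/* invented: calc_highpages()   */

	zones_size[ZONE_HIGHMEM]  = npages;
	zholes_size[ZONE_HIGHMEM] = npages - present;	/* 0x2000 holes */
}

(The updated call then passes only the node id, the two arrays and pfn_base; the explicit &contig_page_data argument is gone, presumably because the function can derive the node data from the node id itself.)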