author     David S. Miller <davem@davemloft.net>  2012-02-04 16:39:32 -0500
committer  David S. Miller <davem@davemloft.net>  2012-02-04 16:39:32 -0500
commit     dd48dc34fe7639a8b2e22d8b609672f5f81aa7cb (patch)
tree       f16ace0ae09edab16bf6d0be9e8280dfbb7100da /arch/arm/mm
parent     8d9eb069eafce49307f839783e4a4673414b1fd5 (diff)
parent     5962b35c1de3254a2f03b95efd3b7854b874d7b7 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig     1
-rw-r--r--  arch/arm/mm/init.c      2
-rw-r--r--  arch/arm/mm/ioremap.c   3
-rw-r--r--  arch/arm/mm/proc-v7.S  26
4 files changed, 14 insertions(+), 18 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4cefb57d9ed2..1a3ca2488164 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -882,6 +882,7 @@ config CACHE_XSC3L2
 config ARM_L1_CACHE_SHIFT_6
 	bool
+	default y if CPU_V7
 	help
 	  Setting ARM L1 cache line size to 64 Bytes.
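The added "default y if CPU_V7" makes the 64-byte L1 cache line size the default on every ARMv7 build instead of something individual platforms have to select. As a rough illustration of what a line-size shift of 6 means in C (the macro names echo arch/arm/include/asm/cache.h, but the wiring below is an assumption, not the kernel header):

/* Illustration only: a line-size shift of 6 gives 64-byte cache lines. */
#define L1_CACHE_SHIFT	6			/* what ARM_L1_CACHE_SHIFT_6 stands for */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 64 */

/* Data touched by different CPUs is commonly padded to a full line so that
 * two writers never share (and keep stealing) one cache line. */
struct per_cpu_counter {
	unsigned long value;
} __attribute__((aligned(L1_CACHE_BYTES)));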
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 6ec1226fc62d..5dc7d127a40f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -310,7 +310,7 @@ static void arm_memory_present(void)
 static bool arm_memblock_steal_permitted = true;
-phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 {
 	phys_addr_t phys;
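The only change here is the __init annotation: arm_memblock_steal() is used only during early boot (note the arm_memblock_steal_permitted flag in the surrounding context), so its code can live in the init section and be discarded once boot finishes. A minimal sketch of that pattern, with invented names and a simplified attribute rather than the kernel's own macros:

/* Sketch only: placing boot-only code in a dedicated section.  In the
 * kernel this section is freed after boot, so late callers would be a bug. */
#include <stdbool.h>

#define boot_only __attribute__((section(".init.text")))

static bool steal_permitted = true;	/* cleared once early setup finishes */

unsigned long boot_only steal_region(unsigned long size, unsigned long align)
{
	if (!steal_permitted)		/* only valid during early boot */
		return 0;
	/* ... carve 'size' bytes, aligned to 'align', out of the memory map ... */
	return 0;			/* placeholder for the physical address */
}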
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 80632e8d7538..ba159370fa5f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -225,7 +225,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
 			continue;
 		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+		    __pfn_to_phys(pfn) + offset + size-1 >
+		    area->phys_addr + area->size-1)
 			continue;
 		/* we can drop the lock here as we know *area is static */
 		read_unlock(&vmlist_lock);
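The fix adds 'offset' (the sub-page offset of the requested physical address) to the end-of-range check. Without it, a request that does not start on a page boundary had its last byte computed 'offset' bytes too low, so an existing static mapping could be reused even though it did not cover the tail of the request. A simplified, hedged version of the corrected check in plain C (names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in for a pre-existing static mapping. */
struct static_map {
	uint64_t phys_addr;	/* start of the mapped physical range */
	uint64_t size;		/* length of the mapped range in bytes */
};

/* True if [page_base + offset, page_base + offset + size - 1] lies entirely
 * inside the mapping.  Dropping 'offset' here reproduces the original bug. */
static bool request_fits(const struct static_map *m, uint64_t page_base,
			 uint64_t offset, uint64_t size)
{
	if (page_base < m->phys_addr)
		return false;
	return page_base + offset + size - 1 <= m->phys_addr + m->size - 1;
}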
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7e9b5bf910c1..0404ccbb8aa3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -148,10 +148,6 @@ ENDPROC(cpu_v7_do_resume)
  *	Initialise TLB, Caches, and MMU state ready to switch the MMU
  *	on. Return in r0 the new CP15 C1 control register setting.
  *
- *	We automatically detect if we have a Harvard cache, and use the
- *	Harvard cache control instructions insead of the unified cache
- *	control instructions.
- *
  *	This should be able to cover all ARMv7 cores.
  *
  *	It is assumed that:
@@ -251,9 +247,7 @@ __v7_setup:
 #endif
 3:	mov	r10, #0
-#ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
-#endif
 	dsb
 #ifdef CONFIG_MMU
 	mcr	p15, 0, r10, c8, c7, 0		@ invalidate I + D TLBs
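Dropping the #ifdef HARVARD_CACHE guard makes the I-cache/BTB invalidate unconditional in __v7_setup. The likely motivation (an assumption here, not stated in this diff) is that nothing in the v7 build defines HARVARD_CACHE, so the guarded mcr was silently compiled out. A tiny C illustration of that failure mode:

#include <stdio.h>

int main(void)
{
	int invalidated = 0;

#ifdef HARVARD_CACHE	/* never defined -> the body vanishes at compile time */
	invalidated = 1;
#endif
	printf("invalidate performed: %d\n", invalidated);	/* prints 0 */
	return 0;
}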
@@ -330,16 +324,6 @@ __v7_ca5mp_proc_info:
 	.size	__v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 	/*
-	 * ARM Ltd. Cortex A7 processor.
-	 */
-	.type	__v7_ca7mp_proc_info, #object
-__v7_ca7mp_proc_info:
-	.long	0x410fc070
-	.long	0xff0ffff0
-	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
-	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
-
-	/*
 	 * ARM Ltd. Cortex A9 processor.
 	 */
 	.type	__v7_ca9mp_proc_info, #object
@@ -351,6 +335,16 @@ __v7_ca9mp_proc_info:
 #endif	/* CONFIG_ARM_LPAE */
 	/*
+	 * ARM Ltd. Cortex A7 processor.
+	 */
+	.type	__v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+	.long	0x410fc070
+	.long	0xff0ffff0
+	__v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+	.size	__v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
+	/*
 	 * ARM Ltd. Cortex A15 processor.
 	 */
 	.type	__v7_ca15mp_proc_info, #object