author    Paul Burton <paul.burton@mips.com>    2019-04-30 22:53:30 +0000
committer Paul Burton <paul.burton@mips.com>    2019-05-02 11:20:47 -0700
commit    172dcd935c34b022729f45a7bbaae5cc05231533 (patch)
tree      2561c53caebe4df0c5369662cc05c98fa914773a /arch/mips/kernel/traps.c
parent    f995adb0ac5bcfa0d396ba7706aab420c7a6fec2 (diff)
MIPS: Always allocate exception vector for MIPSr2+
Currently we allocate the exception vector on systems which use a vectored interrupt mode, but otherwise attempt to reuse whatever exception vector the bootloader uses.

This can be problematic for a number of reasons:

1) The memory isn't properly marked reserved in the memblock allocator. We've relied on the fact that EBase is generally in the memory below the kernel image which we don't free, but this is about to change.

2) Recent versions of U-Boot place their exception vector high in kseg0, in memory which isn't protected by being lower than the kernel anyway & can end up being clobbered.

3) We are unnecessarily reliant upon there being memory at the address EBase points to upon entry to the kernel. This is often the case, but if the bootloader doesn't configure EBase & leaves it with its default value then we rely upon there being memory at physical address 0 for no good reason.

Improve this situation by allocating the exception vector in all cases when running on MIPSr2 or higher, and reserving the memory for MIPSr1 or lower. This ensures we don't clobber the exception vector in any configuration, and for MIPSr2 & higher removes the need for memory at physical address 0.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
Tested-by: Serge Semin <fancer.lancer@gmail.com>
Cc: linux-mips@vger.kernel.org
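For orientation, the control flow this patch leaves in trap_init() can be condensed as below. This is a simplified sketch drawn from the diff that follows, not the verbatim kernel code; the KSeg0 placement logic at the end is abbreviated to the common case.

	unsigned long vec_size;
	phys_addr_t ebase_pa;

	if (!cpu_has_mips_r2_r6) {
		/* MIPSr1 & earlier have no programmable EBase: keep the vector
		 * at CAC_BASE and reserve it so nothing else clobbers it. */
		ebase = CAC_BASE;
		ebase_pa = virt_to_phys((void *)ebase);
		vec_size = 0x400;
		memblock_reserve(ebase_pa, vec_size);
	} else {
		/* MIPSr2+: always allocate a fresh vector, sized according to
		 * whether vectored interrupts are in use. */
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING * 64;
		else
			vec_size = PAGE_SIZE;

		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
		if (!ebase_pa)
			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
			      __func__, vec_size, 1 << fls(vec_size));

		/* Point ebase at the new allocation, preferring a KSeg0
		 * mapping when the physical address allows it. */
		ebase = CKSEG0ADDR(ebase_pa);
	}

Either way the vector memory is known to memblock, which is what lets the MIPSr2+ path stop depending on whatever EBase the bootloader left behind.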
Diffstat (limited to 'arch/mips/kernel/traps.c')
-rw-r--r--  arch/mips/kernel/traps.c  35
1 file changed, 15 insertions(+), 20 deletions(-)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 00f44b16385e..9b565ed51662 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2284,18 +2284,27 @@ void __init trap_init(void)
 	extern char except_vec3_generic;
 	extern char except_vec4;
 	extern char except_vec3_r4000;
-	unsigned long i;
+	unsigned long i, vec_size;
+	phys_addr_t ebase_pa;
 
 	check_wait();
 
-	if (cpu_has_veic || cpu_has_vint) {
-		unsigned long size = 0x200 + VECTORSPACING*64;
-		phys_addr_t ebase_pa;
+	if (!cpu_has_mips_r2_r6) {
+		ebase = CAC_BASE;
+		ebase_pa = virt_to_phys((void *)ebase);
+		vec_size = 0x400;
+
+		memblock_reserve(ebase_pa, vec_size);
+	} else {
+		if (cpu_has_veic || cpu_has_vint)
+			vec_size = 0x200 + VECTORSPACING*64;
+		else
+			vec_size = PAGE_SIZE;
 
-		ebase_pa = memblock_phys_alloc(size, 1 << fls(size));
+		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
 		if (!ebase_pa)
 			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
-			      __func__, size, 1 << fls(size));
+			      __func__, vec_size, 1 << fls(vec_size));
 
 		/*
 		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2312,20 +2321,6 @@ void __init trap_init(void)
 			ebase = CKSEG0ADDR(ebase_pa);
 		else
 			ebase = (unsigned long)phys_to_virt(ebase_pa);
-	} else {
-		ebase = CAC_BASE;
-
-		if (cpu_has_mips_r2_r6) {
-			if (cpu_has_ebase_wg) {
-#ifdef CONFIG_64BIT
-				ebase = (read_c0_ebase_64() & ~0xfff);
-#else
-				ebase = (read_c0_ebase() & ~0xfff);
-#endif
-			} else {
-				ebase += (read_c0_ebase() & 0x3ffff000);
-			}
-		}
 	}
 
 	if (cpu_has_mmips) {
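A brief aside on the alignment argument used above: the kernel's fls() returns the 1-based position of the most-significant set bit, so 1 << fls(vec_size) is a power-of-two alignment strictly greater than vec_size. For example, with vec_size equal to PAGE_SIZE and assuming 4 KiB pages, fls(0x1000) is 13 and the vector ends up allocated with 0x2000 alignment. A minimal userspace sketch of the same computation, where fls_demo() is a hypothetical stand-in for the kernel helper:

	#include <stdio.h>

	/* Stand-in for the kernel's fls(): 1-based position of the
	 * most-significant set bit, 0 when the input is 0. */
	static int fls_demo(unsigned long x)
	{
		int bit = 0;

		while (x) {
			x >>= 1;
			bit++;
		}
		return bit;
	}

	int main(void)
	{
		unsigned long vec_size = 0x1000;	/* PAGE_SIZE, assuming 4 KiB pages */

		/* Same expression as the memblock_phys_alloc() alignment above. */
		printf("align=0x%lx\n", 1UL << fls_demo(vec_size));	/* prints align=0x2000 */
		return 0;
	}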