author | Al Viro <viro@zeniv.linux.org.uk> | 2016-12-25 14:12:18 -0500
---|---|---
committer | Al Viro <viro@zeniv.linux.org.uk> | 2017-03-28 18:23:32 -0400
commit | 9a7513cfa26f5fcce15de6130ce3c27d77c0ce55 (patch) |
tree | 1f7769fc94432e965b7e6d19bb0f28ed40254ae5 |
parent | d597580d373774b1bdab84b3d26ff0b55162b916 (diff) |
frv: switch to use of fixup_exception()
Massage frv's search_exception_table() so that it
a) takes a pt_regs pointer as an explicit argument
b) updates ->pc on success
This simplifies the callers a bit and, while we are at it, allows
conversion to the generic extable.h.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
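
As an aside for readers skimming the diff below, here is a minimal, self-contained C sketch of the calling-convention change the commit message describes. It is illustrative only: the demo_* names and addresses are hypothetical stand-ins, not kernel code; on frv the real players are __frame (the saved pt_regs), the old search_exception_table() and the new fixup_exception().

```c
#include <stdio.h>

/* Hypothetical stand-ins, for illustration only. */
struct demo_pt_regs {
	unsigned long pc;
};

#define DEMO_FAULTING_PC 0x1000UL	/* pretend this instruction faulted */
#define DEMO_FIXUP_PC    0x2000UL	/* and this is its fixup target */

/* Old convention: return the fixup address (or 0); the caller patches ->pc. */
static unsigned long demo_search_exception_table(unsigned long pc)
{
	return pc == DEMO_FAULTING_PC ? DEMO_FIXUP_PC : 0;
}

/* New convention: take the pt_regs pointer, update ->pc on success,
 * and return non-zero so the caller only has to test the result. */
static int demo_fixup_exception(struct demo_pt_regs *regs)
{
	unsigned long fixup = demo_search_exception_table(regs->pc);

	if (!fixup)
		return 0;
	regs->pc = fixup;
	return 1;
}

int main(void)
{
	struct demo_pt_regs regs = { .pc = DEMO_FAULTING_PC };
	unsigned long fixup;

	/* Old-style caller: look up, then patch ->pc by hand. */
	fixup = demo_search_exception_table(regs.pc);
	if (fixup)
		regs.pc = fixup;
	printf("old style: pc now 0x%lx\n", regs.pc);

	/* New-style caller: one call, ->pc already updated on success. */
	regs.pc = DEMO_FAULTING_PC;
	if (demo_fixup_exception(&regs))
		printf("new style: pc now 0x%lx\n", regs.pc);

	return 0;
}
```

With the new shape, each caller in the diff below collapses to `if (fixup_exception(__frame)) return;`, which is exactly what the traps.c and fault.c hunks do.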
-rw-r--r-- | arch/frv/include/asm/Kbuild | 1
-rw-r--r-- | arch/frv/include/asm/uaccess.h | 23
-rw-r--r-- | arch/frv/kernel/traps.c | 7
-rw-r--r-- | arch/frv/mm/extable.c | 27
-rw-r--r-- | arch/frv/mm/fault.c | 6
5 files changed, 18 insertions, 46 deletions
diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild
index c33b46715f65..cce3bc3603ea 100644
--- a/arch/frv/include/asm/Kbuild
+++ b/arch/frv/include/asm/Kbuild
@@ -1,6 +1,7 @@
 generic-y += clkdev.h
 generic-y += exec.h
+generic-y += extable.h
 generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h
index 55b3a69c6c53..5bcc57de3c95 100644
--- a/arch/frv/include/asm/uaccess.h
+++ b/arch/frv/include/asm/uaccess.h
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <asm/segment.h>
 #include <asm/sections.h>
+#include <asm/extable.h>
 
 #define __ptr(x) ((unsigned long __force *)(x))
 
@@ -59,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size)
 #define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
 #define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry
-{
-	unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
-
 /*
  * These are the main single-value transfer routines. They automatically
@@ -314,6 +295,4 @@ extern long strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long search_exception_table(unsigned long addr);
-
 #endif /* _ASM_UACCESS_H */
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index ce29991e4219..fb08ebe0dab4 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 	siginfo_t info;
 
 #ifdef CONFIG_MMU
-	unsigned long fixup;
-
-	fixup = search_exception_table(__frame->pc);
-	if (fixup) {
-		__frame->pc = fixup;
+	if (fixup_exception(__frame))
 		return;
-	}
 #endif
 
 	die_if_kernel("-- Memory Access Exception --\n"
diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c
index a0e8b3e03e4c..9198ddd16092 100644
--- a/arch/frv/mm/extable.c
+++ b/arch/frv/mm/extable.c
@@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand
 extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler;
 extern spinlock_t modlist_lock;
 
-
-/*****************************************************************************/
-/*
- * see if there's a fixup handler available to deal with a kernel fault
- */
-unsigned long search_exception_table(unsigned long pc)
+int fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *extab;
+	unsigned long pc = regs->pc;
 
 	/* determine if the fault lay during a memcpy_user or a memset_user */
-	if (__frame->lr == (unsigned long) &__memset_user_error_lr &&
+	if (regs->lr == (unsigned long) &__memset_user_error_lr &&
 	    (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end
 	    ) {
 		/* the fault occurred in a protected memset
 		 * - we search for the return address (in LR) instead of the program counter
 		 * - it was probably during a clear_user()
 		 */
-		return (unsigned long) &__memset_user_error_handler;
+		regs->pc = (unsigned long) &__memset_user_error_handler;
+		return 1;
 	}
 
-	if (__frame->lr == (unsigned long) &__memcpy_user_error_lr &&
+	if (regs->lr == (unsigned long) &__memcpy_user_error_lr &&
 	    (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end
 	    ) {
 		/* the fault occurred in a protected memset
 		 * - we search for the return address (in LR) instead of the program counter
 		 * - it was probably during a copy_to/from_user()
 		 */
-		return (unsigned long) &__memcpy_user_error_handler;
+		regs->pc = (unsigned long) &__memcpy_user_error_handler;
+		return 1;
 	}
 
 	extab = search_exception_tables(pc);
-	if (extab)
-		return extab->fixup;
+	if (extab) {
+		regs->pc = extab->fixup;
+		return 1;
+	}
 
 	return 0;
-
-} /* end search_exception_table() */
+}
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 614a46c413d2..179e79e220e5 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
-	unsigned long _pme, lrai, lrad, fixup;
+	unsigned long _pme, lrai, lrad;
 	unsigned long flags = 0;
 	siginfo_t info;
 	pgd_t *pge;
@@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 
 no_context:
 	/* are we prepared to handle this kernel fault? */
-	if ((fixup = search_exception_table(__frame->pc)) != 0) {
-		__frame->pc = fixup;
+	if (fixup_exception(__frame))
 		return;
-	}
 
 	/*
 	 * Oops. The kernel tried to access some bad page. We'll have to
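
As background for the comment block removed from uaccess.h above, the exception table is simply a list of (insn, fixup) address pairs: the first is an instruction allowed to fault, the second is where execution should resume. Below is a hedged, self-contained sketch of that mechanism in plain C. The ex_* names, the linear search, and the toy addresses are illustrative assumptions only; the kernel generates and sorts the real table itself and searches it via search_exception_tables().

```c
#include <stddef.h>
#include <stdio.h>

/* Same shape as the struct removed from frv's uaccess.h: insn is the
 * address of an instruction allowed to fault, fixup is where execution
 * should continue after the fault. */
struct ex_entry {
	unsigned long insn, fixup;
};

/* Toy register set; only the program counter matters here. */
struct ex_regs {
	unsigned long pc;
};

/* Toy table. In the kernel the entries are emitted by the uaccess
 * helpers and kept sorted so they can be binary-searched. */
static const struct ex_entry ex_table[] = {
	{ 0x1000, 0x2000 },
	{ 0x1010, 0x2040 },
};

/* Linear search by faulting pc -- stands in for search_exception_tables(). */
static const struct ex_entry *ex_search(unsigned long pc)
{
	for (size_t i = 0; i < sizeof(ex_table) / sizeof(ex_table[0]); i++)
		if (ex_table[i].insn == pc)
			return &ex_table[i];
	return NULL;
}

/* The shape this commit gives frv's fixup_exception(): take the saved
 * registers, patch ->pc in place on a hit, and report success. */
static int ex_fixup_exception(struct ex_regs *regs)
{
	const struct ex_entry *e = ex_search(regs->pc);

	if (!e)
		return 0;
	regs->pc = e->fixup;	/* resume at the fixup stub, not the faulting insn */
	return 1;
}

int main(void)
{
	struct ex_regs regs = { .pc = 0x1010 };

	if (ex_fixup_exception(&regs))
		printf("resuming at fixup 0x%lx\n", regs.pc);	/* prints 0x2040 */
	return 0;
}
```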