Diffstat (limited to 'arch/blackfin/include/asm/uaccess.h')
-rw-r--r-- | arch/blackfin/include/asm/uaccess.h | 47
1 files changed, 5 insertions, 42 deletions
diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h
index 0eff88aa6d6a..f54a34f31cea 100644
--- a/arch/blackfin/include/asm/uaccess.h
+++ b/arch/blackfin/include/asm/uaccess.h
@@ -12,7 +12,6 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/string.h>
 
@@ -29,9 +28,6 @@ static inline void set_fs(mm_segment_t fs)
 
 #define segment_eq(a, b) ((a) == (b))
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
 #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size))
 
 /*
@@ -46,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1;
 extern int _access_ok(unsigned long addr, unsigned long size);
 #endif
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
+#include <asm/extable.h>
 
 /*
  * These are the main single-value transfer routines. They automatically
@@ -163,41 +144,23 @@ static inline int bad_user_access_length(void)
 		     : "a" (__ptr(ptr)));		\
 })
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
-
 static inline unsigned long __must_check
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
 	memcpy(to, (const void __force *)from, n);
 	return 0;
 }
 
 static inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
 	memcpy((void __force *)to, from, n);
 	SSYNC();
 	return 0;
 }
 
-static inline unsigned long __must_check
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_from_user(to, from, n);
-	memset(to, 0, n);
-	return n;
-}
-
-static inline unsigned long __must_check
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (likely(access_ok(VERIFY_WRITE, to, n)))
-		return __copy_to_user(to, from, n);
-	return n;
-}
-
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
 /*
  * Copy a null terminated string from userspace.
  */
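
For context, this is the blackfin side of the raw_copy_user conversion: the architecture now supplies only raw_copy_{to,from}_user(), while the generic copy_{to,from}_user() wrappers in include/linux/uaccess.h take over the access_ok() range check and the zeroing of any uncopied tail, and defining INLINE_COPY_FROM_USER / INLINE_COPY_TO_USER requests the inline form of those wrappers. The stand-alone user-space sketch below only models that split under stated assumptions; fake_access_ok, user_mem and USER_MEM_SIZE are made-up stand-ins, not kernel APIs, and the wrapper merely approximates the generic kernel policy.

/* Sketch of the raw_copy_user split: the "arch" layer only copies bytes,
 * the generic wrapper does the range check and zero-fills on failure.
 */
#include <stdio.h>
#include <string.h>

#define USER_MEM_SIZE 64                 /* pretend user address space */
static char user_mem[USER_MEM_SIZE];

/* Generic-layer policy: is [off, off + n) inside "user space"? */
static int fake_access_ok(unsigned long off, unsigned long n)
{
	return off <= USER_MEM_SIZE && n <= USER_MEM_SIZE - off;
}

/* Arch layer: nothing but the byte copy (blackfin uses memcpy too);
 * returns the number of bytes NOT copied, 0 on success. */
static unsigned long raw_copy_from_user(void *to, unsigned long off,
					unsigned long n)
{
	memcpy(to, user_mem + off, n);
	return 0;
}

/* Generic wrapper: check the range, copy, zero the uncopied tail. */
static unsigned long copy_from_user(void *to, unsigned long off,
				    unsigned long n)
{
	unsigned long res = n;

	if (fake_access_ok(off, n))
		res = raw_copy_from_user(to, off, n);
	if (res)
		memset((char *)to + n - res, 0, res);
	return res;
}

int main(void)
{
	char buf[16];

	strcpy(user_mem, "hello");
	printf("ok copy:  %lu left, buf=\"%s\"\n",
	       copy_from_user(buf, 0, 6), buf);
	printf("bad copy: %lu left\n",
	       copy_from_user(buf, USER_MEM_SIZE - 2, 8));
	return 0;
}

Run as a normal C program, the first call succeeds and leaves "hello" in buf, while the second fails the range check, zero-fills the destination and reports 8 bytes uncopied, which mirrors the behaviour the removed per-arch copy_from_user() used to provide.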