author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 08:46:08 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-21 08:46:08 -0700
commit     7e5cb5e151c5474b4a468f437f5038ba9f67ef4d (patch)
tree       899f42e88f926fb4501402cf4ee4aa0903421ef5 /arch
parent     8c12fec90c54cfabdd57b2726a1defbc07c19a55 (diff)
parent     e994defb7b6813ba6fa7a2a36e86d2455ad1dc35 (diff)
Merge branch 'vfs-cleanups' (random vfs cleanups)
This teaches vfs_fstat() to use the appropriate f[get|put]_light functions,
allowing it to avoid some unnecessary locking for the common case.

More noticeably, it also cleans up and simplifies the "getname_flags()"
function, which now relies on the architecture strncpy_from_user() doing all
the user access checks properly, instead of hacking around the fact that on
x86 it didn't use to do it right (see commit 92ae03f2ef99: "x86: merge
32/64-bit versions of 'strncpy_from_user()' and speed it up").

* vfs-cleanups:
  VFS: make vfs_fstat() use f[get|put]_light()
  VFS: clean up and simplify getname_flags()
  x86: make word-at-a-time strncpy_from_user clear bytes at the end
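For orientation, the following is roughly the shape vfs_fstat() takes with the
f[get|put]_light() conversion, assuming the 3.4-era fget_light()/fput_light()
and vfs_getattr() interfaces. The actual hunk lives in fs/stat.c and so is not
part of the arch-limited diffstat below; treat this as a sketch of the pattern
the message describes, not the committed code.

/* Sketch only (kernel context): fget_light() can skip the file-table
 * reference counting when the fd table is not shared, which is the
 * "avoid some unnecessary locking for the common case" mentioned above. */
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/stat.h>

int vfs_fstat(unsigned int fd, struct kstat *stat)
{
	int fput_needed;
	struct file *f = fget_light(fd, &fput_needed);
	int error = -EBADF;

	if (f) {
		error = vfs_getattr(f->f_path.mnt, f->f_path.dentry, stat);
		fput_light(f, fput_needed);
	}
	return error;
}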
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/lib/usercopy.c  20
1 files changed, 8 insertions, 12 deletions
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index d6ae30bbd7b..2e4e4b02c37 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -44,13 +44,6 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 }
 EXPORT_SYMBOL_GPL(copy_from_user_nmi);
 
-static inline unsigned long count_bytes(unsigned long mask)
-{
-	mask = (mask - 1) & ~mask;
-	mask >>= 7;
-	return count_masked_bytes(mask);
-}
-
 /*
  * Do a strncpy, return length of string without final '\0'.
  * 'count' is the user-supplied count (return 'count' if we
@@ -69,16 +62,19 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
 	max = count;
 
 	while (max >= sizeof(unsigned long)) {
-		unsigned long c;
+		unsigned long c, mask;
 
 		/* Fall back to byte-at-a-time if we get a page fault */
 		if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
 			break;
-		/* This can write a few bytes past the NUL character, but that's ok */
+		mask = has_zero(c);
+		if (mask) {
+			mask = (mask - 1) & ~mask;
+			mask >>= 7;
+			*(unsigned long *)(dst+res) = c & mask;
+			return res + count_masked_bytes(mask);
+		}
 		*(unsigned long *)(dst+res) = c;
-		c = has_zero(c);
-		if (c)
-			return res + count_bytes(c);
 		res += sizeof(unsigned long);
 		max -= sizeof(unsigned long);
 	}
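For reference, here is a standalone sketch of the word-at-a-time trick the new
hunk relies on, assuming a 64-bit little-endian machine. has_zero() and
count_masked_bytes() below are simplified stand-ins for the kernel helpers of
the same name, not their exact kernel definitions.

#include <stdio.h>
#include <string.h>

/* Set bit 7 of (at least) the first zero byte of 'c'; zero if no NUL. */
static unsigned long has_zero(unsigned long c)
{
	return (c - 0x0101010101010101UL) & ~c & 0x8080808080808080UL;
}

/* 'mask' has 0xff in every byte before the first NUL; count those bytes. */
static long count_masked_bytes(unsigned long mask)
{
	long n = 0;

	while (mask & 0xff) {
		n++;
		mask >>= 8;
	}
	return n;
}

int main(void)
{
	char src[sizeof(unsigned long)] = "hi";		/* "hi" plus six NUL bytes */
	unsigned long c, mask, dst = ~0UL;		/* dst pre-filled with garbage */

	memcpy(&c, src, sizeof(c));
	mask = has_zero(c);
	if (mask) {
		mask = (mask - 1) & ~mask;	/* all bits below the first zero-byte flag */
		mask >>= 7;			/* -> 0xff in each byte before the NUL */
		dst = c & mask;			/* store the string, clear bytes past the NUL */
		printf("len=%ld dst=%#018lx\n", count_masked_bytes(mask), dst);
	}
	return 0;
}

This is the same arithmetic that used to live in count_bytes(); the patch moves
it inline so the computed byte mask can also be used to zero the tail of the
copied word instead of writing a few garbage bytes past the NUL.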