author     Matthew Wilcox (Oracle) <willy@infradead.org>   2022-06-12 22:32:27 +0100
committer  Kees Cook <keescook@chromium.org>               2022-06-13 09:54:52 -0700
commit     1dfbe9fcda4afc957f0e371e207ae3cb7e8f3b0e (patch)
tree       5d113acb38d1b7af478d04036fc46d55134c578b /mm
parent     35fb9ae4aa2e838b234323e6f7cf6336ff019e5a (diff)
usercopy: Make usercopy resilient against ridiculously large copies
If 'n' is so large that it's negative, we might wrap around and
mistakenly think that the copy is OK when it's not. Such a copy would
probably crash, but just doing the arithmetic in a simpler way lets us
detect and refuse this case.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Tested-by: Zorro Lang <zlang@redhat.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20220612213227.3881769-4-willy@infradead.org
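To see the failure mode concretely, here is a minimal userspace sketch (not part of the patch; the base address and PAGE_SIZE value are made up for illustration) showing how the old kmap-style check wraps around while the rewritten one does not:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL		/* illustrative page size */

int main(void)
{
	/* A made-up pointer 0x100 bytes into a page starting at 0x5000. */
	uintptr_t addr = 0x5100UL;
	unsigned long offset = addr & (PAGE_SIZE - 1);	/* 0x100 */
	unsigned long page_end = addr | (PAGE_SIZE - 1);
	unsigned long n = -1UL;		/* a "negative" size: ULONG_MAX */

	/* Old check: addr + n - 1 wraps to just below addr, the
	 * comparison is false, and the huge copy is wrongly accepted. */
	printf("old check %s\n",
	       addr + n - 1 > page_end ? "rejects" : "ACCEPTS (bug)");

	/* New check: nothing is added to the pointer, so nothing can
	 * wrap; any n larger than the room left in the page fails. */
	printf("new check %s\n",
	       n > PAGE_SIZE - offset ? "rejects" : "accepts");
	return 0;
}

The same rearrangement, keeping the caller-controlled n alone on one side of the comparison, is applied to the vmalloc and large-folio checks below.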
Diffstat (limited to 'mm')
-rw-r--r--  mm/usercopy.c  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 30a4db3cb1df..4e1da708699b 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -162,27 +162,26 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
bool to_user)
{
uintptr_t addr = (uintptr_t)ptr;
+ unsigned long offset;
struct folio *folio;

if (is_kmap_addr(ptr)) {
- unsigned long page_end = addr | (PAGE_SIZE - 1);
-
- if (addr + n - 1 > page_end)
- usercopy_abort("kmap", NULL, to_user,
- offset_in_page(ptr), n);
+ offset = offset_in_page(ptr);
+ if (n > PAGE_SIZE - offset)
+ usercopy_abort("kmap", NULL, to_user, offset, n);
return;
}

if (is_vmalloc_addr(ptr)) {
struct vmap_area *area = find_vmap_area(addr);
- unsigned long offset;
if (!area)
usercopy_abort("vmalloc", "no area", to_user, 0, n);
- offset = addr - area->va_start;
- if (addr + n > area->va_end)
+ if (n > area->va_end - addr) {
+ offset = addr - area->va_start;
usercopy_abort("vmalloc", NULL, to_user, offset, n);
+ }
return;
}
@@ -195,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
/* Check slab allocator for flags and size. */
__check_heap_object(ptr, n, folio_slab(folio), to_user);
} else if (folio_test_large(folio)) {
- unsigned long offset = ptr - folio_address(folio);
- if (offset + n > folio_size(folio))
+ offset = ptr - folio_address(folio);
+ if (n > folio_size(folio) - offset)
usercopy_abort("page alloc", NULL, to_user, offset, n);
}
}
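The invariant behind all three rewritten checks can be stated as a tiny helper (hypothetical, not part of the patch): once offset is known to lie within an object of the given size, size - offset cannot wrap, and no arithmetic is ever performed on the untrusted n:

#include <stdbool.h>

/* Hypothetical sketch of the pattern used at each call site: assumes
 * offset <= size, which holds by construction because offset is
 * derived from the object's own start. */
static bool copy_fits(unsigned long offset, unsigned long n,
		      unsigned long size)
{
	return n <= size - offset;
}

Each abort in the patch is just the negation of this test: the kmap case uses size = PAGE_SIZE, the vmalloc case compares n against the room left before area->va_end, and the large-folio case uses folio_size().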