author		Kees Cook <keescook@chromium.org>	2017-11-30 13:04:32 -0800
committer	Kees Cook <keescook@chromium.org>	2018-01-15 12:07:48 -0800
commit		2d891fbc3bb681ba1f826e7ee70dbe38ca7465fe (patch)
tree		1dac3dd3aac5a0fed8046b6f85bd944211260c70 /mm
parent		afcc90f8621e289cd082ba97900e76f01afe778c (diff)
usercopy: Allow strict enforcement of whitelists
This introduces CONFIG_HARDENED_USERCOPY_FALLBACK to control the
behavior of hardened usercopy whitelist violations. By default, whitelist
violations will continue to WARN() so that any bad or missing usercopy
whitelists can be discovered without being too disruptive.
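For context, a slab cache opts in to hardened usercopy checking by declaring a usercopy region when the cache is created. Below is a minimal, hypothetical sketch (the struct, field, and cache names are made up, not part of this patch) of how a cache would whitelist a single buffer using the kmem_cache_create_usercopy() interface added earlier in this series; only that region may then be copied to or from userspace out of these objects.

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical object: only 'data' is meant to reach userspace. */
struct example_req {
	spinlock_t lock;	/* never exposed to userspace */
	char data[128];		/* the only usercopy-whitelisted region */
	void *private;		/* never exposed to userspace */
};

static struct kmem_cache *example_cachep;

static int __init example_cache_init(void)
{
	example_cachep = kmem_cache_create_usercopy("example_req",
			sizeof(struct example_req), 0, SLAB_HWCACHE_ALIGN,
			offsetof(struct example_req, data),	  /* useroffset */
			sizeof(((struct example_req *)0)->data),  /* usersize */
			NULL);
	return example_cachep ? 0 : -ENOMEM;
}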
If this option is disabled at build time, or the system is booted with
"slab_common.usercopy_fallback=0", whitelist violations will BUG() instead
of WARN(). This is useful for admins who want to enforce usercopy
whitelists immediately.
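The decision this option controls can be summarized as below. This is a condensed, non-verbatim sketch of the __check_heap_object() logic in mm/slab.c (field and helper names follow that file), showing where usercopy_fallback selects between usercopy_warn() and the rejecting usercopy_abort():

/* Condensed sketch, not verbatim kernel code. */
static void check_against_whitelist(struct kmem_cache *cachep,
				    unsigned long offset, unsigned long n,
				    bool to_user)
{
	/* Copy falls entirely within the declared usercopy region: allowed. */
	if (offset >= cachep->useroffset &&
	    offset - cachep->useroffset <= cachep->usersize &&
	    n <= cachep->useroffset - offset + cachep->usersize)
		return;

	/* Within the object but outside the whitelist: WARN() in fallback mode. */
	if (usercopy_fallback &&
	    offset <= cachep->object_size &&
	    n <= cachep->object_size - offset) {
		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
		return;
	}

	/* Strict mode (or out of bounds entirely): reject and BUG(). */
	usercopy_abort("SLAB object", cachep->name, to_user, offset, n);
}

With CONFIG_HARDENED_USERCOPY_FALLBACK=n, or with slab_common.usercopy_fallback=0 on the kernel command line, the middle branch is never taken, so any copy outside the declared region is rejected. Because the parameter is __ro_after_init with mode 0400, the choice is fixed at boot and cannot be flipped at runtime.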
Suggested-by: Matthew Garrett <mjg59@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab.c		3
-rw-r--r--	mm/slab_common.c	8
-rw-r--r--	mm/slub.c		3
3 files changed, 12 insertions, 2 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 1c02f6e94235..b9b0df620bb9 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4426,7 +4426,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * to be a temporary method to find any missing usercopy
 	 * whitelists.
 	 */
-	if (offset <= cachep->object_size &&
+	if (usercopy_fallback &&
+	    offset <= cachep->object_size &&
 	    n <= cachep->object_size - offset) {
 		usercopy_warn("SLAB object", cachep->name, to_user, offset, n);
 		return;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index fc3e66bdce75..a51f65408637 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -31,6 +31,14 @@ LIST_HEAD(slab_caches);
 DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
+#ifdef CONFIG_HARDENED_USERCOPY
+bool usercopy_fallback __ro_after_init =
+		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
+module_param(usercopy_fallback, bool, 0400);
+MODULE_PARM_DESC(usercopy_fallback,
+		"WARN instead of reject usercopy whitelist violations");
+#endif
+
 static LIST_HEAD(slab_caches_to_rcu_destroy);
 static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
 static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
diff --git a/mm/slub.c b/mm/slub.c
index 6d9b1e7d3226..862d835b3042 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3859,7 +3859,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 	 * whitelists.
 	 */
 	object_size = slab_ksize(s);
-	if (offset <= object_size && n <= object_size - offset) {
+	if (usercopy_fallback &&
+	    offset <= object_size && n <= object_size - offset) {
 		usercopy_warn("SLUB object", s->name, to_user, offset, n);
 		return;
 	}