Diffstat (limited to 'mm/slub.c')
-rw-r--r--  mm/slub.c  100
1 file changed, 53 insertions(+), 47 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 60004bfc2dc2..d52c88f29f69 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -756,6 +756,50 @@ static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
return false;
}
+/*
+ * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
+ * family of APIs rounds the requested size up to one of them, so an object
+ * can contain more space than was actually requested. Save the original
+ * request size in the metadata area, for better debugging and sanity checks.
+ */
+static inline void set_orig_size(struct kmem_cache *s,
+ void *object, unsigned int orig_size)
+{
+ void *p = kasan_reset_tag(object);
+ unsigned int kasan_meta_size;
+
+ if (!slub_debug_orig_size(s))
+ return;
+
+ /*
+ * KASAN can save its free metadata inside the object at offset 0.
+ * If this metadata is larger than 'orig_size', it would overlap
+ * the data redzone in [orig_size+1, object_size]. Thus, we adjust
+ * 'orig_size' to be at least as big as KASAN's metadata.
+ */
+ kasan_meta_size = kasan_metadata_size(s, true);
+ if (kasan_meta_size > orig_size)
+ orig_size = kasan_meta_size;
+
+ p += get_info_end(s);
+ p += sizeof(struct track) * 2;
+
+ *(unsigned int *)p = orig_size;
+}
+
+static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
+{
+ void *p = kasan_reset_tag(object);
+
+ if (!slub_debug_orig_size(s))
+ return s->object_size;
+
+ p += get_info_end(s);
+ p += sizeof(struct track) * 2;
+
+ return *(unsigned int *)p;
+}
+
#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);
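
For readers new to the SLUB debug layout: below is a rough userspace sketch (not part of the patch) of the pointer arithmetic the two helpers above share. struct fake_cache, info_end and all values are illustrative stand-ins for the real kmem_cache, get_info_end() and struct track, which are considerably more involved.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins, not the kernel's definitions. */
struct track { unsigned long addrs[4]; };

struct fake_cache {
	unsigned int object_size;	/* rounded-up object size */
	unsigned int info_end;		/* stand-in for get_info_end(s) */
};

/*
 * Mirror the arithmetic in set_orig_size()/get_orig_size(): the slot
 * sits past the object area and the two alloc/free track records.
 */
static unsigned int *orig_size_slot(struct fake_cache *s, void *object)
{
	char *p = object;

	p += s->info_end;
	p += sizeof(struct track) * 2;
	return (unsigned int *)p;
}

int main(void)
{
	char object[256] = { 0 };
	struct fake_cache s = { .object_size = 64, .info_end = 72 };

	*orig_size_slot(&s, object) = 42;	/* what set_orig_size() does */
	printf("orig_size = %u\n", *orig_size_slot(&s, object));
	return 0;
}
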
@@ -985,50 +1029,6 @@ static void print_slab_info(const struct slab *slab)
&slab->__page_flags);
}
-/*
- * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
- * family of APIs rounds the requested size up to one of them, so an object
- * can contain more space than was actually requested. Save the original
- * request size in the metadata area, for better debugging and sanity checks.
- */
-static inline void set_orig_size(struct kmem_cache *s,
- void *object, unsigned int orig_size)
-{
- void *p = kasan_reset_tag(object);
- unsigned int kasan_meta_size;
-
- if (!slub_debug_orig_size(s))
- return;
-
- /*
- * KASAN can save its free metadata inside the object at offset 0.
- * If this metadata is larger than 'orig_size', it would overlap
- * the data redzone in [orig_size+1, object_size]. Thus, we adjust
- * 'orig_size' to be at least as big as KASAN's metadata.
- */
- kasan_meta_size = kasan_metadata_size(s, true);
- if (kasan_meta_size > orig_size)
- orig_size = kasan_meta_size;
-
- p += get_info_end(s);
- p += sizeof(struct track) * 2;
-
- *(unsigned int *)p = orig_size;
-}
-
-static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
-{
- void *p = kasan_reset_tag(object);
-
- if (!slub_debug_orig_size(s))
- return s->object_size;
-
- p += get_info_end(s);
- p += sizeof(struct track) * 2;
-
- return *(unsigned int *)p;
-}
-
void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
set_orig_size(s, (void *)object, s->object_size);
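
Why save the request size at all: it lets the debug code treat the unused tail [orig_size, object_size) of a rounded-up kmalloc object as a redzone and catch writes past the requested size. A hedged sketch of that idea follows; the 0xcc fill pattern and the helper are illustrative stand-ins, not the kernel's check_object()/SLUB_RED_ACTIVE machinery.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define KMALLOC_REDZONE_BYTE 0xcc	/* assumed fill pattern, for illustration */

/* Report whether the unused tail of a rounded-up object is untouched. */
static bool tail_redzone_clean(const unsigned char *object,
			       unsigned int orig_size, unsigned int object_size)
{
	for (unsigned int i = orig_size; i < object_size; i++)
		if (object[i] != KMALLOC_REDZONE_BYTE)
			return false;
	return true;
}

int main(void)
{
	unsigned char object[64];

	memset(object, KMALLOC_REDZONE_BYTE, sizeof(object));
	memset(object, 0, 40);		/* caller asked for 40 bytes, uses 40 */
	printf("clean: %d\n", tail_redzone_clean(object, 40, 64));	/* 1 */

	object[45] = 0;			/* buggy write past the request */
	printf("clean: %d\n", tail_redzone_clean(object, 40, 64));	/* 0 */
	return 0;
}
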
@@ -1894,7 +1894,6 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
int objects) {}
-
#ifndef CONFIG_SLUB_TINY
static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
void **freelist, void *nextfree)
@@ -2239,14 +2238,21 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
*/
if (unlikely(init)) {
int rsize;
- unsigned int inuse;
+ unsigned int inuse, orig_size;
inuse = get_info_end(s);
+ orig_size = get_orig_size(s, x);
if (!kasan_has_integrated_init())
- memset(kasan_reset_tag(x), 0, s->object_size);
+ memset(kasan_reset_tag(x), 0, orig_size);
rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
memset((char *)kasan_reset_tag(x) + inuse, 0,
s->size - inuse - rsize);
+ /*
+ * Restore orig_size, otherwise a kmalloc redzone overwrite
+ * would be falsely reported
+ */
+ set_orig_size(s, x, orig_size);
+
}
/* KASAN might put x into memory quarantine, delaying its reuse. */
return !kasan_slab_free(s, x, init);
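
A condensed userspace sketch (made-up offsets, not kernel code) of the ordering this hunk establishes: zero only the bytes the caller asked for so the kmalloc redzone pattern in [orig_size, object_size) survives, and write orig_size back after the metadata memset wipes it.

#include <assert.h>
#include <string.h>

enum { OBJ_SIZE = 64, ORIG_SIZE_OFF = 96, TOTAL = 128 };	/* made up */

static void free_hook_init(unsigned char *x, unsigned int orig_size)
{
	/* zero only the requested bytes; [orig_size, OBJ_SIZE) keeps
	 * its kmalloc redzone pattern */
	memset(x, 0, orig_size);

	/* zero the debug metadata area ... */
	memset(x + OBJ_SIZE, 0, TOTAL - OBJ_SIZE);

	/* ... which just wiped the stored orig_size: write it back,
	 * mirroring the set_orig_size() call added above */
	memcpy(x + ORIG_SIZE_OFF, &orig_size, sizeof(orig_size));
}

int main(void)
{
	unsigned char obj[TOTAL];
	unsigned int stored, orig = 40;

	memset(obj, 0xcc, sizeof(obj));
	memcpy(obj + ORIG_SIZE_OFF, &orig, sizeof(orig));

	free_hook_init(obj, orig);

	memcpy(&stored, obj + ORIG_SIZE_OFF, sizeof(stored));
	assert(stored == 40);		/* would be 0 without the restore */
	assert(obj[50] == 0xcc);	/* tail redzone survived the zeroing */
	return 0;
}
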