Diffstat (limited to 'include/linux/slub_def.h')
-rw-r--r--	include/linux/slub_def.h	18
1 file changed, 7 insertions(+), 11 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 9e3a575b2c30..5046f90c1171 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -47,7 +47,6 @@ struct kmem_cache_cpu {
 struct kmem_cache_node {
 	spinlock_t list_lock;	/* Protect partial list and nr_partial */
 	unsigned long nr_partial;
-	unsigned long min_partial;
 	struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_t nr_slabs;
@@ -90,6 +89,7 @@ struct kmem_cache {
 	void (*ctor)(void *);
 	int inuse;		/* Offset to metadata */
 	int align;		/* Alignment */
+	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
 #ifdef CONFIG_SLUB_DEBUG
@@ -130,9 +130,9 @@ struct kmem_cache {
  * This should be dropped to PAGE_SIZE / 2 once the page allocator
  * "fastpath" becomes competitive with the slab allocator fastpaths.
  */
-#define SLUB_MAX_SIZE (PAGE_SIZE)
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
 
-#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 1)
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
 
 /*
  * We keep the general caches in an array of slab caches that are used for
@@ -233,8 +233,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 	unsigned int order = get_order(size);
 	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
 
-	kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC, _THIS_IP_, ret,
-			     size, PAGE_SIZE << order, flags);
+	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
 
 	return ret;
 }
@@ -255,9 +254,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 
 		ret = kmem_cache_alloc_notrace(s, flags);
 
-		kmemtrace_mark_alloc(KMEMTRACE_TYPE_KMALLOC,
-				     _THIS_IP_, ret,
-				     size, s->size, flags);
+		trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
 
 		return ret;
 	}
@@ -296,9 +293,8 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 
 		ret = kmem_cache_alloc_node_notrace(s, flags, node);
 
-		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
-					  _THIS_IP_, ret,
-					  size, s->size, flags, node);
+		trace_kmalloc_node(_THIS_IP_, ret,
+				   size, s->size, flags, node);
 
 		return ret;
 	}
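
The SLUB_MAX_SIZE / SLUB_PAGE_SHIFT bump above doubles the largest request that kmalloc() serves from a kmalloc cache before falling back to kmalloc_large() and the page allocator. The following is a minimal user-space sketch of that routing, not kernel code: PAGE_SHIFT is fixed at 12 for illustration, and kmalloc_index_sketch() and route() are made-up stand-ins for the real kernel helpers.

/*
 * Illustrative sketch only. SLUB_MAX_SIZE and SLUB_PAGE_SHIFT mirror the
 * new values from this diff; everything else is a stand-in.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

#define SLUB_MAX_SIZE	(2 * PAGE_SIZE)		/* was PAGE_SIZE before this change */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)	/* was PAGE_SHIFT + 1 */

/* Smallest power of two >= size, expressed as a cache index (log2). */
static int kmalloc_index_sketch(size_t size)
{
	int i = 3;	/* assume an 8-byte minimum object, purely illustrative */

	while ((1UL << i) < size)
		i++;
	return i;
}

static void route(size_t size)
{
	if (size > SLUB_MAX_SIZE) {
		/* kmalloc_large() path: straight to the page allocator */
		printf("%zu bytes -> page allocator\n", size);
	} else {
		int idx = kmalloc_index_sketch(size);

		/*
		 * idx can now reach PAGE_SHIFT + 1, which is why the kmalloc
		 * cache array needs SLUB_PAGE_SHIFT (= PAGE_SHIFT + 2) slots.
		 */
		printf("%zu bytes -> kmalloc cache index %d (array size %d)\n",
		       size, idx, SLUB_PAGE_SHIFT);
	}
}

int main(void)
{
	route(100);			/* small object: SLUB fastpath */
	route(PAGE_SIZE + 1);		/* now still a kmalloc cache (index PAGE_SHIFT + 1) */
	route(2 * PAGE_SIZE + 1);	/* too big: page allocator */
	return 0;
}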
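
The hunks in the kmalloc helpers replace the kmemtrace_mark_alloc*() calls with trace_kmalloc()/trace_kmalloc_node() tracepoints. As a rough, hedged sketch (not the exact declaration from this patch series), such a tracepoint pair could be declared with DECLARE_TRACE(); the parameter names below are inferred from the call sites in the diff, not taken from the real header.

/*
 * Hedged sketch only: the real declarations live elsewhere in this patch
 * series, and their exact form may differ.
 */
#include <linux/tracepoint.h>
#include <linux/types.h>

DECLARE_TRACE(kmalloc,
	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),
	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));

DECLARE_TRACE(kmalloc_node,
	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags,
		 int node),
	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));

DECLARE_TRACE() only generates the trace_kmalloc*() static-inline hooks used in the header; a matching DEFINE_TRACE() plus a registered probe (kmemtrace itself, after this conversion) is what actually consumes the events.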