From aa4a86055b6fd76c414c3ab2af5a1dbd93dd6c93 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Thu, 2 Feb 2023 15:20:22 +0100
Subject: mm/slub: fix memory leak with using debugfs_lookup()

When calling debugfs_lookup() the result must have dput() called on it,
otherwise the memory will leak over time. To make things simpler, just
call debugfs_lookup_and_remove() instead which handles all of the logic
at once.

Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc: Andrew Morton
Cc: Vlastimil Babka
Cc: Roman Gushchin
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Greg Kroah-Hartman
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Acked-by: Roman Gushchin
Acked-by: David Rientjes
Signed-off-by: Vlastimil Babka
---
 mm/slub.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index 13459c69095a..4880e461fcc5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -6449,7 +6449,7 @@ static void debugfs_slab_add(struct kmem_cache *s)
 
 void debugfs_slab_release(struct kmem_cache *s)
 {
-	debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+	debugfs_lookup_and_remove(s->name, slab_debugfs_root);
 }
 
 static int __init slab_debugfs_init(void)
--
cgit v1.2.3
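
The pattern fixed above: debugfs_lookup() returns a dentry with a reference
held, and the caller is responsible for dropping that reference with dput();
the old one-liner discarded it, so the dentry leaked on every cache teardown.
A minimal, illustrative sketch of the leak and of two correct alternatives,
reusing the s->name and slab_debugfs_root arguments from the hunk (not itself
part of the patch):

    /* Leaky pattern: the reference taken by debugfs_lookup() is never dropped. */
    debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));

    /* Open-coded fix: remove the entry, then drop the lookup reference. */
    struct dentry *dentry = debugfs_lookup(s->name, slab_debugfs_root);

    if (dentry) {
            debugfs_remove_recursive(dentry);
            dput(dentry);
    }

    /* What the patch uses: one helper that looks up, removes, and drops the
     * reference in a single call.
     */
    debugfs_lookup_and_remove(s->name, slab_debugfs_root);
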
From f5451547b8310868f5b5acff7cd4aa7c0267edb3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 7 Feb 2023 15:16:53 +0100
Subject: mm, slab/slub: Ensure kmem_cache_alloc_bulk() is available early

The memory allocators are available during early boot even in the phase
where interrupts are disabled and scheduling is not yet possible.

The setup is arranged so that GFP_KERNEL allocations work in this phase
without causing might_alloc() splats, because the system state is
SYSTEM_BOOTING at that point, which prevents the warnings from triggering.

Most allocation/free functions use local_irq_save()/restore() or a lock
variant of that. But kmem_cache_alloc_bulk() and kmem_cache_free_bulk()
use local_[lock]_irq_disable()/enable(), which leads to a lockdep warning
when interrupts are enabled during the early boot phase.

This went unnoticed so far as there are no early users of these
interfaces. The upcoming conversion of the interrupt descriptor store
from radix_tree to maple_tree triggered this warning, as maple_tree uses
the bulk interface.

Cure this by moving the kmem_cache_alloc/free() bulk variants of SLUB
and SLAB to local[_lock]_irq_save()/restore().

There is obviously no reclaim possible or required at this point, so
there is no need to expand this coverage further.

No functional change.

Signed-off-by: Thomas Gleixner
Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Vlastimil Babka
---
 mm/slab.c | 18 ++++++++++--------
 mm/slub.c |  9 +++++----
 2 files changed, 15 insertions(+), 12 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index 7a269db050ee..9f3fffdd9b98 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3477,14 +3477,15 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
-	size_t i;
 	struct obj_cgroup *objcg = NULL;
+	unsigned long irqflags;
+	size_t i;
 
 	s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
 	if (!s)
 		return 0;
 
-	local_irq_disable();
+	local_irq_save(irqflags);
 	for (i = 0; i < size; i++) {
 		void *objp = kfence_alloc(s, s->object_size, flags) ?:
 			     __do_cache_alloc(s, flags, NUMA_NO_NODE);
@@ -3493,7 +3494,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			goto error;
 		p[i] = objp;
 	}
-	local_irq_enable();
+	local_irq_restore(irqflags);
 
 	cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
@@ -3506,7 +3507,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	/* FIXME: Trace call missing. Christoph would like a bulk variant */
 	return size;
 error:
-	local_irq_enable();
+	local_irq_restore(irqflags);
 	cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
 	kmem_cache_free_bulk(s, i, p);
@@ -3608,8 +3609,9 @@ EXPORT_SYMBOL(kmem_cache_free);
 
 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
+	unsigned long flags;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	for (int i = 0; i < size; i++) {
 		void *objp = p[i];
 		struct kmem_cache *s;
@@ -3619,9 +3621,9 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		/* called via kfree_bulk */
 		if (!folio_test_slab(folio)) {
-			local_irq_enable();
+			local_irq_restore(flags);
 			free_large_kmalloc(folio, objp);
-			local_irq_disable();
+			local_irq_save(flags);
 			continue;
 		}
 		s = folio_slab(folio)->slab_cache;
@@ -3638,7 +3640,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
 		__cache_free(s, objp, _RET_IP_);
 	}
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	/* FIXME: add tracing */
 }
diff --git a/mm/slub.c b/mm/slub.c
index 4880e461fcc5..c16d78698e3f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3913,6 +3913,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			size_t size, void **p, struct obj_cgroup *objcg)
 {
 	struct kmem_cache_cpu *c;
+	unsigned long irqflags;
 	int i;
 
 	/*
@@ -3921,7 +3922,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 	 * handlers invoking normal fastpath.
 	 */
 	c = slub_get_cpu_ptr(s->cpu_slab);
-	local_lock_irq(&s->cpu_slab->lock);
+	local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
 	for (i = 0; i < size; i++) {
 		void *object = kfence_alloc(s, s->object_size, flags);
@@ -3942,7 +3943,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			 */
 			c->tid = next_tid(c->tid);
 
-			local_unlock_irq(&s->cpu_slab->lock);
+			local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 
 			/*
 			 * Invoking slow path likely have side-effect
@@ -3956,7 +3957,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 			 */
 			c = this_cpu_ptr(s->cpu_slab);
 			maybe_wipe_obj_freeptr(s, p[i]);
 
-			local_lock_irq(&s->cpu_slab->lock);
+			local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 			continue; /* goto for-loop */
 		}
@@ -3965,7 +3966,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 		maybe_wipe_obj_freeptr(s, p[i]);
 	}
 	c->tid = next_tid(c->tid);
-	local_unlock_irq(&s->cpu_slab->lock);
+	local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 	slub_put_cpu_ptr(s->cpu_slab);
 
 	return i;
--
cgit v1.2.3
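
Why the save/restore form matters for early boot: local_irq_enable()
unconditionally turns interrupts on when the critical section ends, whereas
local_irq_restore() returns to whatever state the caller was in, so a bulk
allocation performed before interrupts are first enabled no longer switches
them on behind the caller's back (which is what triggered the lockdep
warning). A minimal sketch of the difference using the plain local_irq_*()
helpers; the SLUB hunks do the same thing with the
local_lock_irqsave()/local_unlock_irqrestore() pair on s->cpu_slab->lock:

    unsigned long flags;

    /* Old pattern: always exits with interrupts enabled, even if the
     * caller entered with them disabled (as during early boot).
     */
    local_irq_disable();
    /* ... bulk alloc/free work ... */
    local_irq_enable();

    /* New pattern: remember the caller's interrupt state and put it back. */
    local_irq_save(flags);
    /* ... bulk alloc/free work ... */
    local_irq_restore(flags);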