author | Vlastimil Babka <vbabka@suse.cz> | 2024-08-07 12:31:16 +0200 |
---|---|---|
committer | Vlastimil Babka <vbabka@suse.cz> | 2024-08-27 14:12:51 +0200 |
commit | f77d0cda4a8ebd070bfa1ef9a153c470ea3601ce (patch) | |
tree | a8631a29e48c7e612fa30e54dd8b53dec32d5f09 /mm/slab_common.c | |
parent | 4ec10268ed98a3d568a39861e7b7d0a0fa7cbe60 (diff) |
mm, slab: move kfence_shutdown_cache() outside slab_mutex
kfence_shutdown_cache() is called under slab_mutex when the cache is
destroyed synchronously, and outside slab_mutex during the delayed
destruction of SLAB_TYPESAFE_BY_RCU caches.
It seems it should always be safe to call it outside of slab_mutex, so we
can simply move the call into kmem_cache_release(), which is itself called
outside the mutex.
Reviewed-by: Jann Horn <jannh@google.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 8 |
1 file changed, 2 insertions, 6 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index db61df3b4282..a079b8540334 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -492,6 +492,7 @@ EXPORT_SYMBOL(kmem_buckets_create);
  */
 static void kmem_cache_release(struct kmem_cache *s)
 {
+	kfence_shutdown_cache(s);
 	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
 		sysfs_slab_release(s);
 	else
@@ -521,10 +522,8 @@ static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
 
 	rcu_barrier();
 
-	list_for_each_entry_safe(s, s2, &to_destroy, list) {
-		kfence_shutdown_cache(s);
+	list_for_each_entry_safe(s, s2, &to_destroy, list)
 		kmem_cache_release(s);
-	}
 }
 
 void slab_kmem_cache_release(struct kmem_cache *s)
@@ -563,9 +562,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
 
 	list_del(&s->list);
 
-	if (!err && !rcu_set)
-		kfence_shutdown_cache(s);
-
 	mutex_unlock(&slab_mutex);
 	cpus_read_unlock();
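
For reference, here is a minimal sketch of how the two destruction paths look after this change, reconstructed from the hunks above. The locals, the splicing of the pending-destruction list in the work function, and slab_kmem_cache_release() as the else-branch callee are assumptions based on surrounding code that the hunks do not show, not part of this patch.

```c
/*
 * Sketch of the post-patch call paths (reconstructed from the hunks above;
 * elided details are marked as assumptions).
 */

/* Both the synchronous and the delayed path converge here, outside slab_mutex. */
static void kmem_cache_release(struct kmem_cache *s)
{
	kfence_shutdown_cache(s);		/* moved here by this patch */
	if (__is_defined(SLAB_SUPPORTS_SYSFS) && slab_state >= FULL)
		sysfs_slab_release(s);
	else
		slab_kmem_cache_release(s);	/* assumed else-branch callee */
}

/* Delayed destruction of SLAB_TYPESAFE_BY_RCU caches, runs without slab_mutex held. */
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	struct kmem_cache *s, *s2;
	LIST_HEAD(to_destroy);

	/* ... caches queued for delayed destruction are spliced onto to_destroy ... */

	rcu_barrier();

	/* kfence_shutdown_cache() now happens inside kmem_cache_release(). */
	list_for_each_entry_safe(s, s2, &to_destroy, list)
		kmem_cache_release(s);
}
```

The synchronous kmem_cache_destroy() path likewise only reaches kmem_cache_release() after slab_mutex has been dropped, which is why the explicit kfence_shutdown_cache() call under the mutex could be removed.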