author    Christian Brauner <brauner@kernel.org>    2024-09-05 09:56:50 +0200
committer Vlastimil Babka <vbabka@suse.cz>          2024-09-10 11:42:58 +0200
commit    fc0eac57d08cad87b478a3be944899c81c950d43 (patch)
tree      6e454f720f61c8dd8be8777bb04f8519e807a459 /mm
parent    34410a906080e7f669330c45c8e38215fe09f481 (diff)
slab: pull kmem_cache_open() into do_kmem_cache_create()
do_kmem_cache_create() is the only caller and we're going to pass down
struct kmem_cache_args in a follow-up patch.

Reviewed-by: Kees Cook <kees@kernel.org>
Reviewed-by: Jens Axboe <axboe@kernel.dk>
Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Christian Brauner <brauner@kernel.org>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
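Besides the inlining, the rework below replaces the scattered early returns with
a single out: label, so __kmem_cache_release() runs from exactly one place
whenever err is still set on exit. A minimal standalone sketch of that
error-handling shape, with hypothetical setup_sizes()/setup_nodes()/release()
helpers standing in for the real slab internals:

    #include <errno.h>
    #include <stdio.h>

    struct cache { int sized, nodes; }; /* stand-in for struct kmem_cache */

    /* Hypothetical setup steps; each returns nonzero on success, mirroring
     * how calculate_sizes() and init_kmem_cache_nodes() are tested in the
     * patch. */
    static int setup_sizes(struct cache *c) { c->sized = 1; return 1; }
    static int setup_nodes(struct cache *c) { c->nodes = 1; return 1; }

    /* Stand-in for __kmem_cache_release(): tear down whatever was built. */
    static void release(struct cache *c) { c->sized = c->nodes = 0; }

    /* The shape of the reworked do_kmem_cache_create(): err defaults to
     * -EINVAL, every failure jumps to out, and cleanup runs exactly once. */
    static int create(struct cache *c)
    {
            int err = -EINVAL;

            if (!setup_sizes(c))
                    goto out;
            if (!setup_nodes(c))
                    goto out;

            err = 0;                /* success: the release below is skipped */
    out:
            if (err)
                    release(c);     /* single teardown point for all failures */
            return err;
    }

    int main(void)
    {
            struct cache c = { 0, 0 };
            printf("create() = %d\n", create(&c));
            return 0;
    }

The pessimistic err = -EINVAL default means a new setup step only needs a
goto out on failure, which is what lets the patch drop the separate error:
label that kmem_cache_open() carried.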
Diffstat (limited to 'mm')
-rw-r--r--    mm/slub.c    132
1 file changed, 62 insertions(+), 70 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 23d9d783ff26..30f4ca6335c7 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
return !!oo_objects(s->oo);
}
-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
- s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
- s->random = get_random_long();
-#endif
-
- if (!calculate_sizes(s))
- goto error;
- if (disable_higher_order_debug) {
- /*
- * Disable debugging flags that store metadata if the min slab
- * order increased.
- */
- if (get_order(s->size) > get_order(s->object_size)) {
- s->flags &= ~DEBUG_METADATA_FLAGS;
- s->offset = 0;
- if (!calculate_sizes(s))
- goto error;
- }
- }
-
-#ifdef system_has_freelist_aba
- if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
- /* Enable fast mode */
- s->flags |= __CMPXCHG_DOUBLE;
- }
-#endif
-
- /*
- * The larger the object size is, the more slabs we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
- s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
- set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
- s->remote_node_defrag_ratio = 1000;
-#endif
-
- /* Initialize the pre-computed randomized freelist if slab is up */
- if (slab_state >= UP) {
- if (init_cache_random_seq(s))
- goto error;
- }
-
- if (!init_kmem_cache_nodes(s))
- goto error;
-
- if (alloc_kmem_cache_cpus(s))
- return 0;
-
-error:
- __kmem_cache_release(s);
- return -EINVAL;
-}
-
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
const char *text)
{
@@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
- int err;
+ int err = -EINVAL;
- err = kmem_cache_open(s, flags);
- if (err)
- return err;
+ s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+ s->random = get_random_long();
+#endif
+
+ if (!calculate_sizes(s))
+ goto out;
+ if (disable_higher_order_debug) {
+ /*
+ * Disable debugging flags that store metadata if the min slab
+ * order increased.
+ */
+ if (get_order(s->size) > get_order(s->object_size)) {
+ s->flags &= ~DEBUG_METADATA_FLAGS;
+ s->offset = 0;
+ if (!calculate_sizes(s))
+ goto out;
+ }
+ }
+
+#ifdef system_has_freelist_aba
+ if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+ /* Enable fast mode */
+ s->flags |= __CMPXCHG_DOUBLE;
+ }
+#endif
+
+ /*
+ * The larger the object size is, the more slabs we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+ s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+ set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+ s->remote_node_defrag_ratio = 1000;
+#endif
+
+ /* Initialize the pre-computed randomized freelist if slab is up */
+ if (slab_state >= UP) {
+ if (init_cache_random_seq(s))
+ goto out;
+ }
+
+ if (!init_kmem_cache_nodes(s))
+ goto out;
+
+ if (!alloc_kmem_cache_cpus(s))
+ goto out;
/* Mutex is not taken during early boot */
- if (slab_state <= UP)
- return 0;
+ if (slab_state <= UP) {
+ err = 0;
+ goto out;
+ }
err = sysfs_slab_add(s);
- if (err) {
- __kmem_cache_release(s);
- return err;
- }
+ if (err)
+ goto out;
if (s->flags & SLAB_STORE_USER)
debugfs_slab_add(s);
- return 0;
+out:
+ if (err)
+ __kmem_cache_release(s);
+ return err;
}
#ifdef SLAB_SUPPORTS_SYSFS