author    | Chengming Zhou <chengming.zhou@linux.dev> | 2024-02-22 13:02:33 +0000
committer | Vlastimil Babka <vbabka@suse.cz> | 2024-03-01 16:52:14 +0100
commit    | 3dd549a557f7dc326d59c5fa105e230ebf3d5458
tree      | 3d914a5517a9dbb1a70be2658c33887e5300e1bd /mm/slub.c
parent    | 011568eb3117a1b0e1b2e980de37a4ec47952617
mm, slab: remove the corner case of inc_slabs_node()
We already have an inc_slabs_node() call after kmem_cache_node->node[node]
is initialized in early_kmem_cache_node_alloc(), so the special-case call
made before that initialization can be removed. Then inc_slabs_node() no
longer needs to consider whether the kmem_cache_node exists.
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm/slub.c')
-rw-r--r-- | mm/slub.c | 13
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 0e02e072693b..12066e69688d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1498,16 +1498,8 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
 	struct kmem_cache_node *n = get_node(s, node);
 
-	/*
-	 * May be called early in order to allocate a slab for the
-	 * kmem_cache_node structure. Solve the chicken-egg
-	 * dilemma by deferring the increment of the count during
-	 * bootstrap (see early_kmem_cache_node_alloc).
-	 */
-	if (likely(n)) {
-		atomic_long_inc(&n->nr_slabs);
-		atomic_long_add(objects, &n->total_objects);
-	}
+	atomic_long_inc(&n->nr_slabs);
+	atomic_long_add(objects, &n->total_objects);
 }
 static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
@@ -4855,7 +4847,6 @@ static void early_kmem_cache_node_alloc(int node)
 	slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
 
 	BUG_ON(!slab);
-	inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
 	if (slab_nid(slab) != node) {
 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
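To make the reasoning behind the removed NULL check concrete, below is a minimal user-space sketch of the ordering the patch relies on. The names and types here (early_node_alloc(), the MAX_NUMNODES value, plain long counters in place of atomic_long_t) are simplified stand-ins for illustration, not the kernel's definitions; the point is only that once the per-node structure is installed before the counters are bumped, inc_slabs_node() can dereference it unconditionally.

```c
/*
 * Simplified user-space model of the ordering described in the commit
 * message: the per-node structure is installed before any counting, so
 * inc_slabs_node() never sees a NULL node.  All names and types below
 * are illustrative stand-ins, not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define MAX_NUMNODES 2	/* arbitrary small value for the demo */

struct kmem_cache_node {
	long nr_slabs;
	long total_objects;
};

struct kmem_cache {
	struct kmem_cache_node *node[MAX_NUMNODES];
};

static struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/* Post-patch shape: no NULL check, callers must set up the node first. */
static void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	n->nr_slabs++;
	n->total_objects += objects;
}

/*
 * Stand-in for early_kmem_cache_node_alloc(): install the node structure
 * first, then account the slab that backs it.  One static instance is
 * enough for this single-node demo.
 */
static void early_node_alloc(struct kmem_cache *s, int node, int objects)
{
	static struct kmem_cache_node bootstrap_node;

	s->node[node] = &bootstrap_node;	/* node exists from here on */
	inc_slabs_node(s, node, objects);	/* safe: no deferred counting */
}

int main(void)
{
	struct kmem_cache cache = { 0 };

	early_node_alloc(&cache, 0, 32);
	assert(cache.node[0]->nr_slabs == 1);
	printf("node 0: %ld slab(s), %ld objects\n",
	       cache.node[0]->nr_slabs, cache.node[0]->total_objects);
	return 0;
}
```

In the kernel itself the same guarantee comes from early_kmem_cache_node_alloc() only calling inc_slabs_node() after kmem_cache_node->node[node] has been initialized, which is why the deferred-count path could be dropped.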