author     Matthew Wilcox (Oracle) <willy@infradead.org>   2021-10-04 14:45:58 +0100
committer  Vlastimil Babka <vbabka@suse.cz>                2022-01-06 12:25:51 +0100
commit     cc465c3b23f8db1896c1197d2f5799e97f6e617b (patch)
tree       cde2543f49dae2be439e0f0ee7984f915cd50b75 /mm/slub.c
parent     0b3eb091d5759479d44cb793fad2c51ea06bdcec (diff)
mm/slub: Convert detached_freelist to use a struct slab
This gives us a little bit of extra typesafety as we know that nobody
called virt_to_page() instead of virt_to_head_page().

[ vbabka@suse.cz: Use folio as intermediate step when filtering out
  large kmalloc pages ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Roman Gushchin <guro@fb.com>
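To illustrate the typesafety argument, here is a minimal sketch of the
folio-as-intermediate-step pattern. The helpers it calls (virt_to_folio(),
folio_test_slab(), folio_slab(), folio_page(), slab_page(),
free_nonslab_page(), slab_free()) are the ones the diff below actually
uses; the wrapper function sketch_free() itself is hypothetical and only
restates the converted free path outside the diff:

/* Hypothetical wrapper, for illustration only; not part of this patch. */
static void sketch_free(struct kmem_cache *s, void *object)
{
        struct folio *folio = virt_to_folio(object);

        /* A large kmalloc allocation is a bare compound page with no
         * slab metadata; filter it out at the folio level. */
        if (unlikely(!folio_test_slab(folio))) {
                free_nonslab_page(folio_page(folio, 0), object);
                return;
        }

        /* Only past the folio_test_slab() check is it type-safe to
         * treat the memory as a struct slab. */
        slab_free(s, slab_page(folio_slab(folio)), object, NULL, 1,
                  _RET_IP_);
}

Because virt_to_folio() always resolves to the head of a compound page, a
caller can no longer accidentally operate on a tail page, and struct slab
is only ever derived after the slab check.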
Diffstat (limited to 'mm/slub.c')
-rw-r--r--   mm/slub.c   31
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 8b82188849ae..a45b74d2712f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3532,7 +3532,7 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
 EXPORT_SYMBOL(kmem_cache_free);
 
 struct detached_freelist {
-        struct page *page;
+        struct slab *slab;
         void *tail;
         void *freelist;
         int cnt;
@@ -3554,8 +3554,8 @@ static inline void free_nonslab_page(struct page *page, void *object)
 /*
  * This function progressively scans the array with free objects (with
  * a limited look ahead) and extract objects belonging to the same
- * page. It builds a detached freelist directly within the given
- * page/objects. This can happen without any need for
+ * slab. It builds a detached freelist directly within the given
+ * slab/objects. This can happen without any need for
  * synchronization, because the objects are owned by running process.
  * The freelist is build up as a single linked list in the objects.
 * The idea is, that this detached freelist can then be bulk
@@ -3570,10 +3570,11 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
         size_t first_skipped_index = 0;
         int lookahead = 3;
         void *object;
-        struct page *page;
+        struct folio *folio;
+        struct slab *slab;
 
         /* Always re-init detached_freelist */
-        df->page = NULL;
+        df->slab = NULL;
 
         do {
                 object = p[--size];
@@ -3583,17 +3584,19 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
         if (!object)
                 return 0;
 
-        page = virt_to_head_page(object);
+        folio = virt_to_folio(object);
         if (!s) {
                 /* Handle kalloc'ed objects */
-                if (unlikely(!PageSlab(page))) {
-                        free_nonslab_page(page, object);
+                if (unlikely(!folio_test_slab(folio))) {
+                        free_nonslab_page(folio_page(folio, 0), object);
                         p[size] = NULL; /* mark object processed */
                         return size;
                 }
                 /* Derive kmem_cache from object */
-                df->s = page->slab_cache;
+                slab = folio_slab(folio);
+                df->s = slab->slab_cache;
         } else {
+                slab = folio_slab(folio);
                 df->s = cache_from_obj(s, object); /* Support for memcg */
         }
 
@@ -3605,7 +3608,7 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
         }
 
         /* Start new detached freelist */
-        df->page = page;
+        df->slab = slab;
         set_freepointer(df->s, object, NULL);
         df->tail = object;
         df->freelist = object;
@@ -3617,8 +3620,8 @@ int build_detached_freelist(struct kmem_cache *s, size_t size,
                 if (!object)
                         continue; /* Skip processed objects */
 
-                /* df->page is always set at this point */
-                if (df->page == virt_to_head_page(object)) {
+                /* df->slab is always set at this point */
+                if (df->slab == virt_to_slab(object)) {
                         /* Opportunity build freelist */
                         set_freepointer(df->s, object, df->freelist);
                         df->freelist = object;
@@ -3650,10 +3653,10 @@ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
                 struct detached_freelist df;
 
                 size = build_detached_freelist(s, size, p, &df);
-                if (!df.page)
+                if (!df.slab)
                         continue;
 
-                slab_free(df.s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
+                slab_free(df.s, slab_page(df.slab), df.freelist, df.tail, df.cnt, _RET_IP_);
         } while (likely(size));
 }
 EXPORT_SYMBOL(kmem_cache_free_bulk);
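For context, kmem_cache_free_bulk() is the exported entry point that drives
build_detached_freelist() in a loop until the whole array is consumed. A
minimal usage sketch, assuming an existing cache (my_cache, objs and the
batch size are illustrative names, not part of this commit):

void *objs[16];
int n;

/* Allocate a batch of objects from one cache. */
n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);

/* Free the whole batch in one call; internally the objects are
 * grouped per slab via the detached freelists built above. */
if (n)
        kmem_cache_free_bulk(my_cache, n, objs);

Because build_detached_freelist() only groups objects that share a struct
slab, a batch spanning many slabs still works: each pass peels off one
slab's worth of objects and frees them with a single slab_free() call.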