author    | Christoph Lameter <clameter@sgi.com>                 | 2007-10-16 01:26:06 -0700
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 09:43:01 -0700
commit    | 8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f (patch)
tree      | 4f690448c1363bf02f74abd9293126c3e3a9e4c9
parent    | dfb4f09609827301740ef0a11b37530d190f1681 (diff)
SLUB: Do not use page->mapping
After moving the lockless_freelist to kmem_cache_cpu we no longer need
page->lockless_freelist. Restructure the use of the struct page fields in
such a way that we never touch the mapping field.
This in turn allows us to remove the special casing of SLUB when determining
the mapping of a page (needed for the corner case of machines with virtual
caches, which must flush the caches of all processors mapping a page).
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
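
Why this removes the special case: before this patch, SLUB's two per-slab words
(lockless_freelist and slab) sat in an anonymous struct inside the union of
struct page that also holds the private/mapping pair, so the slab pointer shared
its word with mapping -- which is why __free_slab() had to clear page->mapping
and page_mapping() needed a CONFIG_SLUB branch. With the lockless freelist now
in struct kmem_cache_cpu, slab and first_page become plain union members and
overlap only the first word. The following stand-alone sketch (not kernel code;
demo_page, slab_old and the forward-declared stand-in types are hypothetical,
and the private/mapping pair is recalled from the surrounding mm_types.h
definition rather than shown in the hunk below) illustrates the aliasing with
offsetof:

/* Stand-alone illustration: why 'slab' no longer aliases 'mapping'
 * once it is a direct union member rather than the second word of a
 * nested struct.  All types here are stand-ins, not kernel types.
 */
#include <stdio.h>
#include <stddef.h>

struct address_space;
struct kmem_cache;
struct page;

struct demo_page {
	union {
		struct {
			unsigned long private;		/* word 0 of the union */
			struct address_space *mapping;	/* word 1 */
		};
		struct {				/* old SLUB layout */
			void **lockless_freelist;	/* word 0 */
			struct kmem_cache *slab_old;	/* word 1: aliases mapping */
		};
		struct kmem_cache *slab;	/* new layout: word 0 only */
		struct page *first_page;	/* compound tail: word 0 only */
	};
};

int main(void)
{
	printf("mapping  at offset %zu\n", offsetof(struct demo_page, mapping));
	printf("old slab at offset %zu\n", offsetof(struct demo_page, slab_old));
	printf("new slab at offset %zu\n", offsetof(struct demo_page, slab));
	return 0;
}

On a typical 64-bit build this prints mapping and the old slab pointer at
offset 8 but the new slab pointer at offset 0: SLUB never writes the word that
aliases mapping, so a slab page's mapping stays NULL and the CONFIG_SLUB check
in page_mapping() becomes unnecessary.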
-rw-r--r-- | include/linux/mm.h       | 4
-rw-r--r-- | include/linux/mm_types.h | 9
-rw-r--r-- | mm/slub.c                | 2
3 files changed, 2 insertions, 13 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a68d41444f8..292c68623759 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -568,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 145b3d053048..0cdc8fbf6431 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 	    spinlock_t ptl;
 #endif
-	    struct {			/* SLUB uses */
-		void **lockless_freelist;
-		struct kmem_cache *slab;	/* Pointer to slab */
-	    };
-	    struct {
-		struct page *first_page;	/* Compound pages */
-	    };
+	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+	    struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
diff --git a/mm/slub.c b/mm/slub.c
index 4b8037f14fce..aa8bb072651b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1127,7 +1127,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
-	page->lockless_freelist = NULL;
 	page->inuse = 0;
 out:
 	if (flags & __GFP_WAIT)
@@ -1153,7 +1152,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
 		- pages);
 
-	page->mapping = NULL;
 	__free_pages(page, s->order);
 }