-rw-r--r--  include/linux/mmzone.h | 12 ++++++++++++
-rw-r--r--  mm/page_alloc.c        | 11 +++++++++--
-rw-r--r--  mm/shuffle.c           | 23 +++++++++++++++++++++++
-rw-r--r--  mm/shuffle.h           | 12 ++++++++++++
4 files changed, 56 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index acd6f9cb01bd..70394cabaf4e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -116,6 +116,18 @@ static inline void add_to_free_area_tail(struct page *page, struct free_area *ar
 	area->nr_free++;
 }
 
+#ifdef CONFIG_SHUFFLE_PAGE_ALLOCATOR
+/* Used to preserve page allocation order entropy */
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype);
+#else
+static inline void add_to_free_area_random(struct page *page,
+		struct free_area *area, int migratetype)
+{
+	add_to_free_area(page, area, migratetype);
+}
+#endif
+
 /* Used for pages which are on another list */
 static inline void move_to_free_area(struct page *page, struct free_area *area,
 		int migratetype)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b674625762c4..3b13d3914176 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -43,6 +43,7 @@
 #include <linux/mempolicy.h>
 #include <linux/memremap.h>
 #include <linux/stop_machine.h>
+#include <linux/random.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
@@ -958,7 +959,8 @@ done_merging:
 	 * so it's less likely to be used soon and more likely to be merged
 	 * as a higher order page
 	 */
-	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
+	if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
+			&& !is_shuffle_order(order)) {
 		struct page *higher_page, *higher_buddy;
 		combined_pfn = buddy_pfn & pfn;
 		higher_page = page + (combined_pfn - pfn);
@@ -972,7 +974,12 @@ done_merging:
 		}
 	}
 
-	add_to_free_area(page, &zone->free_area[order], migratetype);
+	if (is_shuffle_order(order))
+		add_to_free_area_random(page, &zone->free_area[order],
+				migratetype);
+	else
+		add_to_free_area(page, &zone->free_area[order], migratetype);
+
 }
 
 /*
diff --git a/mm/shuffle.c b/mm/shuffle.c
index bc0419a61fbe..3ce12481b1dc 100644
--- a/mm/shuffle.c
+++ b/mm/shuffle.c
@@ -182,3 +182,26 @@ void __meminit __shuffle_free_memory(pg_data_t *pgdat)
 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
 		shuffle_zone(z);
 }
+
+void add_to_free_area_random(struct page *page, struct free_area *area,
+		int migratetype)
+{
+	static u64 rand;
+	static u8 rand_bits;
+
+	/*
+	 * The lack of locking is deliberate. If 2 threads race to
+	 * update the rand state it just adds to the entropy.
+	 */
+	if (rand_bits == 0) {
+		rand_bits = 64;
+		rand = get_random_u64();
+	}
+
+	if (rand & 1)
+		add_to_free_area(page, area, migratetype);
+	else
+		add_to_free_area_tail(page, area, migratetype);
+	rand_bits--;
+	rand >>= 1;
+}
diff --git a/mm/shuffle.h b/mm/shuffle.h
index 644c8ee97b9e..777a257a0d2f 100644
--- a/mm/shuffle.h
+++ b/mm/shuffle.h
@@ -36,6 +36,13 @@ static inline void shuffle_zone(struct zone *z)
 		return;
 	__shuffle_zone(z);
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	if (!static_branch_unlikely(&page_alloc_shuffle_key))
+		return false;
+	return order >= SHUFFLE_ORDER;
+}
 #else
 static inline void shuffle_free_memory(pg_data_t *pgdat)
 {
@@ -48,5 +55,10 @@ static inline void shuffle_zone(struct zone *z)
 static inline void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 {
 }
+
+static inline bool is_shuffle_order(int order)
+{
+	return false;
+}
 #endif
 #endif /* _MM_SHUFFLE_H */
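
The key trick in add_to_free_area_random() above is amortizing the entropy source: one get_random_u64() call is cached in static state and spent one bit at a time, so 64 head-vs-tail decisions cost a single trip to the RNG. Below is a minimal userspace sketch of that bit-caching pattern, not kernel code: free_page_random(), list_add_head(), list_add_tail() and random_u64() are hypothetical stand-ins for the kernel helpers, and libc rand() substitutes for get_random_u64().

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical stand-ins for the kernel's free-list helpers. */
static void list_add_head(int page) { printf("page %d -> head\n", page); }
static void list_add_tail(int page) { printf("page %d -> tail\n", page); }

/* Build 64 random bits from rand(); stands in for get_random_u64(). */
static uint64_t random_u64(void)
{
	uint64_t r = 0;
	int i;

	/* rand() guarantees only 15 bits, so take 13 bits per call. */
	for (i = 0; i < 5; i++)
		r = (r << 13) | (uint64_t)(rand() & 0x1fff);
	return r;
}

/*
 * Same pattern as add_to_free_area_random(): refill a static 64-bit
 * cache when it runs dry, then consume one bit per head/tail decision.
 */
static void free_page_random(int page)
{
	static uint64_t rand_word;
	static uint8_t rand_bits;

	if (rand_bits == 0) {
		rand_bits = 64;
		rand_word = random_u64();
	}

	if (rand_word & 1)
		list_add_head(page);
	else
		list_add_tail(page);
	rand_bits--;
	rand_word >>= 1;
}

int main(void)
{
	int page;

	srand((unsigned)time(NULL));
	for (page = 0; page < 8; page++)
		free_page_random(page);
	return 0;
}

As the patch's own comment notes, the kernel version deliberately leaves the static cache unlocked: if two frees race, they merely scramble which bits each consumer sees, which is harmless for a randomized head-vs-tail choice and avoids taking a lock in the page-free hot path.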