author     Vishal Moola (Oracle) <vishal.moola@gmail.com>   2022-12-21 10:08:48 -0800
committer  Andrew Morton <akpm@linux-foundation.org>        2023-01-18 17:12:47 -0800
commit     5a9e34747c9f731bbb6b7fd7521c4fec0d840593 (patch)
tree       59fd6abae02f33f684f536b34ceeac6ecd07e32e
parent     f70da5ee8fe15b21501613ccab27eb2f722a3394 (diff)
mm/swap: convert deactivate_page() to folio_deactivate()
deactivate_page() has already been converted to use folios internally; this
change converts it to take a folio argument instead of calling page_folio()
itself.  It also renames the function to folio_deactivate(), to be more
consistent with other folio functions.
[akpm@linux-foundation.org: fix left-over comments, per Yu Zhao]
Link: https://lkml.kernel.org/r/20221221180848.20774-5-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
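To illustrate the calling-convention change, here is a minimal sketch (the
example_reclaim_*() functions are hypothetical, not part of this patch):
callers that already hold a struct folio no longer reach back through
&folio->page, and the page_folio() round-trip inside the old
deactivate_page() disappears.

/* Hypothetical caller before this patch: the page-based interface made
 * the caller convert its folio back to a page, only for deactivate_page()
 * to immediately convert it to a folio again via page_folio().
 */
static void example_reclaim_old(struct folio *folio)
{
	deactivate_page(&folio->page);
}

/* The same hypothetical caller after this patch: the folio is passed
 * straight through, with no intermediate struct page.
 */
static void example_reclaim_new(struct folio *folio)
{
	folio_deactivate(folio);
}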
-rw-r--r--   include/linux/swap.h |  2 +-
-rw-r--r--   mm/damon/paddr.c     |  2 +-
-rw-r--r--   mm/madvise.c         |  4 ++--
-rw-r--r--   mm/page-writeback.c  |  4 ++--
-rw-r--r--   mm/swap.c            | 14 ++++++--------
-rw-r--r--   mm/vmscan.c          |  2 +-
6 files changed, 13 insertions(+), 15 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 93f1cebd8545..87cecb8c0bdc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -401,7 +401,7 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
-extern void deactivate_page(struct page *page);
+void folio_deactivate(struct folio *folio);
 void folio_mark_lazyfree(struct folio *folio);
 extern void swap_setup(void);
 
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 884c8bf18b12..6334c99e5152 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -297,7 +297,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		if (mark_accessed)
 			folio_mark_accessed(folio);
 		else
-			deactivate_page(&folio->page);
+			folio_deactivate(folio);
 		folio_put(folio);
 		applied += folio_nr_pages(folio);
 	}
diff --git a/mm/madvise.c b/mm/madvise.c
index 575ebf0363b8..e407d335e614 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -416,7 +416,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 				list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(&folio->page);
+			folio_deactivate(folio);
 huge_unlock:
 	spin_unlock(ptl);
 	if (pageout)
@@ -510,7 +510,7 @@ regular_folio:
 				list_add(&folio->lru, &folio_list);
 			}
 		} else
-			deactivate_page(&folio->page);
+			folio_deactivate(folio);
 	}
 
 	arch_leave_lazy_mmu_mode();
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ad608ef2a243..41128ea9c997 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2846,11 +2846,11 @@ bool folio_mark_dirty(struct folio *folio)
 
 	if (likely(mapping)) {
 		/*
-		 * readahead/lru_deactivate_page could remain
+		 * readahead/folio_deactivate could remain
 		 * PG_readahead/PG_reclaim due to race with folio_end_writeback
 		 * About readahead, if the folio is written, the flags would be
 		 * reset. So no problem.
-		 * About lru_deactivate_page, if the folio is redirtied,
+		 * About folio_deactivate, if the folio is redirtied,
 		 * the flag will be reset. So no problem. but if the
 		 * folio is used by readahead it will confuse readahead
 		 * and make it restart the size rampup process. But it's
diff --git a/mm/swap.c b/mm/swap.c
index 5e5eba186930..e54e2a252e27 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -733,17 +733,15 @@ void deactivate_file_folio(struct folio *folio)
 }
 
 /*
- * deactivate_page - deactivate a page
- * @page: page to deactivate
+ * folio_deactivate - deactivate a folio
+ * @folio: folio to deactivate
  *
- * deactivate_page() moves @page to the inactive list if @page was on the active
- * list and was not an unevictable page. This is done to accelerate the reclaim
- * of @page.
+ * folio_deactivate() moves @folio to the inactive list if @folio was on the
+ * active list and was not unevictable. This is done to accelerate the
+ * reclaim of @folio.
  */
-void deactivate_page(struct page *page)
+void folio_deactivate(struct folio *folio)
 {
-	struct folio *folio = page_folio(page);
-
 	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
 	    (folio_test_active(folio) || lru_gen_enabled())) {
 		struct folio_batch *fbatch;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd6637fcd8f9..aa8c252949da 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1920,7 +1920,7 @@ retry:
 			   !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
-				 * Similar in principle to deactivate_page()
+				 * Similar in principle to folio_deactivate()
 				 * except we already have the folio isolated
 				 * and know it's dirty
 				 */