From b9c91c43412f2e07a5287dfe7027acdd8fb0b1ef Mon Sep 17 00:00:00 2001
From: Yosry Ahmed
Date: Wed, 7 Jun 2023 19:51:43 +0000
Subject: mm: zswap: support exclusive loads

Commit 71024cb4a0bf ("frontswap: remove frontswap_tmem_exclusive_gets")
removed support for exclusive loads from frontswap as it was not used.
Bring back exclusive loads support to frontswap by adding an "exclusive"
output parameter to frontswap_ops->load.

On the zswap side, add a module parameter to enable/disable exclusive
loads, and a config option to control the boot default value.  Refactor
zswap entry invalidation in zswap_frontswap_invalidate_page() into
zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if
exclusive loads are enabled.

With exclusive loads, we avoid having two copies of the same page in
memory (compressed & uncompressed) after faulting it in from zswap.  On
the other hand, if the page is to be reclaimed again without being
dirtied, it will be re-compressed.  Compression is not usually slow, and
a page that was just faulted in is less likely to be reclaimed again
soon.

Link: https://lkml.kernel.org/r/20230607195143.1473802-1-yosryahmed@google.com
Signed-off-by: Yosry Ahmed
Suggested-by: Yu Zhao
Acked-by: Johannes Weiner
Cc: Dan Streetman
Cc: Domenico Cerasuolo
Cc: Konrad Rzeszutek Wilk
Cc: Nhat Pham
Cc: Seth Jennings
Cc: Vitaly Wool
Signed-off-by: Andrew Morton
---
 mm/Kconfig     | 16 ++++++++++++++++
 mm/frontswap.c | 10 ++++++++--
 mm/zswap.c     | 28 ++++++++++++++++++++--------
 3 files changed, 44 insertions(+), 10 deletions(-)

diff --git a/mm/Kconfig b/mm/Kconfig
index 7672a22647b4..12f32f8d26bf 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON
 	  The selection made here can be overridden by using the kernel
 	  command line 'zswap.enabled=' option.
 
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
+	bool "Invalidate zswap entries when pages are loaded"
+	depends on ZSWAP
+	help
+	  If selected, exclusive loads for zswap will be enabled at boot,
+	  otherwise it will be disabled.
+
+	  If exclusive loads are enabled, when a page is loaded from zswap,
+	  the zswap entry is invalidated at once, as opposed to leaving it
+	  in zswap until the swap entry is freed.
+
+	  This avoids having two copies of the same page in memory
+	  (compressed and uncompressed) after faulting in a page from zswap.
+	  The cost is that if the page was never dirtied and needs to be
+	  swapped out again, it will be re-compressed.
+
 choice
 	prompt "Default compressor"
 	depends on ZSWAP
diff --git a/mm/frontswap.c b/mm/frontswap.c
index 279e55b4ed87..2fb5df3384b8 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -206,6 +206,7 @@ int __frontswap_load(struct page *page)
 	int type = swp_type(entry);
 	struct swap_info_struct *sis = swap_info[type];
 	pgoff_t offset = swp_offset(entry);
+	bool exclusive = false;
 
 	VM_BUG_ON(!frontswap_ops);
 	VM_BUG_ON(!PageLocked(page));
@@ -215,9 +216,14 @@ int __frontswap_load(struct page *page)
 		return -1;
 
 	/* Try loading from each implementation, until one succeeds. */
-	ret = frontswap_ops->load(type, offset, page);
-	if (ret == 0)
+	ret = frontswap_ops->load(type, offset, page, &exclusive);
+	if (ret == 0) {
 		inc_frontswap_loads();
+		if (exclusive) {
+			SetPageDirty(page);
+			__frontswap_clear(sis, offset);
+		}
+	}
 	return ret;
 }
 
diff --git a/mm/zswap.c b/mm/zswap.c
index bcb82e09eb64..9fa86265f6dd 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true;
 module_param_named(non_same_filled_pages_enabled,
 		   zswap_non_same_filled_pages_enabled, bool, 0644);
 
+static bool zswap_exclusive_loads_enabled = IS_ENABLED(
+		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
+module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
+
 /*********************************
 * data structures
 **********************************/
@@ -1340,12 +1344,22 @@ shrink:
 	goto reject;
 }
 
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+				   struct zswap_entry *entry)
+{
+	/* remove from rbtree */
+	zswap_rb_erase(&tree->rbroot, entry);
+
+	/* drop the initial reference from entry creation */
+	zswap_entry_put(tree, entry);
+}
+
 /*
  * returns 0 if the page was successfully decompressed
  * return -1 on entry not found or error
  */
 static int zswap_frontswap_load(unsigned type, pgoff_t offset,
-				struct page *page)
+				struct page *page, bool *exclusive)
 {
 	struct zswap_tree *tree = zswap_trees[type];
 	struct zswap_entry *entry;
@@ -1415,6 +1429,10 @@ stats:
 freeentry:
 	spin_lock(&tree->lock);
 	zswap_entry_put(tree, entry);
+	if (!ret && zswap_exclusive_loads_enabled) {
+		zswap_invalidate_entry(tree, entry);
+		*exclusive = true;
+	}
 	spin_unlock(&tree->lock);
 
 	return ret;
@@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
 		spin_unlock(&tree->lock);
 		return;
 	}
-
-	/* remove from rbtree */
-	zswap_rb_erase(&tree->rbroot, entry);
-
-	/* drop the initial reference from entry creation */
-	zswap_entry_put(tree, entry);
-
+	zswap_invalidate_entry(tree, entry);
 	spin_unlock(&tree->lock);
 }
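
Editor's note, not part of the patch: with this change applied, exclusive
loads can be toggled at runtime through the module parameter the patch adds
(/sys/module/zswap/parameters/exclusive_loads, or zswap.exclusive_loads=Y on
the kernel command line when zswap is built in).  For readers implementing a
frontswap backend against the new frontswap_ops->load() signature, the sketch
below shows how a backend other than zswap might report an exclusive load.
Only the four-argument load() prototype and the caller's behavior come from
the patch; the my_backend_* helpers are hypothetical placeholders.

/*
 * Minimal sketch of a backend load callback under the new signature.
 * my_backend_decompress_to() and my_backend_free() are illustrative
 * assumptions, not real kernel APIs.
 */
static int my_backend_load(unsigned type, pgoff_t offset,
			   struct page *page, bool *exclusive)
{
	if (my_backend_decompress_to(page, type, offset))
		return -1;	/* entry not found or decompression failed */

	/*
	 * Drop the backend copy and report the load as exclusive.
	 * __frontswap_load() will then SetPageDirty() the page and clear
	 * the frontswap map bit, so a later reclaim of the undirtied page
	 * goes back through the full store path instead of assuming a
	 * copy still exists in the backend.
	 */
	my_backend_free(type, offset);
	*exclusive = true;
	return 0;
}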