-rw-r--r--   mm/Kconfig | 16
-rw-r--r--   mm/zswap.c | 14
2 files changed, 3 insertions, 27 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 88ba99d84ac3..2b267553f793 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -45,22 +45,6 @@ config ZSWAP_DEFAULT_ON
 	  The selection made here can be overridden by using the kernel
 	  command line 'zswap.enabled=' option.
 
-config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON
-	bool "Invalidate zswap entries when pages are loaded"
-	depends on ZSWAP
-	help
-	  If selected, exclusive loads for zswap will be enabled at boot,
-	  otherwise it will be disabled.
-
-	  If exclusive loads are enabled, when a page is loaded from zswap,
-	  the zswap entry is invalidated at once, as opposed to leaving it
-	  in zswap until the swap entry is freed.
-
-	  This avoids having two copies of the same page in memory
-	  (compressed and uncompressed) after faulting in a page from zswap.
-	  The cost is that if the page was never dirtied and needs to be
-	  swapped out again, it will be re-compressed.
-
 config ZSWAP_SHRINKER_DEFAULT_ON
 	bool "Shrink the zswap pool on memory pressure"
 	depends on ZSWAP
diff --git a/mm/zswap.c b/mm/zswap.c
index 91df925359c8..7a69142817cb 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -139,10 +139,6 @@ static bool zswap_non_same_filled_pages_enabled = true;
 module_param_named(non_same_filled_pages_enabled,
 		   zswap_non_same_filled_pages_enabled, bool, 0644);
 
-static bool zswap_exclusive_loads_enabled = IS_ENABLED(
-		CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON);
-module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644);
-
 /* Number of zpools in zswap_pool (empirically determined for scalability) */
 #define ZSWAP_NR_ZPOOLS 32
 
@@ -1723,16 +1719,12 @@ bool zswap_load(struct folio *folio)
 		count_objcg_event(entry->objcg, ZSWPIN);
 
 	spin_lock(&tree->lock);
-	if (zswap_exclusive_loads_enabled) {
-		zswap_invalidate_entry(tree, entry);
-		folio_mark_dirty(folio);
-	} else if (entry->length) {
-		zswap_lru_del(&entry->pool->list_lru, entry);
-		zswap_lru_add(&entry->pool->list_lru, entry);
-	}
+	zswap_invalidate_entry(tree, entry);
 	zswap_entry_put(entry);
 	spin_unlock(&tree->lock);
 
+	folio_mark_dirty(folio);
+
 	return true;
 }
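
The removed Kconfig help text describes the exclusive-load trade-off that this patch makes unconditional: dropping the compressed copy as soon as the page is faulted in keeps only one copy in memory, at the cost of re-compressing a clean page if it is swapped out again. The following userspace toy model is a minimal sketch of that trade-off only; it is not kernel code, and all names in it (toy_entry, toy_page, toy_store, toy_load_exclusive) are hypothetical stand-ins for the zswap entry, the folio, zswap_store() and the patched zswap_load().

/* toy_exclusive_load.c -- illustrative sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct toy_entry {            /* stand-in for a zswap entry */
	bool present;         /* compressed copy still cached? */
	char data[64];        /* "compressed" payload */
};

struct toy_page {             /* stand-in for a faulted-in page */
	bool dirty;           /* must be written back before reuse */
	char data[64];
};

static int compress_count;    /* counts (re)compressions */

/* Swap-out: compress the page into the cache entry. */
static void toy_store(struct toy_entry *e, const struct toy_page *p)
{
	memcpy(e->data, p->data, sizeof(e->data));  /* pretend-compress */
	e->present = true;
	compress_count++;
}

/* Swap-in with exclusive loads: the cached copy is invalidated at once
 * and the page is marked dirty, mirroring zswap_invalidate_entry() and
 * folio_mark_dirty() in the patched zswap_load(). */
static void toy_load_exclusive(struct toy_entry *e, struct toy_page *p)
{
	memcpy(p->data, e->data, sizeof(p->data));  /* pretend-decompress */
	e->present = false;   /* only one copy remains in memory */
	p->dirty = true;      /* a clean discard would otherwise lose the data */
}

int main(void)
{
	struct toy_entry entry = { 0 };
	struct toy_page page = { 0 };

	strcpy(page.data, "hello zswap");
	toy_store(&entry, &page);            /* first swap-out */
	toy_load_exclusive(&entry, &page);   /* fault back in */

	printf("compressed copy cached: %s\n", entry.present ? "yes" : "no");
	printf("page dirty: %s\n", page.dirty ? "yes" : "no");

	/* Swapping out again re-compresses, even though the application never
	 * modified the page -- the cost the removed help text mentions. */
	toy_store(&entry, &page);
	printf("compressions so far: %d\n", compress_count);
	return 0;
}

In this model the second toy_store() reports two compressions for a page that was loaded once and never modified, which is why the patched zswap_load() marks the folio dirty: with the compressed copy gone, the page must go through writeback (and re-compression) rather than being discarded as clean.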