author		Linus Torvalds <torvalds@linux-foundation.org>	2023-04-27 19:42:02 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-04-27 19:42:02 -0700
commit		7fa8a8ee9400fe8ec188426e40e481717bc5e924 (patch)
tree		cc8fd6b4f936ec01e73238643757451e20478c07 /mm/zswap.c
parent		91ec4b0d11fe115581ce2835300558802ce55e6c (diff)
parent		4d4b6d66db63ceed399f1fb1a4b24081d2590eb1 (diff)
Merge tag 'mm-stable-2023-04-27-15-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
- Nick Piggin's "shoot lazy tlbs" series, to improve the performance of
switching from a user process to a kernel thread.
- More folio conversions from Kefeng Wang, Zhang Peng and Pankaj
Raghav.
- zsmalloc performance improvements from Sergey Senozhatsky.
- Yue Zhao has found and fixed some data race issues around the
alteration of memcg userspace tunables.
- VFS rationalizations from Christoph Hellwig:
- removal of most of the callers of write_one_page()
- make __filemap_get_folio()'s return value more useful
- Luis Chamberlain has changed tmpfs so it no longer requires swap
  backing. Use `mount -o noswap' (see the mount sketch after this
  list).
- Qi Zheng has made the slab shrinkers operate locklessly, providing
some scalability benefits.
- Keith Busch has improved dmapool's performance, making part of its
operations O(1) rather than O(n).
- Peter Xu adds the UFFD_FEATURE_WP_UNPOPULATED feature to userfaultfd,
  permitting userspace to write-protect unpopulated PTEs in anonymous
  memory.
- Kirill Shutemov has changed MAX_ORDER's meaning to be inclusive
  rather than exclusive, and has fixed a bunch of errors which were
  caused by its unintuitive meaning (a sketch after this list
  illustrates the new convention).
- Axel Rasmussen gives userfaultfd the UFFDIO_CONTINUE_MODE_WP feature,
which causes minor faults to install a write-protected pte.
- Vlastimil Babka has done some maintenance work on vma_merge():
cleanups to the kernel code and improvements to our userspace test
harness.
- Cleanups to do_fault_around() by Lorenzo Stoakes.
- Mike Rapoport has moved a lot of initialization code out of various
mm/ files and into mm/mm_init.c.
- Lorenzo Stoakes removed vmf_insert_mixed_prot(), which was added for
DRM, but DRM doesn't use it any more.
- Lorenzo has also converted read_kcore() and vread() to use iterators
and has thereby removed the use of bounce buffers in some cases.
- Lorenzo has also contributed further cleanups of vma_merge().
- Chaitanya Prakash provides some fixes to the mmap selftesting code.
- Matthew Wilcox changes xfs and afs so they no longer take sleeping
  locks in ->map_pages(), a step towards RCUification of page faults.
- Suren Baghdasaryan has improved mmap_lock scalability by switching to
per-VMA locking.
- Frederic Weisbecker has reworked the percpu cache draining so that it
no longer causes latency glitches on cpu isolated workloads.
- Mike Rapoport cleans up and corrects the ARCH_FORCE_MAX_ORDER Kconfig
logic.
- Liu Shixin has changed zswap's initialization so we no longer waste a
chunk of memory if zswap is not being used.
- Yosry Ahmed has improved the performance of memcg statistics
flushing.
- David Stevens has fixed several issues involving khugepaged,
userfaultfd and shmem.
- Christoph Hellwig has provided some cleanup work to zram's IO-related
code paths.
- David Hildenbrand has fixed up some issues in the selftest code's
  testing of our PTE state changes.
- Pankaj Raghav has made page_endio() unneeded and has removed it.
- Peter Xu contributed some rationalizations of the userfaultfd
selftests.
- Yosry Ahmed has fixed an issue around memcg's page reclaim
accounting.
- Chaitanya Prakash has fixed some arm-related issues in the
selftests/mm code.
- Longlong Xia has improved the way in which KSM handles hwpoisoned
pages.
- Peter Xu fixes a few issues with uffd-wp at fork() time.
- Stefan Roesch has changed KSM so that it may now be used on a
  per-process and per-cgroup basis (see the prctl() sketch after this
  list).
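
The tmpfs change above can be exercised with `mount -t tmpfs -o noswap
tmpfs /mnt/tmp', or programmatically via mount(2). A minimal sketch
follows; the mount point is an arbitrary example, it needs root, and
the option is rejected by kernels that predate this merge:

/* Hedged sketch: mount a tmpfs instance whose pages are never swapped,
 * the equivalent of `mount -t tmpfs -o noswap tmpfs /mnt/tmp'.
 * /mnt/tmp is an example path; requires root.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0, "noswap") != 0) {
		perror("mount");
		return 1;
	}
	printf("tmpfs mounted at /mnt/tmp with swap backing disabled\n");
	return 0;
}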
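
The MAX_ORDER change is purely a convention switch: the constant now
names the largest valid allocation order rather than one past it, so
loops and bounds checks move from `<'/`>=' to `<='/`>'. The toy program
below only illustrates that convention; the value 10 is an assumed
example, not the real per-architecture constant:

/* Illustration of the new inclusive MAX_ORDER convention; the value is
 * an assumed example, real values depend on architecture and config.
 */
#include <stdio.h>

#define MAX_ORDER 10	/* inclusive: the largest valid order */

int main(void)
{
	/* old exclusive convention: for (order = 0; order < MAX_ORDER; ...) */
	for (unsigned int order = 0; order <= MAX_ORDER; order++)
		printf("order %u spans %lu pages\n", order, 1UL << order);
	return 0;
}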
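
The per-process KSM interface is a prctl() pair (PR_SET_MEMORY_MERGE /
PR_GET_MEMORY_MERGE). A minimal sketch, assuming uapi headers that
already carry the new constant; the fallback value 67 mirrors the
series and is an assumption for older headers, and the call may require
CAP_SYS_RESOURCE:

/* Hedged sketch: opt the calling process's anonymous VMAs into KSM.
 * The fallback define is an assumption for builds against older uapi
 * headers; the prctl may require CAP_SYS_RESOURCE.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67
#endif

int main(void)
{
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_MEMORY_MERGE)");
		return 1;
	}
	printf("KSM merging enabled for this process\n");
	return 0;
}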
* tag 'mm-stable-2023-04-27-15-30' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (369 commits)
mm,unmap: avoid flushing TLB in batch if PTE is inaccessible
shmem: restrict noswap option to initial user namespace
mm/khugepaged: fix conflicting mods to collapse_file()
sparse: remove unnecessary 0 values from rc
mm: move 'mmap_min_addr' logic from callers into vm_unmapped_area()
hugetlb: pte_alloc_huge() to replace huge pte_alloc_map()
maple_tree: fix allocation in mas_sparse_area()
mm: do not increment pgfault stats when page fault handler retries
zsmalloc: allow only one active pool compaction context
selftests/mm: add new selftests for KSM
mm: add new KSM process and sysfs knobs
mm: add new api to enable ksm per process
mm: shrinkers: fix debugfs file permissions
mm: don't check VMA write permissions if the PTE/PMD indicates write permissions
migrate_pages_batch: fix statistics for longterm pin retry
userfaultfd: use helper function range_in_vma()
lib/show_mem.c: use for_each_populated_zone() simplify code
mm: correct arg in reclaim_pages()/reclaim_clean_pages_from_list()
fs/buffer: convert create_page_buffers to folio_create_buffers
fs/buffer: add folio_create_empty_buffers helper
...
Diffstat (limited to 'mm/zswap.c')
-rw-r--r--	mm/zswap.c	138
1 file changed, 89 insertions(+), 49 deletions(-)
diff --git a/mm/zswap.c b/mm/zswap.c
index f2fc0373b967..e1e621d0b6a0 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -81,6 +81,8 @@ static bool zswap_pool_reached_full;
 
 #define ZSWAP_PARAM_UNSET ""
 
+static int zswap_setup(void);
+
 /* Enable/disable zswap */
 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
 static int zswap_enabled_param_set(const char *,
@@ -214,11 +216,16 @@ static DEFINE_SPINLOCK(zswap_pools_lock);
 /* pool counter to provide unique names to zpool */
 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
-/* used by param callback function */
-static bool zswap_init_started;
+enum zswap_init_type {
+	ZSWAP_UNINIT,
+	ZSWAP_INIT_SUCCEED,
+	ZSWAP_INIT_FAILED
+};
+
+static enum zswap_init_type zswap_init_state;
 
-/* fatal error during init */
-static bool zswap_init_failed;
+/* used to ensure the integrity of initialization */
+static DEFINE_MUTEX(zswap_init_lock);
 
 /* init completed, but couldn't create the initial pool */
 static bool zswap_has_pool;
@@ -272,17 +279,6 @@ static void zswap_update_total_size(void)
 **********************************/
 static struct kmem_cache *zswap_entry_cache;
 
-static int __init zswap_entry_cache_create(void)
-{
-	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
-	return zswap_entry_cache == NULL;
-}
-
-static void __init zswap_entry_cache_destroy(void)
-{
-	kmem_cache_destroy(zswap_entry_cache);
-}
-
 static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
 {
 	struct zswap_entry *entry;
@@ -663,7 +659,7 @@ error:
 	return NULL;
 }
 
-static __init struct zswap_pool *__zswap_pool_create_fallback(void)
+static struct zswap_pool *__zswap_pool_create_fallback(void)
 {
 	bool has_comp, has_zpool;
 
@@ -764,28 +760,43 @@ static void zswap_pool_put(struct zswap_pool *pool)
 * param callbacks
 **********************************/
 
+static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
+{
+	/* no change required */
+	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
+		return false;
+	return true;
+}
+
 /* val must be a null-terminated string */
 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
 			     char *type, char *compressor)
 {
 	struct zswap_pool *pool, *put_pool = NULL;
 	char *s = strstrip((char *)val);
-	int ret;
-
-	if (zswap_init_failed) {
+	int ret = 0;
+	bool new_pool = false;
+
+	mutex_lock(&zswap_init_lock);
+	switch (zswap_init_state) {
+	case ZSWAP_UNINIT:
+		/* if this is load-time (pre-init) param setting,
+		 * don't create a pool; that's done during init.
+		 */
+		ret = param_set_charp(s, kp);
+		break;
+	case ZSWAP_INIT_SUCCEED:
+		new_pool = zswap_pool_changed(s, kp);
+		break;
+	case ZSWAP_INIT_FAILED:
 		pr_err("can't set param, initialization failed\n");
-		return -ENODEV;
+		ret = -ENODEV;
 	}
+	mutex_unlock(&zswap_init_lock);
 
-	/* no change required */
-	if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
-		return 0;
-
-	/* if this is load-time (pre-init) param setting,
-	 * don't create a pool; that's done during init.
-	 */
-	if (!zswap_init_started)
-		return param_set_charp(s, kp);
+	/* no need to create a new pool, return directly */
+	if (!new_pool)
+		return ret;
 
 	if (!type) {
 		if (!zpool_has_pool(s)) {
@@ -875,16 +886,30 @@ static int zswap_zpool_param_set(const char *val,
 static int zswap_enabled_param_set(const char *val,
 				   const struct kernel_param *kp)
 {
-	if (zswap_init_failed) {
+	int ret = -ENODEV;
+
+	/* if this is load-time (pre-init) param setting, only set param. */
+	if (system_state != SYSTEM_RUNNING)
+		return param_set_bool(val, kp);
+
+	mutex_lock(&zswap_init_lock);
+	switch (zswap_init_state) {
+	case ZSWAP_UNINIT:
+		if (zswap_setup())
+			break;
+		fallthrough;
+	case ZSWAP_INIT_SUCCEED:
+		if (!zswap_has_pool)
+			pr_err("can't enable, no pool configured\n");
+		else
+			ret = param_set_bool(val, kp);
+		break;
+	case ZSWAP_INIT_FAILED:
 		pr_err("can't enable, initialization failed\n");
-		return -ENODEV;
-	}
-	if (!zswap_has_pool && zswap_init_started) {
-		pr_err("can't enable, no pool configured\n");
-		return -ENODEV;
 	}
+	mutex_unlock(&zswap_init_lock);
 
-	return param_set_bool(val, kp);
+	return ret;
 }
 
 /*********************************
@@ -1073,15 +1098,23 @@ fail:
 
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
 {
-	unsigned int pos;
 	unsigned long *page;
+	unsigned long val;
+	unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
 
 	page = (unsigned long *)ptr;
-	for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
-		if (page[pos] != page[0])
+	val = page[0];
+
+	if (val != page[last_pos])
+		return 0;
+
+	for (pos = 1; pos < last_pos; pos++) {
+		if (val != page[pos])
 			return 0;
 	}
-	*value = page[0];
+
+	*value = val;
+
 	return 1;
 }
 
@@ -1434,7 +1467,7 @@ static const struct frontswap_ops zswap_frontswap_ops = {
 
 static struct dentry *zswap_debugfs_root;
 
-static int __init zswap_debugfs_init(void)
+static int zswap_debugfs_init(void)
 {
 	if (!debugfs_initialized())
 		return -ENODEV;
@@ -1465,7 +1498,7 @@ static int __init zswap_debugfs_init(void)
 	return 0;
 }
 #else
-static int __init zswap_debugfs_init(void)
+static int zswap_debugfs_init(void)
 {
 	return 0;
 }
@@ -1474,14 +1507,13 @@ static int __init zswap_debugfs_init(void)
 /*********************************
 * module init and exit
 **********************************/
-static int __init init_zswap(void)
+static int zswap_setup(void)
 {
 	struct zswap_pool *pool;
 	int ret;
 
-	zswap_init_started = true;
-
-	if (zswap_entry_cache_create()) {
+	zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
+	if (!zswap_entry_cache) {
 		pr_err("entry cache creation failed\n");
 		goto cache_fail;
 	}
@@ -1520,6 +1552,7 @@ static int __init init_zswap(void)
 		goto destroy_wq;
 	if (zswap_debugfs_init())
 		pr_warn("debugfs initialization failed\n");
+	zswap_init_state = ZSWAP_INIT_SUCCEED;
 	return 0;
 
 destroy_wq:
@@ -1530,15 +1563,22 @@ fallback_fail:
 hp_fail:
 	cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
dstmem_fail:
-	zswap_entry_cache_destroy();
+	kmem_cache_destroy(zswap_entry_cache);
 cache_fail:
 	/* if built-in, we aren't unloaded on failure; don't allow use */
-	zswap_init_failed = true;
+	zswap_init_state = ZSWAP_INIT_FAILED;
 	zswap_enabled = false;
 	return -ENOMEM;
 }
+
+static int __init zswap_init(void)
+{
+	if (!zswap_enabled)
+		return 0;
+	return zswap_setup();
+}
 /* must be late so crypto has time to come up */
-late_initcall(init_zswap);
+late_initcall(zswap_init);
 
 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
 MODULE_DESCRIPTION("Compressed cache for swap pages");
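
The practical effect of the zswap rework shown above is that a
CONFIG_ZSWAP=y kernel booted with zswap disabled can now bring it up on
demand: writing the `enabled' parameter triggers the deferred
zswap_setup(). A minimal usage sketch, assuming root and the standard
module-parameter sysfs path:

/* Hedged sketch: enable zswap at runtime; with this series the
 * parameter callback performs the deferred initialization itself.
 * Requires root and a kernel built with CONFIG_ZSWAP.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/module/zswap/parameters/enabled", "w");

	if (!f) {
		perror("zswap 'enabled' parameter");
		return 1;
	}
	if (fputs("Y", f) == EOF || fclose(f) == EOF) {
		perror("write");
		return 1;
	}
	return 0;
}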