author    Chengming Zhou <chengming.zhou@linux.dev>   2024-06-21 15:54:30 +0800
committer Andrew Morton <akpm@linux-foundation.org>   2024-07-04 18:05:50 -0700
commit    d58a361b0350128bf5a5cf47773edaedbb6ea838 (patch)
tree      debfbd1bf6c77bdfc53bc52ffbf8bb4e191301e2 /mm/ksm.c
parent    ac90c56bbd734addc9bfb4567f64f1c180c64f5d (diff)
mm/ksm: don't waste time searching stable tree for fast changing page
The code flow in cmp_and_merge_page() is suboptimal because it handles the
ksm page and the non-ksm page along the same path. For example:

 - ksm page
   1. Mostly just return if this ksm page is not migrated and this rmap_item
      is already on the rmap hlist; otherwise we have to fix this rmap_item
      mapping.
   2. We never need to checksum this ksm page, since it can't change.

 - non-ksm page
   1. Don't waste time searching the stable tree if the page is changing fast.
   2. Try to merge with the zero page before searching the stable tree.
   3. Only then search the stable tree to find a mergeable ksm page.

This patch restructures the code flow so that the handling of ksm pages and
non-ksm pages becomes clearer and more efficient.

Link: https://lkml.kernel.org/r/20240621-b4-ksm-scan-optimize-v2-2-1c328aa9e30b@linux.dev
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Stefan Roesch <shr@devkernel.io>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
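For orientation, a simplified sketch of the resulting flow in
cmp_and_merge_page() after this patch (not the literal kernel code:
migration fix-up, NUMA handling, locking and the unstable tree path
are elided; see the diff below for the actual change):

	stable_node = page_stable_node(page);
	if (stable_node) {
		/* ksm page: fix up the rmap_item mapping if the page was
		 * migrated, otherwise just return; no checksum is taken,
		 * since a ksm page cannot change. */
	} else {
		remove_rmap_item_from_tree(rmap_item);

		/* fast-changing page: skip the stable tree search entirely */
		checksum = calc_checksum(page);
		if (rmap_item->oldchecksum != checksum) {
			rmap_item->oldchecksum = checksum;
			return;
		}

		/* try the zero page before searching the stable tree */
		if (!try_to_merge_with_zero_page(rmap_item, page))
			return;
	}

	/* only now search the stable tree for a mergeable ksm page */
	kpage = stable_tree_search(page);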
Diffstat (limited to 'mm/ksm.c')
-rw-r--r--	mm/ksm.c	32
1 file changed, 17 insertions, 15 deletions
diff --git a/mm/ksm.c b/mm/ksm.c
index dd9ed0bdb9f6..fd8843e4a8c5 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2366,6 +2366,23 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 		 */
 		if (!is_page_sharing_candidate(stable_node))
 			max_page_sharing_bypass = true;
+	} else {
+		remove_rmap_item_from_tree(rmap_item);
+
+		/*
+		 * If the hash value of the page has changed from the last time
+		 * we calculated it, this page is changing frequently: therefore we
+		 * don't want to insert it in the unstable tree, and we don't want
+		 * to waste our time searching for something identical to it there.
+		 */
+		checksum = calc_checksum(page);
+		if (rmap_item->oldchecksum != checksum) {
+			rmap_item->oldchecksum = checksum;
+			return;
+		}
+
+		if (!try_to_merge_with_zero_page(rmap_item, page))
+			return;
 	}
 
 	/* We first start with searching the page inside the stable tree */
@@ -2396,21 +2413,6 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
 		return;
 	}
 
-	/*
-	 * If the hash value of the page has changed from the last time
-	 * we calculated it, this page is changing frequently: therefore we
-	 * don't want to insert it in the unstable tree, and we don't want
-	 * to waste our time searching for something identical to it there.
-	 */
-	checksum = calc_checksum(page);
-	if (rmap_item->oldchecksum != checksum) {
-		rmap_item->oldchecksum = checksum;
-		return;
-	}
-
-	if (!try_to_merge_with_zero_page(rmap_item, page))
-		return;
-
 	tree_rmap_item =
 		unstable_tree_search_insert(rmap_item, page, &tree_page);
 	if (tree_rmap_item) {