Diffstat (limited to 'mm/page_counter.c')
-rw-r--r--	mm/page_counter.c | 29 +++++++++++++++++++--------
1 file changed, 21 insertions(+), 8 deletions(-)
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 3887bd152b75..b249d15af9dd 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -87,9 +87,22 @@ void page_counter_charge(struct page_counter *counter, unsigned long nr_pages)
 		/*
 		 * This is indeed racy, but we can live with some
 		 * inaccuracy in the watermark.
+		 *
+		 * Notably, we have two watermarks to allow for both a globally
+		 * visible peak and one that can be reset at a smaller scope.
+		 *
+		 * Since we reset both watermarks when the global reset occurs,
+		 * we can guarantee that watermark >= local_watermark, so we
+		 * don't need to do both comparisons every time.
+		 *
+		 * On systems with branch predictors, the inner condition should
+		 * be almost free.
 		 */
-		if (new > READ_ONCE(c->watermark))
-			WRITE_ONCE(c->watermark, new);
+		if (new > READ_ONCE(c->local_watermark)) {
+			WRITE_ONCE(c->local_watermark, new);
+			if (new > READ_ONCE(c->watermark))
+				WRITE_ONCE(c->watermark, new);
+		}
 	}
 }
 
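
To make the invariant concrete, here is a minimal user-space sketch of the two-watermark scheme, assuming the reset behavior the comment describes. The names (struct pc_sketch, pc_charge, pc_reset_local, pc_reset_all) are hypothetical, and C11 relaxed atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE() and atomic_long_add_return(); only the comparison structure mirrors the patch.

#include <stdatomic.h>

struct pc_sketch {
	atomic_long usage;
	atomic_long watermark;        /* global peak, reset only globally */
	atomic_long local_watermark;  /* peak since the last local reset */
};

static void pc_charge(struct pc_sketch *c, long nr_pages)
{
	/* fetch_add returns the old value, so add nr_pages back */
	long new = atomic_fetch_add_explicit(&c->usage, nr_pages,
					     memory_order_relaxed) + nr_pages;

	/*
	 * Racy, as in the kernel: a concurrent charge may publish a
	 * slightly stale peak. Because every reset path preserves
	 * watermark >= local_watermark, failing the outer test means
	 * the inner test would fail too, so it can be skipped.
	 */
	if (new > atomic_load_explicit(&c->local_watermark,
				       memory_order_relaxed)) {
		atomic_store_explicit(&c->local_watermark, new,
				      memory_order_relaxed);
		if (new > atomic_load_explicit(&c->watermark,
					       memory_order_relaxed))
			atomic_store_explicit(&c->watermark, new,
					      memory_order_relaxed);
	}
}

/* Local reset: only the small-scope peak snaps down to current usage. */
static void pc_reset_local(struct pc_sketch *c)
{
	atomic_store_explicit(&c->local_watermark,
			      atomic_load_explicit(&c->usage,
						   memory_order_relaxed),
			      memory_order_relaxed);
}

/* Global reset: both peaks snap down, keeping watermark >= local_watermark. */
static void pc_reset_all(struct pc_sketch *c)
{
	long cur = atomic_load_explicit(&c->usage, memory_order_relaxed);

	atomic_store_explicit(&c->watermark, cur, memory_order_relaxed);
	atomic_store_explicit(&c->local_watermark, cur, memory_order_relaxed);
}
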
@@ -140,12 +153,12 @@ bool page_counter_try_charge(struct page_counter *counter,
 		if (protection)
 			propagate_protected_usage(c, new);
-		/*
-		 * Just like with failcnt, we can live with some
-		 * inaccuracy in the watermark.
-		 */
-		if (new > READ_ONCE(c->watermark))
-			WRITE_ONCE(c->watermark, new);
+		/* see comment on page_counter_charge */
+		if (new > READ_ONCE(c->local_watermark)) {
+			WRITE_ONCE(c->local_watermark, new);
+			if (new > READ_ONCE(c->watermark))
+				WRITE_ONCE(c->watermark, new);
+		}
 	}
 	return true;
 
 failed:
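
A short, hypothetical driver for the sketch above (compiled together with it) shows the asymmetry the patch buys: a local reset moves only local_watermark, so a later, smaller peak is visible locally while the global peak is untouched. Uncharging is folded into pc_charge() with a negative delta purely for brevity here; the kernel uses a separate page_counter_uncharge().

#include <assert.h>

int main(void)
{
	struct pc_sketch c = {0};

	pc_charge(&c, 100);   /* both peaks become 100 */
	pc_charge(&c, -60);   /* usage drops to 40; peaks keep 100 */
	pc_reset_local(&c);   /* local_watermark snaps down to 40 */
	pc_charge(&c, 30);    /* usage 70: local peak moves, global does not */

	assert(atomic_load(&c.local_watermark) == 70);
	assert(atomic_load(&c.watermark) == 100);
	return 0;
}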