author     Gregory Price <gourry.memverge@gmail.com>  2024-02-02 12:02:36 -0500
committer  Andrew Morton <akpm@linux-foundation.org>  2024-02-22 10:24:46 -0800
commit     9685e6e30d116d72fb013b0bce261a676b7575c1 (patch)
tree       2cd0bac3b51a8d02741dc60d50bb90fea6259907 /mm/mempolicy.c
parent     dce41f5ae2539d1c20ae8de4e039630aec3c3f3c (diff)
mm/mempolicy: refactor a read-once mechanism into a function for re-use
Move the use of barrier() to force policy->nodemask onto the stack into a
function read_once_policy_nodemask() so that it may be re-used.

Link: https://lkml.kernel.org/r/20240202170238.90004-3-gregory.price@memverge.com
Signed-off-by: Gregory Price <gregory.price@memverge.com>
Suggested-by: "Huang, Ying" <ying.huang@intel.com>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hasan Al Maruf <Hasan.Maruf@amd.com>
Cc: Honggyu Kim <honggyu.kim@sk.com>
Cc: Hyeongtak Ji <hyeongtak.ji@sk.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rakie Kim <rakie.kim@sk.com>
Cc: Ravi Jonnalagadda <ravis.opensrc@micron.com>
Cc: Srinivasulu Thanneeru <sthanneeru.opensrc@micron.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
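The point of the refactor is that any mempolicy handler which walks pol->nodes
can first take a stable local snapshot via the new helper, exactly as the
updated interleave_nid() below does. A minimal illustrative sketch of such a
caller follows; example_pick_node() is a hypothetical name used only for this
sketch and is not part of the patch, while read_once_policy_nodemask(),
first_node(), next_node() and numa_node_id() are the kernel interfaces the
patch itself uses.

/*
 * Illustrative only: a hypothetical caller that reuses the new helper to
 * walk a stable stack copy of pol->nodes, mirroring interleave_nid().
 */
static unsigned int example_pick_node(struct mempolicy *pol, unsigned int idx)
{
	nodemask_t nodemask;
	unsigned int nnodes, target, i;
	int nid;

	/* Snapshot pol->nodes onto the stack; nnodes is its weight. */
	nnodes = read_once_policy_nodemask(pol, &nodemask);
	if (!nnodes)
		return numa_node_id();

	/* Walk the local copy, which cannot change underneath us. */
	target = idx % nnodes;
	nid = first_node(nodemask);
	for (i = 0; i < target; i++)
		nid = next_node(nid, nodemask);
	return nid;
}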
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	26
1 file changed, 16 insertions(+), 10 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b4fccc921b62..1bdc7d0d1b0b 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1905,6 +1905,20 @@ unsigned int mempolicy_slab_node(void)
}
}
+static unsigned int read_once_policy_nodemask(struct mempolicy *pol,
+ nodemask_t *mask)
+{
+ /*
+ * barrier stabilizes the nodemask locally so that it can be iterated
+ * over safely without concern for changes. Allocators validate node
+ * selection does not violate mems_allowed, so this is safe.
+ */
+ barrier();
+ memcpy(mask, &pol->nodes, sizeof(nodemask_t));
+ barrier();
+ return nodes_weight(*mask);
+}
+
/*
* Do static interleaving for interleave index @ilx. Returns the ilx'th
* node in pol->nodes (starting from ilx=0), wrapping around if ilx
@@ -1912,20 +1926,12 @@ unsigned int mempolicy_slab_node(void)
*/
static unsigned int interleave_nid(struct mempolicy *pol, pgoff_t ilx)
{
- nodemask_t nodemask = pol->nodes;
+ nodemask_t nodemask;
unsigned int target, nnodes;
int i;
int nid;
- /*
- * The barrier will stabilize the nodemask in a register or on
- * the stack so that it will stop changing under the code.
- *
- * Between first_node() and next_node(), pol->nodes could be changed
- * by other threads. So we put pol->nodes in a local stack.
- */
- barrier();
- nnodes = nodes_weight(nodemask);
+ nnodes = read_once_policy_nodemask(pol, &nodemask);
if (!nnodes)
return numa_node_id();
target = ilx % nnodes;