author     Baolin Wang <baolin.wang@linux.alibaba.com>  2024-03-06 18:13:26 +0800
committer  Andrew Morton <akpm@linux-foundation.org>    2024-04-25 20:56:06 -0700
commit     e42dfe4e0a51b476dcc6f1461c51fdb1b76573aa (patch)
tree       c122fbfd6a59507ed8c0ca59bf091a238b4834a1
parent     aaab830ad887629156ef17097c2ad24ce6fb8177 (diff)
mm: record the migration reason for struct migration_target_control
Patch series "make the hugetlb migration strategy consistent", v2.

As discussed in the previous thread [1], there is an inconsistency in how hugetlb migration is handled. When migrating a freed hugetlb, alloc_and_dissolve_hugetlb_folio() prevents fallback to other NUMA nodes. However, when dealing with an in-use hugetlb, alloc_hugetlb_folio_nodemask() allows fallback to other NUMA nodes, which can break the per-node hugetlb pool and might result in unexpected failures when node-bound workloads don't get what they assume is available.

This patchset tries to make the hugetlb migration strategy clearer and more consistent. Please find details in each patch.

[1] https://lore.kernel.org/all/6f26ce22d2fcd523418a085f2c588fe0776d46e7.1706794035.git.baolin.wang@linux.alibaba.com/

This patch (of 2):

To support different hugetlb allocation strategies during hugetlb migration based on various migration reasons, record the migration reason in the migration_target_control structure as a preparation.

Link: https://lkml.kernel.org/r/cover.1709719720.git.baolin.wang@linux.alibaba.com
Link: https://lkml.kernel.org/r/7b95d4981e07211f57139fc5b1f7ce91b920cee4.1709719720.git.baolin.wang@linux.alibaba.com
Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
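Note that this patch only records the reason; nothing consumes the new field yet. As a rough sketch of the kind of policy the series is preparing for (the helper name htlb_allow_node_fallback and the exact set of reasons below are illustrative assumptions, not part of this patch), a follow-up could inspect the recorded enum migrate_reason when allocating a hugetlb destination folio and only permit falling back to other NUMA nodes for reasons where breaking the per-node hugetlb pool is acceptable:

/*
 * Illustrative sketch only -- not from this patch. A follow-up change
 * could derive the hugetlb node-fallback policy from the migration
 * reason recorded in struct migration_target_control.
 */
static bool htlb_allow_node_fallback(int reason)
{
	switch (reason) {
	case MR_MEMORY_HOTPLUG:
	case MR_MEMORY_FAILURE:
	case MR_SYSCALL:
	case MR_MEMPOLICY_MBIND:
		/*
		 * The folio cannot stay on its current node (offlining,
		 * poisoning, or an explicit user request), so allow other
		 * nodes rather than failing the operation.
		 */
		return true;
	default:
		/*
		 * e.g. MR_LONGTERM_PIN or MR_DEMOTION: keep the per-node
		 * hugetlb pool intact and fail instead of falling back.
		 */
		return false;
	}
}

The alloc_migration_target() callback, which already receives the mtc, could then pass this decision down when it calls alloc_hugetlb_folio_nodemask() for hugetlb folios.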
-rw-r--r--  mm/gup.c             1
-rw-r--r--  mm/internal.h        1
-rw-r--r--  mm/memory-failure.c  1
-rw-r--r--  mm/memory_hotplug.c  1
-rw-r--r--  mm/mempolicy.c       1
-rw-r--r--  mm/migrate.c         1
-rw-r--r--  mm/page_alloc.c      1
-rw-r--r--  mm/vmscan.c          3
8 files changed, 9 insertions, 1 deletion
diff --git a/mm/gup.c b/mm/gup.c
index 8433d3dc31fc..6d8d15f8c7f9 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -2145,6 +2145,7 @@ static int migrate_longterm_unpinnable_pages(
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_NOWARN,
+ .reason = MR_LONGTERM_PIN,
};
if (migrate_pages(movable_page_list, alloc_migration_target,
diff --git a/mm/internal.h b/mm/internal.h
index fb219e31f0f0..63bdac6d0413 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1045,6 +1045,7 @@ struct migration_target_control {
int nid; /* preferred node id */
nodemask_t *nmask;
gfp_t gfp_mask;
+ enum migrate_reason reason;
};
/*
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 0a7a8a4ba421..9e50586f2e37 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -2669,6 +2669,7 @@ static int soft_offline_in_use_page(struct page *page)
struct migration_target_control mtc = {
.nid = NUMA_NO_NODE,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .reason = MR_MEMORY_FAILURE,
};
if (!huge && folio_test_large(folio)) {
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a444e2d7dd2b..b79ba36e09e0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1841,6 +1841,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
struct migration_target_control mtc = {
.nmask = &nmask,
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .reason = MR_MEMORY_HOTPLUG,
};
int ret;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0b3def99174a..e128e6b7bbcb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1070,6 +1070,7 @@ static long migrate_to_node(struct mm_struct *mm, int source, int dest,
struct migration_target_control mtc = {
.nid = dest,
.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+ .reason = MR_SYSCALL,
};
nodes_clear(nmask);
diff --git a/mm/migrate.c b/mm/migrate.c
index 73a052a382f1..bde63010a3cf 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -2060,6 +2060,7 @@ static int do_move_pages_to_node(struct list_head *pagelist, int node)
struct migration_target_control mtc = {
.nid = node,
.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
+ .reason = MR_SYSCALL,
};
err = migrate_pages(pagelist, alloc_migration_target, NULL,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 382d1c98f8e5..daab8cab91cc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6351,6 +6351,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc,
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .reason = MR_CONTIG_RANGE,
};
struct page *page;
unsigned long total_mapped = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3ef654addd44..289121e76753 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -967,7 +967,8 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
__GFP_NOMEMALLOC | GFP_NOWAIT,
.nid = target_nid,
- .nmask = &allowed_mask
+ .nmask = &allowed_mask,
+ .reason = MR_DEMOTION,
};
if (list_empty(demote_folios))