| author | Andrew Morton <akpm@linux-foundation.org> | 2024-01-22 19:23:56 -0800 |
|---|---|---|
| committer | Andrew Morton <akpm@linux-foundation.org> | 2024-01-22 19:23:56 -0800 |
| commit | f0b7a0d1d46625db5b0e631c05ae96d78eda6c70 (patch) | |
| tree | 44dde92e4dbd16f25c7ce50240bf53a7b753e7ad /mm | |
| parent | 5aa1c96e8ef347430e7a7c898f290425d1b568ef (diff) | |
| parent | 6613476e225e090cc9aad49be7fa504e290dd33d (diff) | |
Merge branch 'master' into mm-hotfixes-stable
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/Kconfig | 3 |
| -rw-r--r-- | mm/compaction.c | 43 |
| -rw-r--r-- | mm/filemap.c | 2 |
| -rw-r--r-- | mm/init-mm.c | 3 |
| -rw-r--r-- | mm/memblock.c | 2 |
| -rw-r--r-- | mm/memory-tiers.c | 12 |
| -rw-r--r-- | mm/migrate.c | 2 |
| -rw-r--r-- | mm/percpu.c | 8 |
8 files changed, 46 insertions, 29 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 1902cfe4cc4f..ffc3a2ba3a8c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1258,6 +1258,9 @@ config LOCK_MM_AND_FIND_VMA
 	bool
 	depends on !STACK_GROWSUP
 
+config IOMMU_MM_DATA
+	bool
+
 source "mm/damon/Kconfig"
 
 endmenu
diff --git a/mm/compaction.c b/mm/compaction.c
index 27ada42924d5..4add68d40e8d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -882,6 +882,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
+		bool is_dirty, is_unevictable;
 
 		if (skip_on_failure && low_pfn >= next_skip_pfn) {
 			/*
@@ -1079,8 +1080,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if (!folio_test_lru(folio))
 			goto isolate_fail_put;
 
+		is_unevictable = folio_test_unevictable(folio);
+
 		/* Compaction might skip unevictable pages but CMA takes them */
-		if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio))
+		if (!(mode & ISOLATE_UNEVICTABLE) && is_unevictable)
 			goto isolate_fail_put;
 
 		/*
@@ -1092,26 +1095,42 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio))
 			goto isolate_fail_put;
 
-		if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) {
-			bool migrate_dirty;
+		is_dirty = folio_test_dirty(folio);
+
+		if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
+		    (mapping && is_unevictable)) {
+			bool migrate_dirty = true;
+			bool is_unmovable;
 
 			/*
 			 * Only folios without mappings or that have
-			 * a ->migrate_folio callback are possible to
-			 * migrate without blocking. However, we may
-			 * be racing with truncation, which can free
-			 * the mapping. Truncation holds the folio lock
-			 * until after the folio is removed from the page
-			 * cache so holding it ourselves is sufficient.
+			 * a ->migrate_folio callback are possible to migrate
+			 * without blocking.
+			 *
+			 * Folios from unmovable mappings are not migratable.
+			 *
+			 * However, we can be racing with truncation, which can
+			 * free the mapping that we need to check. Truncation
+			 * holds the folio lock until after the folio is removed
+			 * from the page so holding it ourselves is sufficient.
+			 *
+			 * To avoid locking the folio just to check unmovable,
+			 * assume every unmovable folio is also unevictable,
+			 * which is a cheaper test.  If our assumption goes
+			 * wrong, it's not a correctness bug, just potentially
+			 * wasted cycles.
 			 */
 			if (!folio_trylock(folio))
 				goto isolate_fail_put;
 
 			mapping = folio_mapping(folio);
-			migrate_dirty = !mapping ||
-					mapping->a_ops->migrate_folio;
+			if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) {
+				migrate_dirty = !mapping ||
+						mapping->a_ops->migrate_folio;
+			}
+			is_unmovable = mapping && mapping_unmovable(mapping);
 			folio_unlock(folio);
-			if (!migrate_dirty)
+			if (!migrate_dirty || is_unmovable)
 				goto isolate_fail_put;
 		}
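The compaction change above is subtle: mapping_unmovable() can only be checked safely under the folio lock, but locking every scanned folio would be costly. The hunk therefore widens the existing "lock only for async-dirty folios" path to also cover mapped unevictable folios, relying on the convention that unmovable mappings are always marked unevictable. Below is a condensed, illustrative sketch of the resulting decision logic, not a drop-in replacement for isolate_migratepages_block() (it assumes kernel-internal headers such as mm/internal.h for isolate_mode_t):

```c
#include <linux/pagemap.h>
#include "internal.h"	/* isolate_mode_t, ISOLATE_* flags (mm-internal) */

/*
 * Sketch of the isolation decision after this merge. The folio lock is
 * taken only when the mapping must actually be inspected; "unevictable"
 * serves as a cheap over-approximation of "unmovable", so a false
 * positive costs a wasted trylock, never correctness.
 */
static bool sketch_can_isolate(struct folio *folio, isolate_mode_t mode)
{
	struct address_space *mapping = folio_mapping(folio);
	bool is_dirty = folio_test_dirty(folio);
	bool is_unevictable = folio_test_unevictable(folio);
	bool ok = true;

	if (((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty) ||
	    (mapping && is_unevictable)) {
		if (!folio_trylock(folio))
			return false;
		/* Re-read under the lock: truncation may free the mapping. */
		mapping = folio_mapping(folio);
		if ((mode & ISOLATE_ASYNC_MIGRATE) && is_dirty)
			ok = !mapping || mapping->a_ops->migrate_folio;
		if (mapping && mapping_unmovable(mapping))
			ok = false;
		folio_unlock(folio);
	}
	return ok;
}
```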
diff --git a/mm/filemap.c b/mm/filemap.c
index ea49677c6338..750e779c23db 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2688,6 +2688,7 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count)
 
 	return filemap_write_and_wait_range(mapping, pos, end);
 }
+EXPORT_SYMBOL_GPL(kiocb_write_and_wait);
 
 int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
 {
@@ -2715,6 +2716,7 @@ int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)
 	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 					     end >> PAGE_SHIFT);
 }
+EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);
 
 /**
  * generic_file_read_iter - generic filesystem read routine
diff --git a/mm/init-mm.c b/mm/init-mm.c
index cfd367822cdd..24c809379274 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -44,9 +44,6 @@ struct mm_struct init_mm = {
 #endif
 	.user_ns	= &init_user_ns,
 	.cpu_bitmap	= CPU_BITS_NONE,
-#ifdef CONFIG_IOMMU_SVA
-	.pasid		= IOMMU_PASID_INVALID,
-#endif
 	INIT_MM_CONTEXT(init_mm)
 };
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 8c194d8afeec..abd92869874d 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1885,7 +1885,7 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
 	int mid = memblock_search(type, PFN_PHYS(pfn));
 
 	if (mid == -1)
-		return -1;
+		return NUMA_NO_NODE;
 
 	*start_pfn = PFN_DOWN(type->regions[mid].base);
 	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c
index 8d5291add2bc..5462d9e3c84c 100644
--- a/mm/memory-tiers.c
+++ b/mm/memory-tiers.c
@@ -109,7 +109,7 @@ static struct demotion_nodes *node_demotion __read_mostly;
 static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);
 
 static bool default_dram_perf_error;
-static struct node_hmem_attrs default_dram_perf;
+static struct access_coordinate default_dram_perf;
 static int default_dram_perf_ref_nid = NUMA_NO_NODE;
 static const char *default_dram_perf_ref_source;
 
@@ -601,15 +601,15 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype)
 }
 EXPORT_SYMBOL_GPL(clear_node_memory_type);
 
-static void dump_hmem_attrs(struct node_hmem_attrs *attrs, const char *prefix)
+static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix)
 {
 	pr_info(
 "%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
-		prefix, attrs->read_latency, attrs->write_latency,
-		attrs->read_bandwidth, attrs->write_bandwidth);
+		prefix, coord->read_latency, coord->write_latency,
+		coord->read_bandwidth, coord->write_bandwidth);
 }
 
-int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
 			     const char *source)
 {
 	int rc = 0;
@@ -666,7 +666,7 @@ out:
 	return rc;
 }
 
-int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
 {
 	if (default_dram_perf_error)
 		return -EIO;
diff --git a/mm/migrate.c b/mm/migrate.c
index bde8273cf15b..cc9f2bcd73b4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -962,6 +962,8 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 
 		if (!mapping)
 			rc = migrate_folio(mapping, dst, src, mode);
+		else if (mapping_unmovable(mapping))
+			rc = -EOPNOTSUPP;
 		else if (mapping->a_ops->migrate_folio)
 			/*
 			 * Most folios have a mapping and most filesystems
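With the mm/migrate.c hunk applied, migration of a folio whose mapping is flagged unmovable now fails outright with -EOPNOTSUPP instead of reaching ->migrate_folio. For context, here is a rough sketch of how an owner of non-migratable pages would opt a mapping out of migration, assuming the mapping_set_unmovable() helper introduced alongside mapping_unmovable(); the function name my_mark_pinned() is illustrative only:

```c
#include <linux/fs.h>
#include <linux/pagemap.h>

/*
 * Hypothetical example: mark an inode's pages as non-migratable.
 * mapping_set_unmovable() is expected to set AS_UNEVICTABLE as well,
 * matching the compaction heuristic above that treats every unmovable
 * folio as unevictable.
 */
static void my_mark_pinned(struct inode *inode)
{
	mapping_set_unmovable(inode->i_mapping);
}
```

Once set, move_to_new_folio() rejects such folios with -EOPNOTSUPP, and compaction's migrate scanner avoids isolating them in the first place.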
diff --git a/mm/percpu.c b/mm/percpu.c
index 7b97d31df767..4e11fc1e6def 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -3333,13 +3333,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
 		if (rc < 0)
 			panic("failed to map percpu area, err=%d\n", rc);
 
-		/*
-		 * FIXME: Archs with virtual cache should flush local
-		 * cache for the linear mapping here - something
-		 * equivalent to flush_cache_vmap() on the local cpu.
-		 * flush_cache_vmap() can't be used as most supporting
-		 * data structures are not set up yet.
-		 */
+		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
 
 		/* copy static data */
 		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
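The mm/percpu.c hunk resolves a long-standing FIXME: flush_cache_vmap() cannot be called this early in boot because the vmalloc bookkeeping it consults is not set up yet, so a dedicated early variant is called instead. A minimal sketch of the asm-generic fallback pattern this relies on, assuming the generic version is a no-op that architectures with virtually-indexed caches override:

```c
/*
 * Sketch of the asm-generic fallback: a no-op unless the architecture
 * provides its own flush_cache_vmap_early() (added for CPUs that must
 * flush the new linear mapping before the static per-CPU data is
 * memcpy'd into it, at a point where flush_cache_vmap() is unusable).
 */
#ifndef flush_cache_vmap_early
static inline void flush_cache_vmap_early(unsigned long start,
					  unsigned long end)
{
}
#endif
```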