Diffstat (limited to 'mm')
-rw-r--r--  mm/compaction.c |  31
-rw-r--r--  mm/memcontrol.c |  24
-rw-r--r--  mm/page_alloc.c |  49
-rw-r--r--  mm/vmscan.c     |  28
-rw-r--r--  mm/vmstat.c     |  95
-rw-r--r--  mm/zsmalloc.c   |  67
-rw-r--r--  mm/zswap.c      | 172
7 files changed, 172 insertions(+), 294 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 0409a4ad6ea1..0d37192d9423 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -2043,33 +2043,38 @@ void kcompactd_stop(int nid)
  * away, we get changed to run anywhere: as the first one comes back,
  * restore their cpu bindings.
  */
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                        void *hcpu)
+static int kcompactd_cpu_online(unsigned int cpu)
 {
         int nid;
 
-        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-                for_each_node_state(nid, N_MEMORY) {
-                        pg_data_t *pgdat = NODE_DATA(nid);
-                        const struct cpumask *mask;
+        for_each_node_state(nid, N_MEMORY) {
+                pg_data_t *pgdat = NODE_DATA(nid);
+                const struct cpumask *mask;
 
-                        mask = cpumask_of_node(pgdat->node_id);
+                mask = cpumask_of_node(pgdat->node_id);
 
-                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-                                /* One of our CPUs online: restore mask */
-                                set_cpus_allowed_ptr(pgdat->kcompactd, mask);
-                }
+                if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+                        /* One of our CPUs online: restore mask */
+                        set_cpus_allowed_ptr(pgdat->kcompactd, mask);
         }
-        return NOTIFY_OK;
+        return 0;
 }
 
 static int __init kcompactd_init(void)
 {
         int nid;
+        int ret;
+
+        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+                                        "mm/compaction:online",
+                                        kcompactd_cpu_online, NULL);
+        if (ret < 0) {
+                pr_err("kcompactd: failed to register hotplug callbacks.\n");
+                return ret;
+        }
 
         for_each_node_state(nid, N_MEMORY)
                 kcompactd_run(nid);
-        hotcpu_notifier(cpu_callback, 0);
         return 0;
 }
 subsys_initcall(kcompactd_init)
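[Editor's note: the compaction hunk above is the canonical hotcpu_notifier() replacement — the CPU_ONLINE/CPU_ONLINE_FROZEN cases collapse into one callback that takes the CPU number directly and returns 0 or -errno instead of NOTIFY_* values. A minimal sketch of the same idiom, using hypothetical names (my_cpu_online, "example:online"):]

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/printk.h>

/* Hypothetical callback: runs on each CPU as it comes online. */
static int my_cpu_online(unsigned int cpu)
{
        pr_info("cpu %u came online\n", cpu);
        return 0;               /* 0 or -errno, never NOTIFY_* */
}

static int __init my_init(void)
{
        int ret;

        /*
         * CPUHP_AP_ONLINE_DYN allocates a state slot at runtime; the
         * _nocalls variant skips invoking the callback for CPUs that
         * are already online, matching kcompactd_init() above.
         */
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "example:online",
                                        my_cpu_online, NULL);
        return ret < 0 ? ret : 0;       /* > 0 is the allocated state id */
}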
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f870ba43942..6c2043509fb5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1816,22 +1816,13 @@ static void drain_all_stock(struct mem_cgroup *root_memcg)
         mutex_unlock(&percpu_charge_mutex);
 }
 
-static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
-                                      unsigned long action,
-                                      void *hcpu)
+static int memcg_hotplug_cpu_dead(unsigned int cpu)
 {
-        int cpu = (unsigned long)hcpu;
         struct memcg_stock_pcp *stock;
 
-        if (action == CPU_ONLINE)
-                return NOTIFY_OK;
-
-        if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
-                return NOTIFY_OK;
-
         stock = &per_cpu(memcg_stock, cpu);
         drain_stock(stock);
-        return NOTIFY_OK;
+        return 0;
 }
 
 static void reclaim_high(struct mem_cgroup *memcg,
@@ -5774,16 +5765,17 @@ __setup("cgroup.memory=", cgroup_memory);
 /*
  * subsys_initcall() for memory controller.
  *
- * Some parts like hotcpu_notifier() have to be initialized from this context
- * because of lock dependencies (cgroup_lock -> cpu hotplug) but basically
- * everything that doesn't depend on a specific mem_cgroup structure should
- * be initialized from here.
+ * Some parts like memcg_hotplug_cpu_dead() have to be initialized from this
+ * context because of lock dependencies (cgroup_lock -> cpu hotplug) but
+ * basically everything that doesn't depend on a specific mem_cgroup structure
+ * should be initialized from here.
  */
 static int __init mem_cgroup_init(void)
 {
         int cpu, node;
 
-        hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
+        cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
+                                  memcg_hotplug_cpu_dead);
 
         for_each_possible_cpu(cpu)
                 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 61b0988bba8c..3dcc54da5637 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6491,38 +6491,39 @@ void __init free_area_init(unsigned long *zones_size)
                         __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-static int page_alloc_cpu_notify(struct notifier_block *self,
-                                 unsigned long action, void *hcpu)
+static int page_alloc_cpu_dead(unsigned int cpu)
 {
-        int cpu = (unsigned long)hcpu;
 
-        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-                lru_add_drain_cpu(cpu);
-                drain_pages(cpu);
+        lru_add_drain_cpu(cpu);
+        drain_pages(cpu);
 
-                /*
-                 * Spill the event counters of the dead processor
-                 * into the current processors event counters.
-                 * This artificially elevates the count of the current
-                 * processor.
-                 */
-                vm_events_fold_cpu(cpu);
+        /*
+         * Spill the event counters of the dead processor
+         * into the current processors event counters.
+         * This artificially elevates the count of the current
+         * processor.
+         */
+        vm_events_fold_cpu(cpu);
 
-                /*
-                 * Zero the differential counters of the dead processor
-                 * so that the vm statistics are consistent.
-                 *
-                 * This is only okay since the processor is dead and cannot
-                 * race with what we are doing.
-                 */
-                cpu_vm_stats_fold(cpu);
-        }
-        return NOTIFY_OK;
+        /*
+         * Zero the differential counters of the dead processor
+         * so that the vm statistics are consistent.
+         *
+         * This is only okay since the processor is dead and cannot
+         * race with what we are doing.
+         */
+        cpu_vm_stats_fold(cpu);
+        return 0;
 }
 
 void __init page_alloc_init(void)
 {
-        hotcpu_notifier(page_alloc_cpu_notify, 0);
+        int ret;
+
+        ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
+                                        "mm/page_alloc:dead", NULL,
+                                        page_alloc_cpu_dead);
+        WARN_ON(ret < 0);
 }
 
 /*
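[Editor's note: the memcontrol and page_alloc hunks above both register teardown-only "dead" states — the startup callback is NULL, and the dead callback runs on a surviving CPU strictly after the dead CPU is gone, so its per-cpu data can be drained without racing against it. The CPUHP_MM_MEMCQ_DEAD and CPUHP_PAGE_ALLOC_DEAD constants are new slots in enum cpuhp_state, presumably added by a companion include/linux/cpuhotplug.h change, since this diff is limited to 'mm'. A sketch, with a hypothetical slot MY_EXAMPLE_DEAD and hypothetical helper drain_my_percpu_data():]

#include <linux/cpuhotplug.h>

/* Hypothetical: fold @cpu's per-cpu state into a live CPU's. */
static void drain_my_percpu_data(unsigned int cpu);

static int my_cpu_dead(unsigned int cpu)
{
        /*
         * Invoked on a CPU that is still online, after @cpu has gone
         * away, so nothing can race with the draining.
         */
        drain_my_percpu_data(cpu);
        return 0;
}

static int __init my_init(void)
{
        /* NULL startup: nothing to do when a CPU comes up. */
        return cpuhp_setup_state_nocalls(MY_EXAMPLE_DEAD, "example:dead",
                                         NULL, my_cpu_dead);
}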
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d75cdf360730..0c8f28a6d89f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3558,24 +3558,21 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb, unsigned long action,
-                        void *hcpu)
+static int kswapd_cpu_online(unsigned int cpu)
 {
         int nid;
 
-        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-                for_each_node_state(nid, N_MEMORY) {
-                        pg_data_t *pgdat = NODE_DATA(nid);
-                        const struct cpumask *mask;
+        for_each_node_state(nid, N_MEMORY) {
+                pg_data_t *pgdat = NODE_DATA(nid);
+                const struct cpumask *mask;
 
-                        mask = cpumask_of_node(pgdat->node_id);
+                mask = cpumask_of_node(pgdat->node_id);
 
-                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-                                /* One of our CPUs online: restore mask */
-                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
-                }
+                if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+                        /* One of our CPUs online: restore mask */
+                        set_cpus_allowed_ptr(pgdat->kswapd, mask);
         }
-        return NOTIFY_OK;
+        return 0;
 }
 
 /*
@@ -3617,12 +3614,15 @@ void kswapd_stop(int nid)
 
 static int __init kswapd_init(void)
 {
-        int nid;
+        int nid, ret;
 
         swap_setup();
         for_each_node_state(nid, N_MEMORY)
                 kswapd_run(nid);
-        hotcpu_notifier(cpu_callback, 0);
+        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
                                         "mm/vmscan:online", kswapd_cpu_online,
+                                        NULL);
+        WARN_ON(ret < 0);
         return 0;
 }
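[Editor's note: kswapd uses the same CPUHP_AP_ONLINE_DYN idiom as kcompactd. One subtlety of dynamic states: on success the setup functions return the allocated state number, which a removable user would need to keep in order to tear the state down later; kswapd and kcompactd are built-in and never unregister, so both discard it. A hypothetical unloadable module would store it:]

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state my_online_state;        /* hypothetical */

static int my_cpu_online(unsigned int cpu)
{
        return 0;
}

static int __init my_init(void)
{
        int ret;

        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "example:online",
                                        my_cpu_online, NULL);
        if (ret < 0)
                return ret;
        my_online_state = ret;          /* remember the allocated slot */
        return 0;
}

static void __exit my_exit(void)
{
        cpuhp_remove_state_nocalls(my_online_state);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");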
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 604f26a4f696..7c28df36f50f 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1720,75 +1720,66 @@ static void __init start_shepherd_timer(void)
 
 static void __init init_cpu_node_state(void)
 {
-        int cpu;
+        int node;
 
-        get_online_cpus();
-        for_each_online_cpu(cpu)
-                node_set_state(cpu_to_node(cpu), N_CPU);
-        put_online_cpus();
+        for_each_online_node(node) {
+                if (cpumask_weight(cpumask_of_node(node)) > 0)
+                        node_set_state(node, N_CPU);
+        }
 }
 
-static void vmstat_cpu_dead(int node)
+static int vmstat_cpu_online(unsigned int cpu)
 {
-        int cpu;
-
-        get_online_cpus();
-        for_each_online_cpu(cpu)
-                if (cpu_to_node(cpu) == node)
-                        goto end;
+        refresh_zone_stat_thresholds();
+        node_set_state(cpu_to_node(cpu), N_CPU);
+        return 0;
+}
 
-        node_clear_state(node, N_CPU);
-end:
-        put_online_cpus();
+static int vmstat_cpu_down_prep(unsigned int cpu)
+{
+        cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
+        return 0;
 }
 
-/*
- * Use the cpu notifier to insure that the thresholds are recalculated
- * when necessary.
- */
-static int vmstat_cpuup_callback(struct notifier_block *nfb,
-                unsigned long action,
-                void *hcpu)
-{
-        long cpu = (long)hcpu;
-
-        switch (action) {
-        case CPU_ONLINE:
-        case CPU_ONLINE_FROZEN:
-                refresh_zone_stat_thresholds();
-                node_set_state(cpu_to_node(cpu), N_CPU);
-                break;
-        case CPU_DOWN_PREPARE:
-        case CPU_DOWN_PREPARE_FROZEN:
-                cancel_delayed_work_sync(&per_cpu(vmstat_work, cpu));
-                break;
-        case CPU_DOWN_FAILED:
-        case CPU_DOWN_FAILED_FROZEN:
-                break;
-        case CPU_DEAD:
-        case CPU_DEAD_FROZEN:
-                refresh_zone_stat_thresholds();
-                vmstat_cpu_dead(cpu_to_node(cpu));
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_OK;
+static int vmstat_cpu_dead(unsigned int cpu)
+{
+        const struct cpumask *node_cpus;
+        int node;
+
+        node = cpu_to_node(cpu);
+
+        refresh_zone_stat_thresholds();
+        node_cpus = cpumask_of_node(node);
+        if (cpumask_weight(node_cpus) > 0)
+                return 0;
+
+        node_clear_state(node, N_CPU);
+        return 0;
 }
 
-static struct notifier_block vmstat_notifier =
-        { &vmstat_cpuup_callback, NULL, 0 };
 #endif
 
 static int __init setup_vmstat(void)
 {
 #ifdef CONFIG_SMP
-        cpu_notifier_register_begin();
-        __register_cpu_notifier(&vmstat_notifier);
+        int ret;
+
+        ret = cpuhp_setup_state_nocalls(CPUHP_MM_VMSTAT_DEAD, "mm/vmstat:dead",
+                                        NULL, vmstat_cpu_dead);
+        if (ret < 0)
+                pr_err("vmstat: failed to register 'dead' hotplug state\n");
+
+        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "mm/vmstat:online",
+                                        vmstat_cpu_online,
+                                        vmstat_cpu_down_prep);
+        if (ret < 0)
+                pr_err("vmstat: failed to register 'online' hotplug state\n");
+
+        get_online_cpus();
         init_cpu_node_state();
+        put_online_cpus();
 
         start_shepherd_timer();
-        cpu_notifier_register_done();
 #endif
 #ifdef CONFIG_PROC_FS
         proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
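[Editor's note: vmstat shows how a multi-way notifier switch decomposes. The CPU_ONLINE and CPU_DOWN_PREPARE arms become the startup/teardown pair of one dynamic online state (for an online-range state, the teardown runs while the outgoing CPU is still present, i.e. at the old CPU_DOWN_PREPARE point), while the CPU_DEAD arm becomes a separate static dead state. The empty CPU_DOWN_FAILED arm needs no counterpart at all, because the state machine re-runs the startup callback when a failed takedown is rolled back. Schematically (summarizing the hunk above, not new code):]

/*
 * Old notifier action        ->  cpuhp equivalent in this patch
 * CPU_ONLINE(_FROZEN)        ->  vmstat_cpu_online()    startup, AP_ONLINE_DYN
 * CPU_DOWN_PREPARE(_FROZEN)  ->  vmstat_cpu_down_prep() teardown, AP_ONLINE_DYN
 * CPU_DOWN_FAILED(_FROZEN)   ->  startup re-run by rollback, no code needed
 * CPU_DEAD(_FROZEN)          ->  vmstat_cpu_dead()      teardown, MM_VMSTAT_DEAD
 */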
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index b0bc023d25c5..9cc3c0b2c2c1 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1284,61 +1284,21 @@ out:
 #endif /* CONFIG_PGTABLE_MAPPING */
 
-static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
-                                void *pcpu)
+static int zs_cpu_prepare(unsigned int cpu)
 {
-        int ret, cpu = (long)pcpu;
         struct mapping_area *area;
 
-        switch (action) {
-        case CPU_UP_PREPARE:
-                area = &per_cpu(zs_map_area, cpu);
-                ret = __zs_cpu_up(area);
-                if (ret)
-                        return notifier_from_errno(ret);
-                break;
-        case CPU_DEAD:
-        case CPU_UP_CANCELED:
-                area = &per_cpu(zs_map_area, cpu);
-                __zs_cpu_down(area);
-                break;
-        }
-
-        return NOTIFY_OK;
+        area = &per_cpu(zs_map_area, cpu);
+        return __zs_cpu_up(area);
 }
 
-static struct notifier_block zs_cpu_nb = {
-        .notifier_call = zs_cpu_notifier
-};
-
-static int zs_register_cpu_notifier(void)
+static int zs_cpu_dead(unsigned int cpu)
 {
-        int cpu, uninitialized_var(ret);
-
-        cpu_notifier_register_begin();
-
-        __register_cpu_notifier(&zs_cpu_nb);
-        for_each_online_cpu(cpu) {
-                ret = zs_cpu_notifier(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
-                if (notifier_to_errno(ret))
-                        break;
-        }
-
-        cpu_notifier_register_done();
-        return notifier_to_errno(ret);
-}
-
-static void zs_unregister_cpu_notifier(void)
-{
-        int cpu;
-
-        cpu_notifier_register_begin();
-
-        for_each_online_cpu(cpu)
-                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
-        __unregister_cpu_notifier(&zs_cpu_nb);
+        struct mapping_area *area;
 
-        cpu_notifier_register_done();
+        area = &per_cpu(zs_map_area, cpu);
+        __zs_cpu_down(area);
+        return 0;
 }
 
 static void __init init_zs_size_classes(void)
@@ -2534,10 +2494,10 @@ static int __init zs_init(void)
         if (ret)
                 goto out;
 
-        ret = zs_register_cpu_notifier();
-
+        ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
+                                zs_cpu_prepare, zs_cpu_dead);
         if (ret)
-                goto notifier_fail;
+                goto hp_setup_fail;
 
         init_zs_size_classes();
 
@@ -2549,8 +2509,7 @@ static int __init zs_init(void)
 
         return 0;
 
-notifier_fail:
-        zs_unregister_cpu_notifier();
+hp_setup_fail:
         zsmalloc_unmount();
 out:
         return ret;
@@ -2562,7 +2521,7 @@ static void __exit zs_exit(void)
         zpool_unregister_driver(&zs_zpool_driver);
 #endif
         zsmalloc_unmount();
-        zs_unregister_cpu_notifier();
+        cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);
 
         zs_stat_exit();
 }
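[Editor's note: unlike the _nocalls registrations above, zs_init() uses plain cpuhp_setup_state(), which invokes the startup callback for every CPU that is already online during registration and unwinds with the teardown callback on failure — exactly the loop zs_register_cpu_notifier() used to hand-roll. CPUHP_MM_ZS_PREPARE sits in the prepare stage, so the callback runs on a control CPU before the incoming CPU executes, where GFP_KERNEL allocations are fine. A generic sketch of the pattern, with hypothetical names (my_buf, my_prepare, my_dead) and MY_EXAMPLE_PREPARE standing in for an enum cpuhp_state slot:]

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/topology.h>

static DEFINE_PER_CPU(void *, my_buf);          /* hypothetical */

static int my_prepare(unsigned int cpu)
{
        /* Prepare stage: runs on a control CPU before @cpu boots. */
        void *p = kmalloc_node(PAGE_SIZE, GFP_KERNEL, cpu_to_node(cpu));

        if (!p)
                return -ENOMEM;         /* aborts bringing @cpu up */
        per_cpu(my_buf, cpu) = p;
        return 0;
}

static int my_dead(unsigned int cpu)
{
        kfree(per_cpu(my_buf, cpu));
        per_cpu(my_buf, cpu) = NULL;
        return 0;
}

static int __init my_init(void)
{
        /*
         * No _nocalls: my_prepare() is called here for each CPU already
         * online, and my_dead() undoes the partial work if a call fails.
         */
        return cpuhp_setup_state(MY_EXAMPLE_PREPARE, "example:prepare",
                                 my_prepare, my_dead);
}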
diff --git a/mm/zswap.c b/mm/zswap.c
index 275b22cc8df4..067a0d62f318 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -118,7 +118,7 @@ struct zswap_pool {
         struct kref kref;
         struct list_head list;
         struct work_struct work;
-        struct notifier_block notifier;
+        struct hlist_node node;
         char tfm_name[CRYPTO_MAX_ALG_NAME];
 };
 
@@ -352,143 +352,58 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
 **********************************/
 static DEFINE_PER_CPU(u8 *, zswap_dstmem);
 
-static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu)
+static int zswap_dstmem_prepare(unsigned int cpu)
 {
         u8 *dst;
 
-        switch (action) {
-        case CPU_UP_PREPARE:
-                dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-                if (!dst) {
-                        pr_err("can't allocate compressor buffer\n");
-                        return NOTIFY_BAD;
-                }
-                per_cpu(zswap_dstmem, cpu) = dst;
-                break;
-        case CPU_DEAD:
-        case CPU_UP_CANCELED:
-                dst = per_cpu(zswap_dstmem, cpu);
-                kfree(dst);
-                per_cpu(zswap_dstmem, cpu) = NULL;
-                break;
-        default:
-                break;
+        dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+        if (!dst) {
+                pr_err("can't allocate compressor buffer\n");
+                return -ENOMEM;
         }
-
-        return NOTIFY_OK;
+        per_cpu(zswap_dstmem, cpu) = dst;
+        return 0;
 }
 
-static int zswap_cpu_dstmem_notifier(struct notifier_block *nb,
-                                     unsigned long action, void *pcpu)
+static int zswap_dstmem_dead(unsigned int cpu)
 {
-        return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu);
-}
+        u8 *dst;
 
-static struct notifier_block zswap_dstmem_notifier = {
-        .notifier_call = zswap_cpu_dstmem_notifier,
-};
+        dst = per_cpu(zswap_dstmem, cpu);
+        kfree(dst);
+        per_cpu(zswap_dstmem, cpu) = NULL;
 
-static int __init zswap_cpu_dstmem_init(void)
-{
-        unsigned long cpu;
-
-        cpu_notifier_register_begin();
-        for_each_online_cpu(cpu)
-                if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) ==
-                    NOTIFY_BAD)
-                        goto cleanup;
-        __register_cpu_notifier(&zswap_dstmem_notifier);
-        cpu_notifier_register_done();
         return 0;
-
-cleanup:
-        for_each_online_cpu(cpu)
-                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
-        cpu_notifier_register_done();
-        return -ENOMEM;
-}
-
-static void zswap_cpu_dstmem_destroy(void)
-{
-        unsigned long cpu;
-
-        cpu_notifier_register_begin();
-        for_each_online_cpu(cpu)
-                __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu);
-        __unregister_cpu_notifier(&zswap_dstmem_notifier);
-        cpu_notifier_register_done();
 }
 
-static int __zswap_cpu_comp_notifier(struct zswap_pool *pool,
-                                     unsigned long action, unsigned long cpu)
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 {
+        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
         struct crypto_comp *tfm;
 
-        switch (action) {
-        case CPU_UP_PREPARE:
-                if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
-                        break;
-                tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
-                if (IS_ERR_OR_NULL(tfm)) {
-                        pr_err("could not alloc crypto comp %s : %ld\n",
-                               pool->tfm_name, PTR_ERR(tfm));
-                        return NOTIFY_BAD;
-                }
-                *per_cpu_ptr(pool->tfm, cpu) = tfm;
-                break;
-        case CPU_DEAD:
-        case CPU_UP_CANCELED:
-                tfm = *per_cpu_ptr(pool->tfm, cpu);
-                if (!IS_ERR_OR_NULL(tfm))
-                        crypto_free_comp(tfm);
-                *per_cpu_ptr(pool->tfm, cpu) = NULL;
-                break;
-        default:
-                break;
-        }
-        return NOTIFY_OK;
-}
-
-static int zswap_cpu_comp_notifier(struct notifier_block *nb,
-                                   unsigned long action, void *pcpu)
-{
-        unsigned long cpu = (unsigned long)pcpu;
-        struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier);
-
-        return __zswap_cpu_comp_notifier(pool, action, cpu);
-}
+        if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu)))
+                return 0;
 
-static int zswap_cpu_comp_init(struct zswap_pool *pool)
-{
-        unsigned long cpu;
-
-        memset(&pool->notifier, 0, sizeof(pool->notifier));
-        pool->notifier.notifier_call = zswap_cpu_comp_notifier;
-
-        cpu_notifier_register_begin();
-        for_each_online_cpu(cpu)
-                if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) ==
-                    NOTIFY_BAD)
-                        goto cleanup;
-        __register_cpu_notifier(&pool->notifier);
-        cpu_notifier_register_done();
+        tfm = crypto_alloc_comp(pool->tfm_name, 0, 0);
+        if (IS_ERR_OR_NULL(tfm)) {
+                pr_err("could not alloc crypto comp %s : %ld\n",
+                       pool->tfm_name, PTR_ERR(tfm));
+                return -ENOMEM;
+        }
+        *per_cpu_ptr(pool->tfm, cpu) = tfm;
         return 0;
-
-cleanup:
-        for_each_online_cpu(cpu)
-                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
-        cpu_notifier_register_done();
-        return -ENOMEM;
 }
 
-static void zswap_cpu_comp_destroy(struct zswap_pool *pool)
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
 {
-        unsigned long cpu;
+        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+        struct crypto_comp *tfm;
 
-        cpu_notifier_register_begin();
-        for_each_online_cpu(cpu)
-                __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu);
-        __unregister_cpu_notifier(&pool->notifier);
-        cpu_notifier_register_done();
+        tfm = *per_cpu_ptr(pool->tfm, cpu);
+        if (!IS_ERR_OR_NULL(tfm))
+                crypto_free_comp(tfm);
+        *per_cpu_ptr(pool->tfm, cpu) = NULL;
+        return 0;
 }
 
 /*********************************
@@ -569,6 +484,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
         struct zswap_pool *pool;
         char name[38]; /* 'zswap' + 32 char (max) num + \0 */
         gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+        int ret;
 
         pool = kzalloc(sizeof(*pool), GFP_KERNEL);
         if (!pool) {
@@ -593,7 +509,9 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
                 goto error;
         }
 
-        if (zswap_cpu_comp_init(pool))
+        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
+                                       &pool->node);
+        if (ret)
                 goto error;
 
         pr_debug("using %s compressor\n", pool->tfm_name);
@@ -647,7 +565,7 @@ static void zswap_pool_destroy(struct zswap_pool *pool)
 {
         zswap_pool_debug("destroying", pool);
 
-        zswap_cpu_comp_destroy(pool);
+        cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
         free_percpu(pool->tfm);
         zpool_destroy_pool(pool->zpool);
         kfree(pool);
@@ -1238,6 +1156,7 @@ static void __exit zswap_debugfs_exit(void) { }
 static int __init init_zswap(void)
 {
         struct zswap_pool *pool;
+        int ret;
 
         zswap_init_started = true;
 
@@ -1246,11 +1165,20 @@ static int __init init_zswap(void)
                 goto cache_fail;
         }
 
-        if (zswap_cpu_dstmem_init()) {
+        ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
+                                zswap_dstmem_prepare, zswap_dstmem_dead);
+        if (ret) {
                 pr_err("dstmem alloc failed\n");
                 goto dstmem_fail;
         }
 
+        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
+                                      "mm/zswap_pool:prepare",
+                                      zswap_cpu_comp_prepare,
+                                      zswap_cpu_comp_dead);
+        if (ret)
+                goto hp_fail;
+
         pool = __zswap_pool_create_fallback();
         if (!pool) {
                 pr_err("pool creation failed\n");
@@ -1267,7 +1195,9 @@ static int __init init_zswap(void)
         return 0;
 
 pool_fail:
-        zswap_cpu_dstmem_destroy();
+        cpuhp_remove_state_nocalls(CPUHP_MM_ZSWP_POOL_PREPARE);
+hp_fail:
+        cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
 dstmem_fail:
         zswap_entry_cache_destroy();
 cache_fail:
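[Editor's note: the zswap conversion is the most involved one. Each zswap_pool needs its own per-cpu transforms, which the old code handled by embedding a notifier_block in every pool; the multi-instance hotplug API formalizes that pattern. The state is registered once with cpuhp_setup_state_multi(), each pool is then attached with cpuhp_state_add_instance() — which invokes the prepare callback for every online CPU — and the callbacks receive the instance's hlist_node, from which the owning object is recovered with hlist_entry(). A sketch with a hypothetical struct my_pool and slot MY_POOL_PREPARE:]

#include <linux/cpuhotplug.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct my_pool {                        /* hypothetical per-instance object */
        struct hlist_node node;         /* linkage owned by the cpuhp core */
        /* ... per-pool, per-cpu resources ... */
};

static int my_pool_prepare(unsigned int cpu, struct hlist_node *node)
{
        struct my_pool *pool = hlist_entry(node, struct my_pool, node);

        pr_debug("allocating cpu %u resources for pool %p\n", cpu, pool);
        return 0;
}

static int my_pool_dead(unsigned int cpu, struct hlist_node *node)
{
        struct my_pool *pool = hlist_entry(node, struct my_pool, node);

        pr_debug("freeing cpu %u resources for pool %p\n", cpu, pool);
        return 0;
}

static int __init my_init(void)
{
        /* Register once; MY_POOL_PREPARE stands in for an enum slot. */
        return cpuhp_setup_state_multi(MY_POOL_PREPARE, "example:prepare",
                                       my_pool_prepare, my_pool_dead);
}

static struct my_pool *my_pool_create(void)
{
        struct my_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

        if (!pool)
                return NULL;
        /* Attach: runs my_pool_prepare() for every online CPU. */
        if (cpuhp_state_add_instance(MY_POOL_PREPARE, &pool->node)) {
                kfree(pool);
                return NULL;
        }
        return pool;
}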