-rw-r--r--  arch/x86/mm/init.c     |  4
-rw-r--r--  arch/x86/mm/init_64.c  |  4
-rw-r--r--  drivers/base/devres.c  | 19
-rw-r--r--  drivers/nvdimm/pmem.c  | 28
-rw-r--r--  include/linux/device.h | 16
-rw-r--r--  include/linux/pmem.h   | 26
-rw-r--r--  kernel/memremap.c      | 16
7 files changed, 48 insertions(+), 65 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1d8a83df153a..4b9ea3f27de4 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -354,7 +354,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	}

 	for (i = 0; i < nr_range; i++)
-		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+		pr_debug(" [mem %#010lx-%#010lx] page %s\n",
 				mr[i].start, mr[i].end - 1,
 				page_size_string(&mr[i]));

@@ -401,7 +401,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	unsigned long ret = 0;
 	int nr_range, i;

-	pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
+	pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
 	       start, end - 1);

 	memset(mr, 0, sizeof(mr));
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index df48430c279b..e5d42f1a2a71 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1268,7 +1268,7 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start,
 			/* check to see if we have contiguous blocks */
 			if (p_end != p || node_start != node) {
 				if (p_start)
-					printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+					pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
 						addr_start, addr_end-1, p_start, p_end-1, node_start);
 				addr_start = addr;
 				node_start = node;
@@ -1366,7 +1366,7 @@ void register_page_bootmem_memmap(unsigned long section_nr,
 void __meminit vmemmap_populate_print_last(void)
 {
 	if (p_start) {
-		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
 			addr_start, addr_end-1, p_start, p_end-1, node_start);
 		p_start = NULL;
 		p_end = NULL;
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 875464690117..8fc654f0807b 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -82,12 +82,12 @@ static struct devres_group * node_to_group(struct devres_node *node)
 }

 static __always_inline struct devres * alloc_dr(dr_release_t release,
-						size_t size, gfp_t gfp)
+						size_t size, gfp_t gfp, int nid)
 {
 	size_t tot_size = sizeof(struct devres) + size;
 	struct devres *dr;

-	dr = kmalloc_track_caller(tot_size, gfp);
+	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
 	if (unlikely(!dr))
 		return NULL;

@@ -106,24 +106,25 @@ static void add_dr(struct device *dev, struct devres_node *node)
 }

 #ifdef CONFIG_DEBUG_DEVRES
-void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
 		      const char *name)
 {
 	struct devres *dr;

-	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
 	if (unlikely(!dr))
 		return NULL;
 	set_node_dbginfo(&dr->node, name, size);
 	return dr->data;
 }
-EXPORT_SYMBOL_GPL(__devres_alloc);
+EXPORT_SYMBOL_GPL(__devres_alloc_node);
 #else
 /**
  * devres_alloc - Allocate device resource data
  * @release: Release function devres will be associated with
  * @size: Allocation size
  * @gfp: Allocation flags
+ * @nid: NUMA node
  *
  * Allocate devres of @size bytes. The allocated area is zeroed, then
  * associated with @release. The returned pointer can be passed to
@@ -132,16 +133,16 @@ EXPORT_SYMBOL_GPL(__devres_alloc);
  * RETURNS:
  * Pointer to allocated devres on success, NULL on failure.
  */
-void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
 {
 	struct devres *dr;

-	dr = alloc_dr(release, size, gfp | __GFP_ZERO);
+	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
 	if (unlikely(!dr))
 		return NULL;
 	return dr->data;
 }
-EXPORT_SYMBOL_GPL(devres_alloc);
+EXPORT_SYMBOL_GPL(devres_alloc_node);
 #endif

 /**
@@ -776,7 +777,7 @@ void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
 	struct devres *dr;

 	/* use raw alloc_dr for kmalloc caller tracing */
-	dr = alloc_dr(devm_kmalloc_release, size, gfp);
+	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
 	if (unlikely(!dr))
 		return NULL;

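For reference, a minimal usage sketch of the new allocator (hypothetical driver code, not part of this patch; foo_state, foo_state_release() and foo_setup() are made-up names). devres_alloc_node() behaves like devres_alloc() but places the devres bookkeeping on the given NUMA node; the declaration is added to include/linux/device.h further down.

/* Hypothetical sketch: allocate a managed resource on the device's node. */
#include <linux/device.h>
#include <linux/slab.h>

struct foo_state {
	void *buf;
};

static void foo_state_release(struct device *dev, void *res)
{
	struct foo_state *st = res;

	kfree(st->buf);
}

static int foo_setup(struct device *dev)
{
	struct foo_state *st;

	/* Same semantics as devres_alloc(), but the devres node (and thus
	 * @st itself) is allocated on @dev's NUMA node. */
	st = devres_alloc_node(foo_state_release, sizeof(*st), GFP_KERNEL,
			       dev_to_node(dev));
	if (!st)
		return -ENOMEM;

	st->buf = kmalloc_node(4096, GFP_KERNEL, dev_to_node(dev));
	if (!st->buf) {
		devres_free(st);
		return -ENOMEM;
	}

	devres_add(dev, st);
	return 0;
}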
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 0ba6a978f227..349f03e7ed06 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -150,18 +150,15 @@ static struct pmem_device *pmem_alloc(struct device *dev,
 		return ERR_PTR(-EBUSY);
 	}

-	if (pmem_should_map_pages(dev)) {
-		void *addr = devm_memremap_pages(dev, res);
+	if (pmem_should_map_pages(dev))
+		pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, res);
+	else
+		pmem->virt_addr = (void __pmem *) devm_memremap(dev,
+				pmem->phys_addr, pmem->size,
+				ARCH_MEMREMAP_PMEM);

-		if (IS_ERR(addr))
-			return addr;
-		pmem->virt_addr = (void __pmem *) addr;
-	} else {
-		pmem->virt_addr = memremap_pmem(dev, pmem->phys_addr,
-				pmem->size);
-		if (!pmem->virt_addr)
-			return ERR_PTR(-ENXIO);
-	}
+	if (IS_ERR(pmem->virt_addr))
+		return (void __force *) pmem->virt_addr;

 	return pmem;
 }
@@ -179,9 +176,10 @@ static void pmem_detach_disk(struct pmem_device *pmem)
 static int pmem_attach_disk(struct device *dev,
 		struct nd_namespace_common *ndns, struct pmem_device *pmem)
 {
+	int nid = dev_to_node(dev);
 	struct gendisk *disk;

-	pmem->pmem_queue = blk_alloc_queue(GFP_KERNEL);
+	pmem->pmem_queue = blk_alloc_queue_node(GFP_KERNEL, nid);
 	if (!pmem->pmem_queue)
 		return -ENOMEM;

@@ -191,7 +189,7 @@ static int pmem_attach_disk(struct device *dev,
 	blk_queue_bounce_limit(pmem->pmem_queue, BLK_BOUNCE_ANY);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, pmem->pmem_queue);

-	disk = alloc_disk(0);
+	disk = alloc_disk_node(0, nid);
 	if (!disk) {
 		blk_cleanup_queue(pmem->pmem_queue);
 		return -ENOMEM;
@@ -363,8 +361,8 @@ static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)

 	/* establish pfn range for lookup, and switch to direct map */
 	pmem = dev_get_drvdata(dev);
-	memunmap_pmem(dev, pmem->virt_addr);
-	pmem->virt_addr = (void __pmem *)devm_memremap_pages(dev, &nsio->res);
+	devm_memunmap(dev, (void __force *) pmem->virt_addr);
+	pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &nsio->res);
 	if (IS_ERR(pmem->virt_addr)) {
 		rc = PTR_ERR(pmem->virt_addr);
 		goto err;
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d7bc6349930..b8f411b57dcb 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -604,13 +604,21 @@ typedef void (*dr_release_t)(struct device *dev, void *res);
 typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);

 #ifdef CONFIG_DEBUG_DEVRES
-extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
-			    const char *name);
+extern void *__devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+				 int nid, const char *name);
 #define devres_alloc(release, size, gfp) \
-	__devres_alloc(release, size, gfp, #release)
+	__devres_alloc_node(release, size, gfp, NUMA_NO_NODE, #release)
+#define devres_alloc_node(release, size, gfp, nid) \
+	__devres_alloc_node(release, size, gfp, nid, #release)
 #else
-extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+extern void *devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp,
+			       int nid);
+static inline void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
+{
+	return devres_alloc_node(release, size, gfp, NUMA_NO_NODE);
+}
 #endif
+
 extern void devres_for_each_res(struct device *dev, dr_release_t release,
 			dr_match_t match, void *match_data,
 			void (*fn)(struct device *, void *, void *),
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index 85f810b33917..acfea8ce4a07 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -65,11 +65,6 @@ static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
 	memcpy(dst, (void __force const *) src, size);
 }

-static inline void memunmap_pmem(struct device *dev, void __pmem *addr)
-{
-	devm_memunmap(dev, (void __force *) addr);
-}
-
 static inline bool arch_has_pmem_api(void)
 {
 	return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
@@ -93,7 +88,7 @@ static inline bool arch_has_wmb_pmem(void)
  * These defaults seek to offer decent performance and minimize the
  * window between i/o completion and writes being durable on media.
  * However, it is undefined / architecture specific whether
- * default_memremap_pmem + default_memcpy_to_pmem is sufficient for
+ * ARCH_MEMREMAP_PMEM + default_memcpy_to_pmem is sufficient for
  * making data durable relative to i/o completion.
  */
 static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
@@ -117,25 +112,6 @@ static inline void default_clear_pmem(void __pmem *addr, size_t size)
 }

 /**
- * memremap_pmem - map physical persistent memory for pmem api
- * @offset: physical address of persistent memory
- * @size: size of the mapping
- *
- * Establish a mapping of the architecture specific memory type expected
- * by memcpy_to_pmem() and wmb_pmem().  For example, it may be
- * the case that an uncacheable or writethrough mapping is sufficient,
- * or a writeback mapping provided memcpy_to_pmem() and
- * wmb_pmem() arrange for the data to be written through the
- * cache to persistent media.
- */
-static inline void __pmem *memremap_pmem(struct device *dev,
-	resource_size_t offset, unsigned long size)
-{
-	return (void __pmem *) devm_memremap(dev, offset, size,
-			ARCH_MEMREMAP_PMEM);
-}
-
-/**
  * memcpy_to_pmem - copy data to persistent memory
  * @dst: destination buffer for the copy
  * @src: source buffer for the copy
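A small sketch of the error-handling pattern the pmem changes above rely on (hypothetical helper name, assuming the sparse __pmem annotation from linux/compiler.h): with memremap_pmem() gone, the driver calls devm_memremap()/devm_memremap_pages() directly, both of which report failure via ERR_PTR(), so a single IS_ERR() check replaces the old NULL check, with __force casts to strip the address-space attribute for sparse.

/* Hypothetical helper mirroring the pmem_alloc() pattern above. */
#include <linux/err.h>
#include <linux/pmem.h>

static int pmem_mapping_error(void __pmem *addr)
{
	if (IS_ERR((void __force *) addr))
		return PTR_ERR((void __force *) addr);
	return 0;
}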
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 72b0c66628b6..3218e8b1fc28 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,9 +114,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
 {
 	void **ptr, *addr;

-	ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
+	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
+			dev_to_node(dev));
 	if (!ptr)
-		return NULL;
+		return ERR_PTR(-ENOMEM);

 	addr = memremap(offset, size, flags);
 	if (addr) {
@@ -131,9 +132,8 @@ EXPORT_SYMBOL(devm_memremap);

 void devm_memunmap(struct device *dev, void *addr)
 {
-	WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
-			       addr));
-	memunmap(addr);
+	WARN_ON(devres_release(dev, devm_memremap_release,
+				devm_memremap_match, addr));
 }
 EXPORT_SYMBOL(devm_memunmap);

@@ -166,8 +166,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)
 	if (is_ram == REGION_INTERSECTS)
 		return __va(res->start);

-	page_map = devres_alloc(devm_memremap_pages_release,
-			sizeof(*page_map), GFP_KERNEL);
+	page_map = devres_alloc_node(devm_memremap_pages_release,
+			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
 	if (!page_map)
 		return ERR_PTR(-ENOMEM);

@@ -175,7 +175,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res)

 	nid = dev_to_node(dev);
 	if (nid < 0)
-		nid = 0;
+		nid = numa_mem_id();

 	error = arch_add_memory(nid, res->start, resource_size(res), true);
 	if (error) {
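Finally, a caller-side sketch of the devm_memremap() return-value change (hypothetical function and names; MEMREMAP_WB is assumed to be a valid mapping flag here): the devres bookkeeping failure now comes back as ERR_PTR(-ENOMEM) rather than NULL, so callers should test with IS_ERR(); in this version a failure of memremap() itself still appears to surface as NULL, so a careful caller checks both.

/* Hypothetical caller illustrating the new devm_memremap() error contract. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>

static int foo_map_window(struct device *dev, resource_size_t start,
			  size_t len, void **out)
{
	void *addr = devm_memremap(dev, start, len, MEMREMAP_WB);

	if (IS_ERR(addr))	/* devres allocation failed: -ENOMEM */
		return PTR_ERR(addr);
	if (!addr)		/* memremap() itself failed */
		return -ENXIO;

	*out = addr;
	return 0;
}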