author    | Dan Williams <dan.j.williams@intel.com> | 2017-07-03 16:54:58 -0700
committer | Dan Williams <dan.j.williams@intel.com> | 2017-07-03 16:54:58 -0700
commit    | 9d92573fff3ec70785ef1815cc80573f70e7a921 (patch)
tree      | bce6e6bbad56f805d1adcebddabf9dd9e8072ce4 /drivers/nvdimm
parent    | 2de5148ffb12ff6b4088125f44818771e78e6830 (diff)
parent    | 0b277961f4484fb3f142caaa1dd1748cb0b2cbee (diff)
Merge branch 'for-4.13/dax' into libnvdimm-for-next
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r-- | drivers/nvdimm/bus.c            |  8
-rw-r--r-- | drivers/nvdimm/claim.c          |  6
-rw-r--r-- | drivers/nvdimm/core.c           |  2
-rw-r--r-- | drivers/nvdimm/dax_devs.c       |  2
-rw-r--r-- | drivers/nvdimm/dimm_devs.c      | 10
-rw-r--r-- | drivers/nvdimm/namespace_devs.c | 14
-rw-r--r-- | drivers/nvdimm/nd-core.h        |  9
-rw-r--r-- | drivers/nvdimm/pfn_devs.c       |  4
-rw-r--r-- | drivers/nvdimm/pmem.c           | 40
-rw-r--r-- | drivers/nvdimm/pmem.h           | 14
-rw-r--r-- | drivers/nvdimm/region_devs.c    | 43
11 files changed, 107 insertions, 45 deletions
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 9730db48a01b..937fafa1886a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -38,13 +38,13 @@ static int to_nd_device_type(struct device *dev)
 {
 	if (is_nvdimm(dev))
 		return ND_DEVICE_DIMM;
-	else if (is_nd_pmem(dev))
+	else if (is_memory(dev))
 		return ND_DEVICE_REGION_PMEM;
 	else if (is_nd_blk(dev))
 		return ND_DEVICE_REGION_BLK;
 	else if (is_nd_dax(dev))
 		return ND_DEVICE_DAX_PMEM;
-	else if (is_nd_pmem(dev->parent) || is_nd_blk(dev->parent))
+	else if (is_nd_region(dev->parent))
 		return nd_region_to_nstype(to_nd_region(dev->parent));
 
 	return 0;
@@ -56,7 +56,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 	 * Ensure that region devices always have their numa node set as
 	 * early as possible.
 	 */
-	if (is_nd_pmem(dev) || is_nd_blk(dev))
+	if (is_nd_region(dev))
 		set_dev_node(dev, to_nd_region(dev)->numa_node);
 	return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
 			to_nd_device_type(dev));
@@ -65,7 +65,7 @@ static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 static struct module *to_bus_provider(struct device *dev)
 {
 	/* pin bus providers while regions are enabled */
-	if (is_nd_pmem(dev) || is_nd_blk(dev)) {
+	if (is_nd_region(dev)) {
 		struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
 
 		return nvdimm_bus->nd_desc->module;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index f8ad92b4dcd2..47770460f3d3 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -12,8 +12,8 @@
  */
 #include <linux/device.h>
 #include <linux/sizes.h>
-#include <linux/pmem.h>
 #include "nd-core.h"
+#include "pmem.h"
 #include "pfn.h"
 #include "btt.h"
 #include "nd.h"
@@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 				cleared /= 512;
 				badblocks_clear(&nsio->bb, sector, cleared);
 			}
-			invalidate_pmem(nsio->addr + offset, size);
+			arch_invalidate_pmem(nsio->addr + offset, size);
 		} else
 			rc = -EIO;
 	}
 
-	memcpy_to_pmem(nsio->addr + offset, buf, size);
+	memcpy_flushcache(nsio->addr + offset, buf, size);
 	nvdimm_flush(to_nd_region(ndns->dev.parent));
 
 	return rc;
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index ed0bf174d128..7cd99b1f8596 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -504,7 +504,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
 	struct nvdimm_bus *nvdimm_bus;
 	struct list_head *poison_list;
 
-	if (!is_nd_pmem(&nd_region->dev)) {
+	if (!is_memory(&nd_region->dev)) {
 		dev_WARN_ONCE(&nd_region->dev, 1,
 				"%s only valid for pmem regions\n", __func__);
 		return;
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 59f676381ae5..1bf2bd318371 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -89,7 +89,7 @@ struct device *nd_dax_create(struct nd_region *nd_region)
 	struct device *dev = NULL;
 	struct nd_dax *nd_dax;
 
-	if (!is_nd_pmem(&nd_region->dev))
+	if (!is_memory(&nd_region->dev))
 		return NULL;
 
 	nd_dax = nd_dax_alloc(nd_region);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 9852a3355509..f0d1b7e5de01 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -20,6 +20,7 @@
 #include <linux/mm.h>
 #include "nd-core.h"
 #include "label.h"
+#include "pmem.h"
 #include "nd.h"
 
 static DEFINE_IDA(dimm_ida);
@@ -235,6 +236,13 @@ struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
 }
 EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);
 
+unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
+{
+	/* pmem mapping properties are private to libnvdimm */
+	return ARCH_MEMREMAP_PMEM;
+}
+EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);
+
 struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
 {
 	struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -411,7 +419,7 @@ int alias_dpa_busy(struct device *dev, void *data)
 	struct resource *res;
 	int i;
 
-	if (!is_nd_pmem(dev))
+	if (!is_memory(dev))
 		return 0;
 
 	nd_region = to_nd_region(dev);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index c96e31330213..5f1c6756e57c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -14,10 +14,10 @@
 #include <linux/device.h>
 #include <linux/sort.h>
 #include <linux/slab.h>
-#include <linux/pmem.h>
 #include <linux/list.h>
 #include <linux/nd.h>
 #include "nd-core.h"
+#include "pmem.h"
 #include "nd.h"
 
 static void namespace_io_release(struct device *dev)
@@ -112,7 +112,7 @@ static int is_uuid_busy(struct device *dev, void *data)
 
 static int is_namespace_uuid_busy(struct device *dev, void *data)
 {
-	if (is_nd_pmem(dev) || is_nd_blk(dev))
+	if (is_nd_region(dev))
 		return device_for_each_child(dev, data, is_uuid_busy);
 	return 0;
 }
@@ -155,11 +155,7 @@ bool pmem_should_map_pages(struct device *dev)
 			IORES_DESC_NONE) == REGION_MIXED)
 		return false;
 
-#ifdef ARCH_MEMREMAP_PMEM
 	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
-#else
-	return false;
-#endif
 }
 EXPORT_SYMBOL(pmem_should_map_pages);
 
@@ -810,7 +806,7 @@ static int __reserve_free_pmem(struct device *dev, void *data)
 	struct nd_label_id label_id;
 	int i;
 
-	if (!is_nd_pmem(dev))
+	if (!is_memory(dev))
 		return 0;
 
 	nd_region = to_nd_region(dev);
@@ -2057,7 +2053,7 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
 	struct resource *res;
 	struct device *dev;
 
-	if (!is_nd_pmem(&nd_region->dev))
+	if (!is_memory(&nd_region->dev))
 		return NULL;
 
 	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
@@ -2360,7 +2356,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
 		}
 		dev->parent = &nd_region->dev;
 		devs[count++] = dev;
-	} else if (is_nd_pmem(&nd_region->dev)) {
+	} else if (is_memory(&nd_region->dev)) {
 		/* clean unselected labels */
 		for (i = 0; i < nd_region->ndr_mappings; i++) {
 			struct list_head *l, *e;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 4c4bd209e725..86bc19ae30da 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -64,7 +64,16 @@ struct blk_alloc_info {
 
 bool is_nvdimm(struct device *dev);
 bool is_nd_pmem(struct device *dev);
+bool is_nd_volatile(struct device *dev);
 bool is_nd_blk(struct device *dev);
+static inline bool is_nd_region(struct device *dev)
+{
+	return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
+}
+static inline bool is_memory(struct device *dev)
+{
+	return is_nd_pmem(dev) || is_nd_volatile(dev);
+}
 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
 int __init nvdimm_bus_init(void);
 void nvdimm_bus_exit(void);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 5e4041276d6f..5fcb6f5b22a2 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -331,7 +331,7 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
 	struct nd_pfn *nd_pfn;
 	struct device *dev;
 
-	if (!is_nd_pmem(&nd_region->dev))
+	if (!is_memory(&nd_region->dev))
 		return NULL;
 
 	nd_pfn = nd_pfn_alloc(nd_region);
@@ -354,7 +354,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
 	if (!pfn_sb || !ndns)
 		return -ENODEV;
 
-	if (!is_nd_pmem(nd_pfn->dev.parent))
+	if (!is_memory(nd_pfn->dev.parent))
 		return -ENODEV;
 
 	if (nvdimm_read_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0))
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4a9cffc14512..e0f6d83c5a6e 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -28,7 +28,7 @@
 #include <linux/blk-mq.h>
 #include <linux/pfn_t.h>
 #include <linux/slab.h>
-#include <linux/pmem.h>
+#include <linux/uio.h>
 #include <linux/dax.h>
 #include <linux/nd.h>
 #include "pmem.h"
@@ -72,7 +72,7 @@ static int pmem_clear_poison(struct pmem_device *pmem, phys_addr_t offset,
 		sysfs_notify_dirent(pmem->bb_state);
 	}
 
-	invalidate_pmem(pmem->virt_addr + offset, len);
+	arch_invalidate_pmem(pmem->virt_addr + offset, len);
 
 	return rc;
 }
@@ -82,7 +82,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
 {
 	void *mem = kmap_atomic(page);
 
-	memcpy_to_pmem(pmem_addr, mem + off, len);
+	memcpy_flushcache(pmem_addr, mem + off, len);
 	kunmap_atomic(mem);
 }
 
@@ -237,8 +237,27 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
 	return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
 }
 
+static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
+		void *addr, size_t bytes, struct iov_iter *i)
+{
+	return copy_from_iter_flushcache(addr, bytes, i);
+}
+
+static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
+		void *addr, size_t size)
+{
+	arch_wb_cache_pmem(addr, size);
+}
+
 static const struct dax_operations pmem_dax_ops = {
 	.direct_access = pmem_dax_direct_access,
+	.copy_from_iter = pmem_copy_from_iter,
+	.flush = pmem_dax_flush,
+};
+
+static const struct attribute_group *pmem_attribute_groups[] = {
+	&dax_attribute_group,
+	NULL,
 };
 
 static void pmem_release_queue(void *q)
@@ -267,14 +286,15 @@ static int pmem_attach_disk(struct device *dev,
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	struct nd_region *nd_region = to_nd_region(dev->parent);
 	struct vmem_altmap __altmap, *altmap = NULL;
+	int nid = dev_to_node(dev), fua, wbc;
 	struct resource *res = &nsio->res;
 	struct nd_pfn *nd_pfn = NULL;
 	struct dax_device *dax_dev;
-	int nid = dev_to_node(dev);
 	struct nd_pfn_sb *pfn_sb;
 	struct pmem_device *pmem;
 	struct resource pfn_res;
 	struct request_queue *q;
+	struct device *gendev;
 	struct gendisk *disk;
 	void *addr;
 
@@ -296,8 +316,12 @@ static int pmem_attach_disk(struct device *dev,
 	dev_set_drvdata(dev, pmem);
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
-	if (nvdimm_has_flush(nd_region) < 0)
+	fua = nvdimm_has_flush(nd_region);
+	if (!IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) || fua < 0) {
 		dev_warn(dev, "unable to guarantee persistence of writes\n");
+		fua = 0;
+	}
+	wbc = nvdimm_has_cache(nd_region);
 
 	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 				dev_name(&ndns->dev))) {
@@ -341,7 +365,7 @@ static int pmem_attach_disk(struct device *dev,
 		return PTR_ERR(addr);
 	pmem->virt_addr = addr;
 
-	blk_queue_write_cache(q, true, true);
+	blk_queue_write_cache(q, wbc, fua);
 	blk_queue_make_request(q, pmem_make_request);
 	blk_queue_physical_block_size(q, PAGE_SIZE);
 	blk_queue_logical_block_size(q, pmem_sector_size(ndns));
@@ -372,8 +396,12 @@ static int pmem_attach_disk(struct device *dev,
 		put_disk(disk);
 		return -ENOMEM;
 	}
+	dax_write_cache(dax_dev, wbc);
 	pmem->dax_dev = dax_dev;
 
+	gendev = disk_to_dev(disk);
+	gendev->groups = pmem_attribute_groups;
+
 	device_add_disk(dev, disk);
 	if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
 		return -ENOMEM;
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index c5917f040fa7..5434321cad67 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -5,6 +5,20 @@
 #include <linux/pfn_t.h>
 #include <linux/fs.h>
 
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WB
+void arch_wb_cache_pmem(void *addr, size_t size);
+void arch_invalidate_pmem(void *addr, size_t size);
+#else
+#define ARCH_MEMREMAP_PMEM MEMREMAP_WT
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+}
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+}
+#endif
+
 /* this definition is in it's own header for tools/testing/nvdimm to consume */
 struct pmem_device {
 	/* One contiguous memory region per device */
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ab141f8b5140..5954cfbea3fc 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -15,7 +15,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/hash.h>
-#include <linux/pmem.h>
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/nd.h>
@@ -169,6 +168,11 @@ bool is_nd_blk(struct device *dev)
 	return dev ? dev->type == &nd_blk_device_type : false;
 }
 
+bool is_nd_volatile(struct device *dev)
+{
+	return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
 struct nd_region *to_nd_region(struct device *dev)
 {
 	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -215,7 +219,7 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
  */
 int nd_region_to_nstype(struct nd_region *nd_region)
 {
-	if (is_nd_pmem(&nd_region->dev)) {
+	if (is_memory(&nd_region->dev)) {
 		u16 i, alias;
 
 		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
@@ -243,7 +247,7 @@ static ssize_t size_show(struct device *dev,
 	struct nd_region *nd_region = to_nd_region(dev);
 	unsigned long long size = 0;
 
-	if (is_nd_pmem(dev)) {
+	if (is_memory(dev)) {
 		size = nd_region->ndr_size;
 	} else if (nd_region->ndr_mappings == 1) {
 		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -309,7 +313,7 @@ static ssize_t set_cookie_show(struct device *dev,
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	ssize_t rc = 0;
 
-	if (is_nd_pmem(dev) && nd_set)
+	if (is_memory(dev) && nd_set)
 		/* pass, should be precluded by region_visible */;
 	else
 		return -ENXIO;
@@ -363,7 +367,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
 		if (!ndd)
 			return 0;
 
-		if (is_nd_pmem(&nd_region->dev)) {
+		if (is_memory(&nd_region->dev)) {
 			available += nd_pmem_available_dpa(nd_region,
 					nd_mapping, &overlap);
 			if (overlap > blk_max_overlap) {
@@ -549,10 +553,10 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	struct nd_interleave_set *nd_set = nd_region->nd_set;
 	int type = nd_region_to_nstype(nd_region);
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_pfn_seed.attr)
+	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
 		return 0;
 
-	if (!is_nd_pmem(dev) && a == &dev_attr_dax_seed.attr)
+	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
 		return 0;
 
 	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
@@ -580,7 +584,7 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 			|| type == ND_DEVICE_NAMESPACE_BLK)
 			&& a == &dev_attr_available_size.attr)
 		return a->mode;
-	else if (is_nd_pmem(dev) && nd_set)
+	else if (is_memory(dev) && nd_set)
 		return a->mode;
 
 	return 0;
@@ -637,7 +641,7 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 {
 	struct nd_region *nd_region;
 
-	if (!probe && (is_nd_pmem(dev) || is_nd_blk(dev))) {
+	if (!probe && is_nd_region(dev)) {
 		int i;
 
 		nd_region = to_nd_region(dev);
@@ -655,12 +659,8 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
 			if (ndd)
 				atomic_dec(&nvdimm->busy);
 		}
-
-		if (is_nd_pmem(dev))
-			return;
 	}
-	if (dev->parent && (is_nd_blk(dev->parent) || is_nd_pmem(dev->parent))
-			&& probe) {
+	if (dev->parent && is_nd_region(dev->parent) && probe) {
 		nd_region = to_nd_region(dev->parent);
 		nvdimm_bus_lock(dev);
 		if (nd_region->ns_seed == dev)
@@ -1048,8 +1048,8 @@ void nvdimm_flush(struct nd_region *nd_region)
 	 * The first wmb() is needed to 'sfence' all previous writes
 	 * such that they are architecturally visible for the platform
 	 * buffer flush. Note that we've already arranged for pmem
-	 * writes to avoid the cache via arch_memcpy_to_pmem(). The
-	 * final wmb() ensures ordering for the NVDIMM flush write.
+	 * writes to avoid the cache via memcpy_flushcache(). The final
+	 * wmb() ensures ordering for the NVDIMM flush write.
 	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
@@ -1071,8 +1071,9 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 {
 	int i;
 
-	/* no nvdimm == flushing capability unknown */
-	if (nd_region->ndr_mappings == 0)
+	/* no nvdimm or pmem api == flushing capability unknown */
+	if (nd_region->ndr_mappings == 0
+			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
 		return -ENXIO;
 
 	for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1092,6 +1093,12 @@ int nvdimm_has_flush(struct nd_region *nd_region)
 }
 EXPORT_SYMBOL_GPL(nvdimm_has_flush);
 
+int nvdimm_has_cache(struct nd_region *nd_region)
+{
+	return is_nd_pmem(&nd_region->dev);
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
 void __exit nd_region_devs_exit(void)
 {
 	ida_destroy(&region_ida);
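
For reference, a minimal standalone sketch (not kernel code; the names `queue_cache_flags` and `pmem_queue_flags` are hypothetical) of the write-cache/FUA policy the `pmem_attach_disk()` hunk above introduces: FUA is advertised only when the region reports a flush capability and the architecture provides cache-flushing copy support, while the volatile-write-cache flag follows `nvdimm_has_cache()`.

```c
/*
 * Illustrative userspace mirror of the fua/wbc decision in the
 * pmem_attach_disk() hunk above. All identifiers here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue_cache_flags {
	bool wbc;	/* volatile write cache present (2nd blk_queue_write_cache arg) */
	bool fua;	/* forced-unit-access writes    (3rd blk_queue_write_cache arg) */
};

/* has_flush < 0 models nvdimm_has_flush() returning -ENXIO */
static struct queue_cache_flags pmem_queue_flags(int has_flush, bool has_cache,
		bool arch_has_uaccess_flushcache)
{
	struct queue_cache_flags qf;
	int fua = has_flush;

	/* persistence cannot be guaranteed without flushcache support or flush hints */
	if (!arch_has_uaccess_flushcache || fua < 0) {
		fprintf(stderr, "unable to guarantee persistence of writes\n");
		fua = 0;
	}
	qf.fua = fua > 0;
	qf.wbc = has_cache;
	return qf;
}

int main(void)
{
	/* e.g. a pmem region with flush hints on an arch with flushcache support */
	struct queue_cache_flags qf = pmem_queue_flags(1, true, true);

	printf("wbc=%d fua=%d\n", qf.wbc, qf.fua);
	return 0;
}
```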