Diffstat (limited to 'kernel/memremap.c')
-rw-r--r-- kernel/memremap.c | 53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 562f6471fe90..3eb8944265d5 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -179,6 +179,29 @@ static void pgmap_radix_release(struct resource *res)
 	mutex_unlock(&pgmap_lock);
 }
 
+static unsigned long pfn_first(struct page_map *page_map)
+{
+	struct dev_pagemap *pgmap = &page_map->pgmap;
+	const struct resource *res = &page_map->res;
+	struct vmem_altmap *altmap = pgmap->altmap;
+	unsigned long pfn;
+
+	pfn = res->start >> PAGE_SHIFT;
+	if (altmap)
+		pfn += vmem_altmap_offset(altmap);
+	return pfn;
+}
+
+static unsigned long pfn_end(struct page_map *page_map)
+{
+	const struct resource *res = &page_map->res;
+
+	return (res->start + resource_size(res)) >> PAGE_SHIFT;
+}
+
+#define for_each_device_pfn(pfn, map) \
+	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
+
 static void devm_memremap_pages_release(struct device *dev, void *data)
 {
 	struct page_map *page_map = data;
@@ -186,6 +209,11 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
 	resource_size_t align_start, align_size;
 	struct dev_pagemap *pgmap = &page_map->pgmap;
 
+	if (percpu_ref_tryget_live(pgmap->ref)) {
+		dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+		percpu_ref_put(pgmap->ref);
+	}
+
 	pgmap_radix_release(res);
 
 	/* pages are dead and unused, undo the arch mapping */
@@ -211,20 +239,26 @@ struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
  * @res: "host memory" address range
+ * @ref: a live per-cpu reference count
  * @altmap: optional descriptor for allocating the memmap from @res
  *
- * Note, the expectation is that @res is a host memory range that could
- * feasibly be treated as a "System RAM" range, i.e. not a device mmio
- * range, but this is not enforced.
+ * Notes:
+ * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
+ *    (or devm release event).
+ *
+ * 2/ @res is expected to be a host memory range that could feasibly be
+ *    treated as a "System RAM" range, i.e. not a device mmio range, but
+ *    this is not enforced.
  */
 void *devm_memremap_pages(struct device *dev, struct resource *res,
-		struct vmem_altmap *altmap)
+		struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
 	int is_ram = region_intersects(res->start, resource_size(res),
 			"System RAM");
 	resource_size_t key, align_start, align_size;
 	struct dev_pagemap *pgmap;
 	struct page_map *page_map;
+	unsigned long pfn;
 	int error, nid;
 
 	if (is_ram == REGION_MIXED) {
@@ -242,6 +276,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		return ERR_PTR(-ENXIO);
 	}
 
+	if (!ref)
+		return ERR_PTR(-EINVAL);
+
 	page_map = devres_alloc_node(devm_memremap_pages_release,
 			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
 	if (!page_map)
@@ -255,6 +292,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		memcpy(&page_map->altmap, altmap, sizeof(*altmap));
 		pgmap->altmap = &page_map->altmap;
 	}
+	pgmap->ref = ref;
 	pgmap->res = &page_map->res;
 
 	mutex_lock(&pgmap_lock);
@@ -292,6 +330,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (error)
 		goto err_add_memory;
 
+	for_each_device_pfn(pfn, page_map) {
+		struct page *page = pfn_to_page(pfn);
+
+		/* ZONE_DEVICE pages must never appear on a slab lru */
+		list_force_poison(&page->lru);
+		page->pgmap = pgmap;
+	}
 	devres_add(dev, page_map);
 	return __va(res->start);
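
To make the new @ref contract concrete, below is a minimal, illustrative sketch of a driver-side caller. It is not part of this patch: my_dev, my_dev_ref_release, my_dev_map, and my_dev_unmap are hypothetical names, and only the standard percpu-refcount and completion APIs are assumed. The ref is made live before devm_memremap_pages() and is killed and drained before teardown, so the percpu_ref_tryget_live() check in devm_memremap_pages_release() finds the mapping dead and does not warn.

/*
 * Hypothetical caller sketch -- not from this patch. Shows one way a
 * driver could satisfy the rule that @ref is 'live' on entry to
 * devm_memremap_pages() and 'dead' by devm release time.
 */
#include <linux/percpu-refcount.h>
#include <linux/completion.h>
#include <linux/memremap.h>
#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/err.h>

struct my_dev {
	struct percpu_ref ref;		/* pins the page mapping */
	struct completion ref_done;	/* fires when ref drops to zero */
	void *virt;			/* kernel address of remapped range */
};

/* Called once the last reference is dropped after percpu_ref_kill() */
static void my_dev_ref_release(struct percpu_ref *ref)
{
	struct my_dev *mydev = container_of(ref, struct my_dev, ref);

	complete(&mydev->ref_done);
}

static int my_dev_map(struct device *dev, struct my_dev *mydev,
		struct resource *res)
{
	int rc;

	init_completion(&mydev->ref_done);
	rc = percpu_ref_init(&mydev->ref, my_dev_ref_release, 0, GFP_KERNEL);
	if (rc)
		return rc;

	/* @ref is live here, as the new interface requires; no altmap */
	mydev->virt = devm_memremap_pages(dev, res, &mydev->ref, NULL);
	if (IS_ERR(mydev->virt)) {
		percpu_ref_exit(&mydev->ref);
		return PTR_ERR(mydev->virt);
	}
	return 0;
}

static void my_dev_unmap(struct my_dev *mydev)
{
	/*
	 * Kill and drain the ref before the devres release callback runs,
	 * so percpu_ref_tryget_live() fails in devm_memremap_pages_release()
	 * and the "page mapping is still live" warning never fires.
	 */
	percpu_ref_kill(&mydev->ref);
	wait_for_completion(&mydev->ref_done);
	percpu_ref_exit(&mydev->ref);
}

The completion-based drain here mirrors the usual percpu_ref teardown pattern: percpu_ref_kill() blocks new references, and the release callback signals once outstanding references (e.g. in-flight I/O to the remapped pages) have all been put.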