author     Christoph Hellwig <hch@lst.de>    2022-02-26 16:40:21 +0100
committer  Christoph Hellwig <hch@lst.de>    2022-03-03 14:00:57 +0300
commit     f5ff79fddf0efecca538046b5cc20fb3ded2ec4f
tree       865b9157a52b891d906b279721ac2df29d9065ff /kernel
parent     fba09099c6e506608e05e08ac717bf34501f821b
dma-mapping: remove CONFIG_DMA_REMAP
CONFIG_DMA_REMAP is used to build a few helpers around the core vmalloc code, to use them when dma-direct has to deal with a highmem page, and to let coherent allocations in the dma-iommu layer be backed by non-contiguous page allocations.

Right now it needs to be explicitly selected by architectures, and is only selected by architectures that require remapping to deal with devices that are not DMA coherent. Make it unconditional for builds with CONFIG_MMU: it is very little extra code, but makes it much more likely that large DMA allocations succeed on x86.

This fixes hot-plugging an NVMe Thunderbolt SSD for me, which tries to allocate a 1MB buffer that is otherwise hard to obtain due to memory fragmentation on a heavily used laptop.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
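To make the impact concrete, here is a minimal, hypothetical driver-side sketch of the kind of allocation this change helps; "my_pdev", the function name and the 1 MiB size are assumptions for illustration, not part of the patch. On a CONFIG_MMU kernel such a request can now be satisfied by remapping pages into the vmalloc area (non-contiguous pages via dma-iommu, or a highmem CMA block via dma-direct) instead of depending on an unfragmented, linearly mapped chunk.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/sizes.h>

/* Illustration only: allocate the kind of 1 MiB coherent buffer the NVMe
 * hot-plug case above was failing to obtain. */
static int my_alloc_example(struct pci_dev *my_pdev)
{
	dma_addr_t buf_dma;
	void *buf;

	buf = dma_alloc_coherent(&my_pdev->dev, SZ_1M, &buf_dma, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* The returned CPU address may now be a vmap()ed mapping; callers
	 * use it exactly as before and free it with dma_free_coherent(). */
	dma_free_coherent(&my_pdev->dev, SZ_1M, buf, buf_dma);
	return 0;
}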
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/dma/Kconfig  |  7 +------
 -rw-r--r--  kernel/dma/Makefile |  2 +-
 -rw-r--r--  kernel/dma/direct.c | 18 +++++++------------
 3 files changed, 9 insertions(+), 18 deletions(-)
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
index 1b02179758cb..56866aaa2ae1 100644
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -110,15 +110,10 @@ config DMA_GLOBAL_POOL
select DMA_DECLARE_COHERENT
bool
-config DMA_REMAP
- bool
- depends on MMU
- select DMA_NONCOHERENT_MMAP
-
config DMA_DIRECT_REMAP
bool
- select DMA_REMAP
select DMA_COHERENT_POOL
+ select DMA_NONCOHERENT_MMAP
config DMA_CMA
bool "DMA Contiguous Memory Allocator"
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
index 0dd65ec1d234..21926e46ef4f 100644
--- a/kernel/dma/Makefile
+++ b/kernel/dma/Makefile
@@ -8,5 +8,5 @@ obj-$(CONFIG_DMA_DECLARE_COHERENT) += coherent.o
obj-$(CONFIG_DMA_API_DEBUG) += debug.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_DMA_COHERENT_POOL) += pool.o
-obj-$(CONFIG_DMA_REMAP) += remap.o
+obj-$(CONFIG_MMU) += remap.o
obj-$(CONFIG_DMA_MAP_BENCHMARK) += map_benchmark.o
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 50f48e9e4598..35a1d29d6a2e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -265,17 +265,13 @@ void *dma_direct_alloc(struct device *dev, size_t size,
page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
if (!page)
return NULL;
+
+ /*
+ * dma_alloc_contiguous can return highmem pages depending on a
+ * combination of the cma= arguments and per-arch setup. These need to be
+ * remapped to return a kernel virtual address.
+ */
if (PageHighMem(page)) {
- /*
- * Depending on the cma= arguments and per-arch setup,
- * dma_alloc_contiguous could return highmem pages.
- * Without remapping there is no way to return them here, so
- * log an error and fail.
- */
- if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
- dev_info(dev, "Rejecting highmem page from CMA.\n");
- goto out_free_pages;
- }
remap = true;
set_uncached = false;
}
@@ -349,7 +345,7 @@ void dma_direct_free(struct device *dev, size_t size,
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
return;
- if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
+ if (is_vmalloc_addr(cpu_addr)) {
vunmap(cpu_addr);
} else {
if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
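The free path above now keys purely off is_vmalloc_addr(): anything that was remapped lives in the vmalloc area and is torn down with vunmap(), everything else came from the linear map. Below is a self-contained sketch of that remap/free pairing, using plain vmap()/vunmap() rather than the kernel's internal DMA helpers, written only to illustrate the pattern; the function names are made up for the example.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Map a physically contiguous (possibly highmem) block into the vmalloc
 * area so it has a usable kernel virtual address. */
static void *example_map_block(struct page *page, size_t size, pgprot_t prot)
{
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;

	if (!PageHighMem(page))
		return page_address(page);	/* already in the linear map */

	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = page + i;

	vaddr = vmap(pages, count, VM_MAP, prot);
	kfree(pages);	/* page tables are populated; the temporary array can go */
	return vaddr;
}

static void example_unmap_block(void *vaddr)
{
	/* vmap()ed addresses satisfy is_vmalloc_addr(); linear-map addresses
	 * do not, which is why dma_direct_free() can drop the old
	 * IS_ENABLED(CONFIG_DMA_REMAP) check. */
	if (is_vmalloc_addr(vaddr))
		vunmap(vaddr);
}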