author		Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 14:12:21 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 14:12:21 -0800
commit		af7ddd8a627c62a835524b3f5b471edbbbcce025 (patch)
tree		af9777ddef6d394c7cc01fca599328b584ca2bc1 /arch/ia64
parent		fe2b0cdabcd9e6aeca66a104bc03576946e5fee2 (diff)
parent		8b1cce9f5832a8eda17d37a3c49fb7dd2d650f46 (diff)
Merge tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping
Pull DMA mapping updates from Christoph Hellwig:
 "A huge update this time, but a lot of that is just consolidating or
  removing code:

   - provide a common DMA_MAPPING_ERROR definition and avoid indirect
     calls for dma_map_* error checking

   - use direct calls for the DMA direct mapping case, avoiding huge
     retpoline overhead for high performance workloads

   - merge the swiotlb dma_map_ops into dma-direct

   - provide a generic remapping DMA consistent allocator for
     architectures that have devices that perform DMA that is not cache
     coherent. Based on the existing arm64 implementation and also used
     for csky now.

   - improve the dma-debug infrastructure, including dynamic allocation
     of entries (Robin Murphy)

   - default to providing chaining scatterlist everywhere, with opt-outs
     for the few architectures (alpha, parisc, most arm32 variants) that
     can't cope with it

   - misc sparc32 dma-related cleanups

   - remove the dma_mark_clean arch hook used by swiotlb on ia64 and
     replace it with the generic noncoherent infrastructure

   - fix the return type of dma_set_max_seg_size (Niklas Söderlund)

   - move the dummy dma ops for not DMA capable devices from arm64 to
     common code (Robin Murphy)

   - ensure dma_alloc_coherent returns zeroed memory to avoid kernel
     data leaks through userspace. We already did this for most common
     architectures, but this ensures we do it everywhere.
     dma_zalloc_coherent has been deprecated and can hopefully be
     removed after -rc1 with a coccinelle script"

* tag 'dma-mapping-4.21' of git://git.infradead.org/users/hch/dma-mapping: (73 commits)
  dma-mapping: fix inverted logic in dma_supported
  dma-mapping: deprecate dma_zalloc_coherent
  dma-mapping: zero memory returned from dma_alloc_*
  sparc/iommu: fix ->map_sg return value
  sparc/io-unit: fix ->map_sg return value
  arm64: default to the direct mapping in get_arch_dma_ops
  PCI: Remove unused attr variable in pci_dma_configure
  ia64: only select ARCH_HAS_DMA_COHERENT_TO_PFN if swiotlb is enabled
  dma-mapping: bypass indirect calls for dma-direct
  vmd: use the proper dma_* APIs instead of direct methods calls
  dma-direct: merge swiotlb_dma_ops into the dma_direct code
  dma-direct: use dma_direct_map_page to implement dma_direct_map_sg
  dma-direct: improve addressability error reporting
  swiotlb: remove dma_mark_clean
  swiotlb: remove SWIOTLB_MAP_ERROR
  ACPI / scan: Refactor _CCA enforcement
  dma-mapping: factor out dummy DMA ops
  dma-mapping: always build the direct mapping code
  dma-mapping: move dma_cache_sync out of line
  dma-mapping: move various slow path functions out of line
  ...
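[Editor's note] Two of these changes are directly visible to driver code: every dma_map_* implementation now reports failure through the shared DMA_MAPPING_ERROR sentinel, and dma_alloc_coherent() is guaranteed to return zeroed memory. A minimal sketch of the resulting driver-side idiom (the mydrv_* names are illustrative, not from this series; only the dma_* calls are real API):

	#include <linux/dma-mapping.h>

	/* Hypothetical driver helper; "mydrv" is a placeholder name. */
	static int mydrv_map_buffer(struct device *dev, struct page *page,
				    size_t len, dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

		/* All dma_map_ops now fail the same way: no per-ops callback. */
		if (dma_mapping_error(dev, addr))
			return -ENOMEM;
		*out = addr;
		return 0;
	}

	static void *mydrv_alloc_ring(struct device *dev, size_t size,
				      dma_addr_t *dma)
	{
		/*
		 * dma_alloc_coherent() now zeroes the buffer on every
		 * architecture, so the deprecated dma_zalloc_coherent()
		 * wrapper is no longer needed.
		 */
		return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	}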
Diffstat (limited to 'arch/ia64')
-rw-r--r--	arch/ia64/Kconfig	4
-rw-r--r--	arch/ia64/hp/common/hwsw_iommu.c	2
-rw-r--r--	arch/ia64/hp/common/sba_iommu.c	87
-rw-r--r--	arch/ia64/kernel/dma-mapping.c	21
-rw-r--r--	arch/ia64/mm/init.c	19
-rw-r--r--	arch/ia64/sn/pci/pci_dma.c	8
6 files changed, 63 insertions, 78 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 36773def6920..cbf6c67c7166 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -28,8 +28,8 @@ config IA64
select HAVE_ARCH_TRACEHOOK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
- select ARCH_HAS_DMA_MARK_CLEAN
- select ARCH_HAS_SG_CHAIN
+ select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 58969039bed2..8840ed97712f 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev)
const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
{
if (use_swiotlb(dev))
- return &swiotlb_dma_ops;
+ return NULL;
return &sba_dma_ops;
}
EXPORT_SYMBOL(hwsw_dma_get_ops);
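[Editor's note] Returning NULL here is not a bug: after this series a NULL dma_map_ops pointer means "use the direct mapping", and the generic entry points dispatch to dma-direct without an indirect (retpolined) call. A simplified sketch of that dispatch, assuming the helper names introduced by this merge:

	/* Simplified from the generic dma-mapping header after this series. */
	static inline bool dma_is_direct(const struct dma_map_ops *ops)
	{
		return likely(!ops);	/* NULL ops selects dma-direct */
	}

	static inline dma_addr_t dma_map_page_attrs(struct device *dev,
			struct page *page, size_t offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		if (dma_is_direct(ops))
			return dma_direct_map_page(dev, page, offset, size,
						   dir, attrs);
		return ops->map_page(dev, page, offset, size, dir, attrs);
	}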
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index e8a93b07283e..5a361e51cb1e 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -907,11 +907,12 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
}
/**
- * sba_map_single_attrs - map one buffer and return IOVA for DMA
+ * sba_map_page - map one buffer and return IOVA for DMA
* @dev: instance of PCI owned by the driver that's asking.
- * @addr: driver buffer to map.
- * @size: number of bytes to map in driver buffer.
- * @dir: R/W or both.
+ * @page: page to map
+ * @poff: offset into page
+ * @size: number of bytes to map
+ * @dir: dma direction
* @attrs: optional dma attributes
*
* See Documentation/DMA-API-HOWTO.txt
@@ -944,7 +945,7 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
** Device is bit capable of DMA'ing to the buffer...
** just return the PCI address of ptr
*/
- DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+ DBG_BYPASS("sba_map_page() bypass mask/addr: "
"0x%lx/0x%lx\n",
to_pci_dev(dev)->dma_mask, pci_addr);
return pci_addr;
@@ -966,14 +967,14 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
- if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
+ if (sba_check_pdir(ioc,"Check before sba_map_page()"))
panic("Sanity check failed");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
pide = sba_alloc_range(ioc, dev, size);
if (pide < 0)
- return 0;
+ return DMA_MAPPING_ERROR;
iovp = (dma_addr_t) pide << iovp_shift;
@@ -997,20 +998,12 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
/* form complete address */
#ifdef ASSERT_PDIR_SANITY
spin_lock_irqsave(&ioc->res_lock, flags);
- sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
+ sba_check_pdir(ioc,"Check after sba_map_page()");
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
return SBA_IOVA(ioc, iovp, offset);
}
-static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return sba_map_page(dev, virt_to_page(addr),
- (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
-}
-
#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
@@ -1036,7 +1029,7 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
#endif
/**
- * sba_unmap_single_attrs - unmap one IOVA and free resources
+ * sba_unmap_page - unmap one IOVA and free resources
* @dev: instance of PCI owned by the driver that's asking.
* @iova: IOVA of driver buffer previously mapped.
* @size: number of bytes mapped in driver buffer.
@@ -1063,7 +1056,7 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
/*
** Address does not fall w/in IOVA, must be bypassing
*/
- DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+ DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
iova);
#ifdef ENABLE_MARK_CLEAN
@@ -1114,12 +1107,6 @@ static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
#endif /* DELAYED_RESOURCE_CNT == 0 */
}
-void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- sba_unmap_page(dev, iova, size, dir, attrs);
-}
-
/**
* sba_alloc_coherent - allocate/map shared mem for DMA
* @dev: instance of PCI owned by the driver that's asking.
@@ -1132,30 +1119,24 @@ static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t flags, unsigned long attrs)
{
+ struct page *page;
struct ioc *ioc;
+ int node = -1;
void *addr;
ioc = GET_IOC(dev);
ASSERT(ioc);
-
#ifdef CONFIG_NUMA
- {
- struct page *page;
-
- page = alloc_pages_node(ioc->node, flags, get_order(size));
- if (unlikely(!page))
- return NULL;
-
- addr = page_address(page);
- }
-#else
- addr = (void *) __get_free_pages(flags, get_order(size));
+ node = ioc->node;
#endif
- if (unlikely(!addr))
+
+ page = alloc_pages_node(node, flags, get_order(size));
+ if (unlikely(!page))
return NULL;
+ addr = page_address(page);
memset(addr, 0, size);
- *dma_handle = virt_to_phys(addr);
+ *dma_handle = page_to_phys(page);
#ifdef ALLOW_IOV_BYPASS
ASSERT(dev->coherent_dma_mask);
@@ -1174,9 +1155,10 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
* If device can't bypass or bypass is disabled, pass the 32bit fake
* device to map single to get an iova mapping.
*/
- *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
- size, 0, 0);
-
+ *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
+ DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, *dma_handle))
+ return NULL;
return addr;
}
@@ -1193,7 +1175,7 @@ sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
+ sba_unmap_page(dev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
@@ -1483,7 +1465,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
/* Fast path single entry scatterlists. */
if (nents == 1) {
sglist->dma_length = sglist->length;
- sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
+ sglist->dma_address = sba_map_page(dev, sg_page(sglist),
+ sglist->offset, sglist->length, dir, attrs);
+ if (dma_mapping_error(dev, sglist->dma_address))
+ return 0;
return 1;
}
@@ -1572,8 +1557,8 @@ static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
while (nents && sglist->dma_length) {
- sba_unmap_single_attrs(dev, sglist->dma_address,
- sglist->dma_length, dir, attrs);
+ sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
+ dir, attrs);
sglist = sg_next(sglist);
nents--;
}
@@ -2080,8 +2065,6 @@ static int __init acpi_sba_ioc_init_acpi(void)
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);
-extern const struct dma_map_ops swiotlb_dma_ops;
-
static int __init
sba_init(void)
{
@@ -2095,7 +2078,7 @@ sba_init(void)
* a successful kdump kernel boot is to use the swiotlb.
*/
if (is_kdump_kernel()) {
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to initialize software I/O TLB:"
" Try machvec=dig boot option");
@@ -2117,7 +2100,7 @@ sba_init(void)
* If we didn't find something sba_iommu can claim, we
* need to setup the swiotlb and switch to the dig machvec.
*/
- dma_ops = &swiotlb_dma_ops;
+ dma_ops = NULL;
if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
panic("Unable to find SBA IOMMU or initialize "
"software I/O TLB: Try machvec=dig boot option");
@@ -2170,11 +2153,6 @@ static int sba_dma_supported (struct device *dev, u64 mask)
return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}
-static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
__setup("nosbagart", nosbagart);
static int __init
@@ -2208,7 +2186,6 @@ const struct dma_map_ops sba_dma_ops = {
.map_sg = sba_map_sg_attrs,
.unmap_sg = sba_unmap_sg_attrs,
.dma_supported = sba_dma_supported,
- .mapping_error = sba_dma_mapping_error,
};
void sba_dma_init(void)
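[Editor's note] With sba_map_page() returning DMA_MAPPING_ERROR on failure, the .mapping_error callback becomes dead weight: the generic dma_mapping_error() now just compares against the shared sentinel (~(dma_addr_t)0). Roughly:

	#define DMA_MAPPING_ERROR	(~(dma_addr_t)0)

	/* Rough sketch of the generic check replacing per-ops callbacks. */
	static inline int dma_mapping_error(struct device *dev,
					    dma_addr_t dma_addr)
	{
		debug_dma_mapping_error(dev, dma_addr);
		if (dma_addr == DMA_MAPPING_ERROR)
			return -ENOMEM;
		return 0;
	}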
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 7a471d8d67d4..ad7d9963de34 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
@@ -16,9 +16,26 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
EXPORT_SYMBOL(dma_get_ops);
#ifdef CONFIG_SWIOTLB
+void *arch_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+{
+ return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
+}
+
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ return page_to_pfn(virt_to_page(cpu_addr));
+}
+
void __init swiotlb_dma_init(void)
{
- dma_ops = &swiotlb_dma_ops;
swiotlb_init(1);
}
#endif
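[Editor's note] The new arch_dma_coherent_to_pfn() hook is trivial on ia64 because coherent allocations come straight out of the linear kernel mapping; what it buys is that generic code can build user mappings of coherent buffers without an ia64-specific mmap method. A hypothetical consumer modeled on the generic mmap path (everything except the hook itself is illustrative):

	#include <linux/dma-noncoherent.h>
	#include <linux/mm.h>

	/* Hypothetical helper; sketches how the generic code uses the hook. */
	static int mmap_coherent_sketch(struct device *dev,
			struct vm_area_struct *vma, void *cpu_addr,
			dma_addr_t dma_addr, size_t size)
	{
		unsigned long pfn = arch_dma_coherent_to_pfn(dev, cpu_addr,
							     dma_addr);
		unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

		if (vma->vm_pgoff >= count ||
		    vma_pages(vma) > count - vma->vm_pgoff)
			return -ENXIO;
		return remap_pfn_range(vma, vma->vm_start,
				       pfn + vma->vm_pgoff,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}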
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d5e12ff1d73c..0cf43bb13d6e 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/dma-noncoherent.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
@@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte)
* DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
* flush them when they get mapped into an executable vm-area.
*/
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
+ unsigned long pfn = PHYS_PFN(paddr);
+
+ do {
+ set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+ } while (++pfn <= PHYS_PFN(paddr + size - 1));
}
inline void
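[Editor's note] The rewrite slightly widens the range being marked: the old dma_mark_clean() set PG_arch_1 only on pages fully contained in [addr, addr + size), while the new loop covers every page the range touches. Written out long-hand (illustrative only, equivalent to the do/while above):

	/* Illustrative expansion of the pfn loop in arch_sync_dma_for_cpu(). */
	unsigned long first = PHYS_PFN(paddr);
	unsigned long last = PHYS_PFN(paddr + size - 1);
	unsigned long pfn;

	for (pfn = first; pfn <= last; pfn++)
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);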
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 4ce4ee4ef9f2..b7d42e4edc1f 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -196,7 +196,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __func__);
- return 0;
+ return DMA_MAPPING_ERROR;
}
return dma_addr;
}
@@ -314,11 +314,6 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
return nhwentries;
}
-static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
- return 0;
-}
-
static u64 sn_dma_get_required_mask(struct device *dev)
{
return DMA_BIT_MASK(64);
@@ -441,7 +436,6 @@ static struct dma_map_ops sn_dma_ops = {
.unmap_page = sn_dma_unmap_page,
.map_sg = sn_dma_map_sg,
.unmap_sg = sn_dma_unmap_sg,
- .mapping_error = sn_dma_mapping_error,
.dma_supported = sn_dma_supported,
.get_required_mask = sn_dma_get_required_mask,
};