author     Krzysztof Kozlowski <k.kozlowski@samsung.com>    2016-08-03 13:46:00 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-08-04 08:50:07 -0400
commit     00085f1efa387a8ce100e3734920f7639c80caa3 (patch)
tree       41ff3d6e6884918b4fc4f1ae96a284098167c5b0 /include/linux
parent     1605d2715ad2e67ddd0485a26e05ed670a4285ca (diff)
dma-mapping: use unsigned long for dma_attrs
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer. Thus the pointer can point to const data.
However the attributes do not have to be a bitfield. Instead unsigned
long will do fine:
1. This is just simpler, both in terms of reading the code and setting
attributes. Instead of initializing local attributes on the stack and
passing a pointer to them to dma_set_attr(), just set the bits (see the
sketch after this list).
2. It is safer and allows const-correctness checking, because the
attributes are passed by value.
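To illustrate the effect on callers, here is a minimal before/after sketch (hypothetical driver code, not part of this patch; the function name alloc_wc_buffer and the OLD_DMA_ATTRS_API guard are made up, the guard only serves to show both variants side by side). It mirrors the dma_alloc_wc() conversion further down in the diff:

#include <linux/dma-mapping.h>

static void *alloc_wc_buffer(struct device *dev, size_t size,
			     dma_addr_t *dma_handle)
{
#ifdef OLD_DMA_ATTRS_API
	/* Before this patch: attributes built on the stack, passed by pointer. */
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
#else
	/* After this patch: attributes are a plain bitmask passed by value. */
	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
#endif
}

Since the attributes are now a plain bitmask, several attributes are combined with bitwise OR (e.g. DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING) instead of repeated dma_set_attr() calls.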
Semantic patches for this change (at least most of them):
virtual patch
virtual context
@r@
identifier f, attrs;
@@
f(...,
- struct dma_attrs *attrs
+ unsigned long attrs
, ...)
{
...
}
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
and
// Options: --all-includes
virtual patch
virtual context
@r@
identifier f, attrs;
type t;
@@
t f(..., struct dma_attrs *attrs);
@@
identifier r.f;
@@
f(...,
- NULL
+ 0
)
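The semantic patches above cover the signature and NULL-argument rewrites. Because dma_get_attr() goes away with dma-attrs.h, implementations that queried attributes through it likewise become plain bit tests on the new mask. A minimal sketch of that pattern, using a hypothetical unmap_page implementation (not an excerpt from this patch):

#include <linux/dma-mapping.h>

static void example_unmap_page(struct device *dev, dma_addr_t handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	/* Before: if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) ... */
	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return;	/* caller guarantees no CPU cache maintenance is needed */

	/* architecture-specific cache maintenance would go here */
}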
Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/dma-attrs.h     71
-rw-r--r--  include/linux/dma-iommu.h      6
-rw-r--r--  include/linux/dma-mapping.h  128
-rw-r--r--  include/linux/swiotlb.h       10
4 files changed, 92 insertions, 123 deletions
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
deleted file mode 100644
index 5246239a4953..000000000000
--- a/include/linux/dma-attrs.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef _DMA_ATTR_H
-#define _DMA_ATTR_H
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-
-/**
- * an enum dma_attr represents an attribute associated with a DMA
- * mapping. The semantics of each attribute should be defined in
- * Documentation/DMA-attributes.txt.
- */
-enum dma_attr {
-	DMA_ATTR_WRITE_BARRIER,
-	DMA_ATTR_WEAK_ORDERING,
-	DMA_ATTR_WRITE_COMBINE,
-	DMA_ATTR_NON_CONSISTENT,
-	DMA_ATTR_NO_KERNEL_MAPPING,
-	DMA_ATTR_SKIP_CPU_SYNC,
-	DMA_ATTR_FORCE_CONTIGUOUS,
-	DMA_ATTR_ALLOC_SINGLE_PAGES,
-	DMA_ATTR_MAX,
-};
-
-#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
-
-/**
- * struct dma_attrs - an opaque container for DMA attributes
- * @flags - bitmask representing a collection of enum dma_attr
- */
-struct dma_attrs {
-	unsigned long flags[__DMA_ATTRS_LONGS];
-};
-
-#define DEFINE_DMA_ATTRS(x)					\
-	struct dma_attrs x = {					\
-		.flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },	\
-	}
-
-static inline void init_dma_attrs(struct dma_attrs *attrs)
-{
-	bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
-}
-
-/**
- * dma_set_attr - set a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-	if (attrs == NULL)
-		return;
-	BUG_ON(attr >= DMA_ATTR_MAX);
-	__set_bit(attr, attrs->flags);
-}
-
-/**
- * dma_get_attr - check for a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-	if (attrs == NULL)
-		return 0;
-	BUG_ON(attr >= DMA_ATTR_MAX);
-	return test_bit(attr, attrs->flags);
-}
-
-#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 8443bbb5c071..81c5c8d167ad 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -39,7 +39,7 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent);
  * the arch code to take care of attributes and cache maintenance
  */
 struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
-		struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+		unsigned long attrs, int prot, dma_addr_t *handle,
 		void (*flush_page)(struct device *, const void *, phys_addr_t));
 void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
 		dma_addr_t *handle);
@@ -56,9 +56,9 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
  * directly as DMA mapping callbacks for simplicity
  */
 void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
-		enum dma_data_direction dir, struct dma_attrs *attrs);
+		enum dma_data_direction dir, unsigned long attrs);
 void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, struct dma_attrs *attrs);
+		enum dma_data_direction dir, unsigned long attrs);
 
 int iommu_dma_supported(struct device *dev, u64 mask);
 int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 71c1b215ef66..66533e18276c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -5,13 +5,58 @@
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 #include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/kmemcheck.h>
 #include <linux/bug.h>
 
+/**
+ * List of possible attributes associated with a DMA mapping. The semantics
+ * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ *
+ * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
+ * forces all pending DMA writes to complete.
+ */
+#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
+/*
+ * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
+ * may be weakly ordered, that is that reads and writes may pass each other.
+ */
+#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
+/*
+ * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
+ * buffered to improve performance.
+ */
+#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
+/*
+ * DMA_ATTR_NON_CONSISTENT: Lets the platform to choose to return either
+ * consistent or non-consistent memory as it sees fit.
+ */
+#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
+/*
+ * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform to avoid creating a kernel
+ * virtual mapping for the allocated buffer.
+ */
+#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
+/*
+ * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
+ * the CPU cache for the given buffer assuming that it has been already
+ * transferred to 'device' domain.
+ */
+#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
+ * in physical memory.
+ */
+#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
+/*
+ * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
+ * that it's probably not worth the time to try to allocate memory to in a way
+ * that gives better TLB efficiency.
+ */
+#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
+
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
  * It can be given to a device to use as a DMA source or target.  A CPU cannot
@@ -21,34 +66,35 @@
 struct dma_map_ops {
 	void* (*alloc)(struct device *dev, size_t size,
 				dma_addr_t *dma_handle, gfp_t gfp,
-				struct dma_attrs *attrs);
+				unsigned long attrs);
 	void (*free)(struct device *dev, size_t size,
 			      void *vaddr, dma_addr_t dma_handle,
-			      struct dma_attrs *attrs);
+			      unsigned long attrs);
 	int (*mmap)(struct device *, struct vm_area_struct *,
-			  void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+			  void *, dma_addr_t, size_t,
+			  unsigned long attrs);
 
 	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
-			   dma_addr_t, size_t, struct dma_attrs *attrs);
+			   dma_addr_t, size_t, unsigned long attrs);
 
 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
-			       struct dma_attrs *attrs);
+			       unsigned long attrs);
 	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
-			   struct dma_attrs *attrs);
+			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
 	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
-		      struct dma_attrs *attrs);
+		      unsigned long attrs);
 	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
-			 struct dma_attrs *attrs);
+			 unsigned long attrs);
 	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
@@ -123,7 +169,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
-					      struct dma_attrs *attrs)
+					      unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;
@@ -142,7 +188,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir,
-					   struct dma_attrs *attrs)
+					   unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -158,7 +204,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
  */
 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
-				   struct dma_attrs *attrs)
+				   unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
@@ -176,7 +222,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
-				      struct dma_attrs *attrs)
+				      unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -195,7 +241,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, NULL);
+	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 
	return addr;
@@ -208,7 +254,7 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 
	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
-		ops->unmap_page(dev, addr, size, dir, NULL);
+		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
 }
@@ -289,10 +335,10 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 
 }
 
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
 
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
@@ -321,7 +367,7 @@ void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
  */
 static inline int
 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
-	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
@@ -330,7 +376,7 @@ dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
 
 int
 dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
@@ -338,7 +384,8 @@ dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 
 static inline int
 dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
-		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+		      dma_addr_t dma_addr, size_t size,
+		      unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
@@ -348,7 +395,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
 }
 
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
 
 #ifndef arch_dma_alloc_attrs
 #define arch_dma_alloc_attrs(dev, flag)	(true)
@@ -356,7 +403,7 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
 
 static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
+				    unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
@@ -378,7 +425,7 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 
 static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
+				  unsigned long attrs)
 {
	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -398,31 +445,27 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
 {
-	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
 }
 
 static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
 {
-	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
 }
 
 static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
 {
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+	return dma_alloc_attrs(dev, size, dma_handle, gfp,
+			       DMA_ATTR_NON_CONSISTENT);
 }
 
 static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
 {
-	DEFINE_DMA_ATTRS(attrs);
-
-	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
-	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+	dma_free_attrs(dev, size, cpu_addr, dma_handle,
+		       DMA_ATTR_NON_CONSISTENT);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -646,9 +689,8 @@ static inline void dmam_release_declared_memory(struct device *dev)
 static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
 {
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+	return dma_alloc_attrs(dev, size, dma_addr, gfp,
+			       DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_alloc_writecombine
 #define dma_alloc_writecombine dma_alloc_wc
@@ -657,9 +699,8 @@ static inline void *dma_alloc_wc(struct device *dev, size_t size,
 static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
 {
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
+			      DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_free_writecombine
 #define dma_free_writecombine dma_free_wc
@@ -670,9 +711,8 @@ static inline int dma_mmap_wc(struct device *dev,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
 {
-	DEFINE_DMA_ATTRS(attrs);
-	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
-	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
+			      DMA_ATTR_WRITE_COMBINE);
 }
 #ifndef dma_mmap_writecombine
 #define dma_mmap_writecombine dma_mmap_wc
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 017fced60242..5f81f8a187f2 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -6,7 +6,6 @@
 #include <linux/types.h>
 
 struct device;
-struct dma_attrs;
 struct page;
 struct scatterlist;
 
@@ -68,10 +67,10 @@ swiotlb_free_coherent(struct device *hwdev, size_t size,
 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
-				   struct dma_attrs *attrs);
+				   unsigned long attrs);
 extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
-			       struct dma_attrs *attrs);
+			       unsigned long attrs);
 
 extern int
 swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
@@ -83,12 +82,13 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
 
 extern int
 swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
-		     enum dma_data_direction dir, struct dma_attrs *attrs);
+		     enum dma_data_direction dir,
+		     unsigned long attrs);
 
 extern void
 swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
-		       struct dma_attrs *attrs);
+		       unsigned long attrs);
 
 extern void
 swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,