author    | Dan Williams <dan.j.williams@intel.com> | 2013-10-18 19:35:22 +0200
committer | Dan Williams <dan.j.williams@intel.com> | 2013-11-13 16:25:06 -0800
commit    | 56ea27fd61f546117a35236113be72c8aaec382d
tree      | 14c7a110d4b7c8ccd8f0e70a5d5551e417d7e5bf /drivers/dma
parent    | d1cab34c039584ebe76b04d2f2109e0d87d344e1
dmaengine: consolidate memcpy apis
Copying from page to page (dma_async_memcpy_pg_to_pg) is the superset;
make the other two APIs use it in preparation for providing a
common dma unmap implementation. The common implementation just wants
to assume all buffers are mapped with dma_map_page().
Cc: Vinod Koul <vinod.koul@intel.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
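
The consolidation rests on one DMA-mapping-API fact: for a lowmem kernel virtual address, dma_map_single() and dma_map_page() on the buffer's backing page at its in-page offset yield the same bus address, so every memcpy buffer can be treated as page-mapped. Below is a minimal sketch of that decomposition, assuming a lowmem (non-vmalloc, non-highmem) buffer; the helper name map_buf_as_page() is hypothetical and not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/*
 * Hypothetical illustration only: decompose a lowmem kernel virtual
 * address into its backing page plus in-page offset, then map it with
 * dma_map_page().  For such addresses this yields the same bus address
 * as dma_map_single(dev, buf, len, dir), which is why a common unmap
 * path can assume dma_map_page() was used for every buffer.
 */
static dma_addr_t map_buf_as_page(struct device *dev, void *buf,
                                  size_t len, enum dma_data_direction dir)
{
        struct page *pg = virt_to_page(buf);
        unsigned int off = (unsigned long)buf & ~PAGE_MASK;

        return dma_map_page(dev, pg, off, len, dir);
}

The patch applies exactly this decomposition, turning the buf_to_buf and buf_to_pg variants into thin wrappers around the page-to-page version.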
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/dmaengine.c | 137
1 file changed, 45 insertions(+), 92 deletions(-)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9162ac80c18f..bbc89df6bc56 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -902,20 +902,23 @@ void dma_async_device_unregister(struct dma_device *device)
 EXPORT_SYMBOL(dma_async_device_unregister);
 
 /**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
+ * @dest_pg: destination page
+ * @dest_off: offset in page to copy to
+ * @src_pg: source page
+ * @src_off: offset in page to copy from
  * @len: length
  *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
+ * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
+ * address according to the DMA mapping API rules for streaming mappings.
+ * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
+ * (kernel memory or locked user space pages).
  */
 dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-                        void *src, size_t len)
+dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
+        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
+        size_t len)
 {
         struct dma_device *dev = chan->device;
         struct dma_async_tx_descriptor *tx;
@@ -923,16 +926,15 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
         dma_cookie_t cookie;
         unsigned long flags;
 
-        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
-        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
-        flags = DMA_CTRL_ACK |
-                DMA_COMPL_SRC_UNMAP_SINGLE |
-                DMA_COMPL_DEST_UNMAP_SINGLE;
+        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
+        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
+                                DMA_FROM_DEVICE);
+        flags = DMA_CTRL_ACK;
         tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 
         if (!tx) {
-                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
+                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
+                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                 return -ENOMEM;
         }
 
@@ -946,6 +948,29 @@ dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 
         return cookie;
 }
+EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
+
+/**
+ * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
+ * @chan: DMA channel to offload copy to
+ * @dest: destination address (virtual)
+ * @src: source address (virtual)
+ * @len: length
+ *
+ * Both @dest and @src must be mappable to a bus address according to the
+ * DMA mapping API rules for streaming mappings.
+ * Both @dest and @src must stay memory resident (kernel memory or locked
+ * user space pages).
+ */
+dma_cookie_t
+dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
+                        void *src, size_t len)
+{
+        return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
+                                         (unsigned long) dest & ~PAGE_MASK,
+                                         virt_to_page(src),
+                                         (unsigned long) src & ~PAGE_MASK, len);
+}
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
 
 /**
@@ -963,86 +988,14 @@ EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
  */
 dma_cookie_t
 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-                        unsigned int offset, void *kdata, size_t len)
+                           unsigned int offset, void *kdata, size_t len)
 {
-        struct dma_device *dev = chan->device;
-        struct dma_async_tx_descriptor *tx;
-        dma_addr_t dma_dest, dma_src;
-        dma_cookie_t cookie;
-        unsigned long flags;
-
-        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
-        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
-        flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
-        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-        if (!tx) {
-                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
-                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-                return -ENOMEM;
-        }
-
-        tx->callback = NULL;
-        cookie = tx->tx_submit(tx);
-
-        preempt_disable();
-        __this_cpu_add(chan->local->bytes_transferred, len);
-        __this_cpu_inc(chan->local->memcpy_count);
-        preempt_enable();
-
-        return cookie;
+        return dma_async_memcpy_pg_to_pg(chan, page, offset,
+                                         virt_to_page(kdata),
+                                         (unsigned long) kdata & ~PAGE_MASK, len);
 }
 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-        size_t len)
-{
-        struct dma_device *dev = chan->device;
-        struct dma_async_tx_descriptor *tx;
-        dma_addr_t dma_dest, dma_src;
-        dma_cookie_t cookie;
-        unsigned long flags;
-
-        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
-        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
-                                DMA_FROM_DEVICE);
-        flags = DMA_CTRL_ACK;
-        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
-
-        if (!tx) {
-                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
-                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
-                return -ENOMEM;
-        }
-
-        tx->callback = NULL;
-        cookie = tx->tx_submit(tx);
-
-        preempt_disable();
-        __this_cpu_add(chan->local->bytes_transferred, len);
-        __this_cpu_inc(chan->local->memcpy_count);
-        preempt_enable();
-
-        return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                   struct dma_chan *chan)
 {
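
For context, a hedged usage sketch of the consolidated entry point, not part of this patch: it assumes the caller has taken a dmaengine reference with dmaengine_get() so that dma_find_channel() can return a DMA_MEMCPY-capable channel, and it busy-waits for completion, which a real caller would normally avoid in favor of a completion callback. copy_one_page() is a hypothetical helper name:

#include <linux/dmaengine.h>
#include <linux/mm.h>

/*
 * Hypothetical caller: offload a single page copy and spin until the
 * engine reports completion.  Assumes dmaengine_get() was called so
 * dma_find_channel() has a populated channel table; a real caller
 * would fall back to memcpy() when no channel is available.
 */
static int copy_one_page(struct page *dst, struct page *src)
{
        struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
        dma_cookie_t cookie;

        if (!chan)
                return -ENODEV;

        /* Maps both pages, preps the descriptor, and submits it. */
        cookie = dma_async_memcpy_pg_to_pg(chan, dst, 0, src, 0, PAGE_SIZE);
        if (dma_submit_error(cookie))
                return -ENOMEM;

        dma_async_issue_pending(chan);
        while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
               DMA_IN_PROGRESS)
                cpu_relax();

        return 0;
}

Because all three exported memcpy helpers now funnel through dma_async_memcpy_pg_to_pg(), a single unmap path that calls dma_unmap_page() on both bus addresses is correct for every caller, which is exactly what the follow-on common-unmap work relies on.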