author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-07 20:39:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-07 20:39:25 -0400
commit     d0cd84817c745655428dbfdb1e3f754230b46bef (patch)
tree       a7b6f422f6ac50f506ffa7a66f8e83387f90f212 /drivers
parent     bdf428feb225229b1d4715b45bbdad4a934cd89c (diff)
parent     3f334078567245429540e6461c81c749fce87f70 (diff)
Merge tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine
Pull dmaengine updates from Dan Williams:
"Even though this has fixes marked for -stable, given the size and the
needed conflict resolutions this is 3.18-rc1/merge-window material.
These patches have been languishing in my tree for a long while. The
fact that I do not have the time to do proper/prompt maintenance of
this tree is a primary factor in the decision to step down as
dmaengine maintainer. That and the fact that the bulk of drivers/dma/
activity is going through Vinod these days.
The net_dma removal has not been in -next. It has developed simple
conflicts against mainline and net-next (for-3.18).
Continuing thanks to Vinod for staying on top of drivers/dma/.
Summary:
1/ Step down as dmaengine maintainer see commit 08223d80df38
"dmaengine maintainer update"
2/ Removal of net_dma, as it has been marked 'broken' since 3.13
(commit 77873803363c "net_dma: mark broken"), without reports of
performance regression.
3/ Miscellaneous fixes"
* tag 'dmaengine-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
net: make tcp_cleanup_rbuf private
net_dma: revert 'copied_early'
net_dma: simple removal
dmaengine maintainer update
dmatest: prevent memory leakage on error path in thread
ioat: Use time_before_jiffies()
dmaengine: fix xor sources continuation
dma: mv_xor: Rename __mv_xor_slot_cleanup() to mv_xor_slot_cleanup()
dma: mv_xor: Remove all callers of mv_xor_slot_cleanup()
dma: mv_xor: Remove unneeded mv_xor_clean_completed_slots() call
ioat: Use pci_enable_msix_exact() instead of pci_enable_msix()
drivers: dma: Include appropriate header file in dca.c
drivers: dma: Mark functions as static in dma_v3.c
dma: mv_xor: Add DMA API error checks
ioat/dca: Use dev_is_pci() to check whether it is pci device
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/dma/Kconfig        |  12
-rw-r--r--   drivers/dma/Makefile       |   1
-rw-r--r--   drivers/dma/dmaengine.c    | 104
-rw-r--r--   drivers/dma/dmatest.c      |   4
-rw-r--r--   drivers/dma/ioat/dca.c     |  13
-rw-r--r--   drivers/dma/ioat/dma.c     |   3
-rw-r--r--   drivers/dma/ioat/dma.h     |   7
-rw-r--r--   drivers/dma/ioat/dma_v2.c  |   4
-rw-r--r--   drivers/dma/ioat/dma_v3.c  |   7
-rw-r--r--   drivers/dma/iovlock.c      | 280
-rw-r--r--   drivers/dma/mv_xor.c       |  80
11 files changed, 78 insertions, 437 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9b1ea0ef59af..a016490c95ae 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -427,18 +427,6 @@ config DMA_OF
 comment "DMA Clients"
         depends on DMA_ENGINE
 
-config NET_DMA
-        bool "Network: TCP receive copy offload"
-        depends on DMA_ENGINE && NET
-        default (INTEL_IOATDMA || FSL_DMA)
-        depends on BROKEN
-        help
-          This enables the use of DMA engines in the network stack to
-          offload receive copy-to-user operations, freeing CPU cycles.
-
-          Say Y here if you enabled INTEL_IOATDMA or FSL_DMA, otherwise
-          say N.
-
 config ASYNC_TX_DMA
         bool "Async_tx: Offload support for the async_tx api"
         depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c6adb925f0b9..cb626c179911 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -6,7 +6,6 @@ obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_DMA_ACPI) += acpi-dma.o
 obj-$(CONFIG_DMA_OF) += of-dma.o
-obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
 obj-$(CONFIG_INTEL_IOATDMA) += ioat/
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d5d30ed863ce..24bfaf0b92ba 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1081,110 +1081,6 @@ dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
 }
 EXPORT_SYMBOL(dmaengine_get_unmap_data);
 
-/**
- * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
- * @chan: DMA channel to offload copy to
- * @dest_pg: destination page
- * @dest_off: offset in page to copy to
- * @src_pg: source page
- * @src_off: offset in page to copy from
- * @len: length
- *
- * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
- * address according to the DMA mapping API rules for streaming mappings.
- * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
-        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
-        size_t len)
-{
-        struct dma_device *dev = chan->device;
-        struct dma_async_tx_descriptor *tx;
-        struct dmaengine_unmap_data *unmap;
-        dma_cookie_t cookie;
-        unsigned long flags;
-
-        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
-        if (!unmap)
-                return -ENOMEM;
-
-        unmap->to_cnt = 1;
-        unmap->from_cnt = 1;
-        unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
-                                      DMA_TO_DEVICE);
-        unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
-                                      DMA_FROM_DEVICE);
-        unmap->len = len;
-        flags = DMA_CTRL_ACK;
-        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
-                                         len, flags);
-
-        if (!tx) {
-                dmaengine_unmap_put(unmap);
-                return -ENOMEM;
-        }
-
-        dma_set_unmap(tx, unmap);
-        cookie = tx->tx_submit(tx);
-        dmaengine_unmap_put(unmap);
-
-        preempt_disable();
-        __this_cpu_add(chan->local->bytes_transferred, len);
-        __this_cpu_inc(chan->local->memcpy_count);
-        preempt_enable();
-
-        return cookie;
-}
-EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
-
-/**
- * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
- * @chan: DMA channel to offload copy to
- * @dest: destination address (virtual)
- * @src: source address (virtual)
- * @len: length
- *
- * Both @dest and @src must be mappable to a bus address according to the
- * DMA mapping API rules for streaming mappings.
- * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages).
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
-                            void *src, size_t len)
-{
-        return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
-                                         (unsigned long) dest & ~PAGE_MASK,
-                                         virt_to_page(src),
-                                         (unsigned long) src & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
-
-/**
- * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
- * @chan: DMA channel to offload copy to
- * @page: destination page
- * @offset: offset in page to copy to
- * @kdata: source address (virtual)
- * @len: length
- *
- * Both @page/@offset and @kdata must be mappable to a bus address according
- * to the DMA mapping API rules for streaming mappings.
- * Both @page/@offset and @kdata must stay memory resident (kernel memory or
- * locked user space pages)
- */
-dma_cookie_t
-dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
-                           unsigned int offset, void *kdata, size_t len)
-{
-        return dma_async_memcpy_pg_to_pg(chan, page, offset,
-                                         virt_to_page(kdata),
-                                         (unsigned long) kdata & ~PAGE_MASK, len);
-}
-EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
-
 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
         struct dma_chan *chan)
 {
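For context on the API that remains: iovlock.c (deleted below) was the in-tree caller of these wrappers, so once net_dma is gone they have no users. A client that still wants an offloaded copy drives the prep/submit interface directly. The following is a minimal sketch reconstructed from the body of the removed dma_async_memcpy_pg_to_pg() above; the per-CPU byte/op counters are omitted, whole-page copies are assumed, and 'chan', 'dst_pg' and 'src_pg' are hypothetical caller-supplied state:

#include <linux/dmaengine.h>

/* Hedged sketch: offloaded page-to-page copy without the removed wrappers.
 * Mirrors the deleted dma_async_memcpy_pg_to_pg(); dma_map_page() error
 * checks are elided here for brevity (the mv_xor hunks below show the
 * full dma_mapping_error() pattern). */
static dma_cookie_t example_memcpy_pg_to_pg(struct dma_chan *chan,
                                            struct page *dst_pg,
                                            struct page *src_pg, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        struct dmaengine_unmap_data *unmap;
        dma_cookie_t cookie;

        unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        unmap->to_cnt = 1;
        unmap->from_cnt = 1;
        unmap->addr[0] = dma_map_page(dev->dev, src_pg, 0, len, DMA_TO_DEVICE);
        unmap->addr[1] = dma_map_page(dev->dev, dst_pg, 0, len, DMA_FROM_DEVICE);
        unmap->len = len;

        tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
                                         len, DMA_CTRL_ACK);
        if (!tx) {
                dmaengine_unmap_put(unmap);
                return -ENOMEM;
        }

        dma_set_unmap(tx, unmap);       /* unmap runs when the copy completes */
        cookie = tx->tx_submit(tx);
        dmaengine_unmap_put(unmap);     /* drop our reference; tx holds its own */
        return cookie;
}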
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index e27cec25c59e..a8d7809e2f4c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -688,14 +688,14 @@ static int dmatest_func(void *data)
         runtime = ktime_us_delta(ktime_get(), ktime);
 
         ret = 0;
+err_dstbuf:
         for (i = 0; thread->dsts[i]; i++)
                 kfree(thread->dsts[i]);
-err_dstbuf:
         kfree(thread->dsts);
 err_dsts:
+err_srcbuf:
         for (i = 0; thread->srcs[i]; i++)
                 kfree(thread->srcs[i]);
-err_srcbuf:
         kfree(thread->srcs);
 err_srcs:
         kfree(pq_coefs);
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9e84d5bc9307..3b55bb8d969a 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -35,6 +35,7 @@
 
 #include "dma.h"
 #include "registers.h"
+#include "dma_v2.h"
 
 /*
  * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6
@@ -147,7 +148,7 @@ static int ioat_dca_add_requester(struct dca_provider *dca, struct device *dev)
         u16 id;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
         id = dcaid_from_pcidev(pdev);
@@ -179,7 +180,7 @@ static int ioat_dca_remove_requester(struct dca_provider *dca,
         int i;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
 
@@ -320,7 +321,7 @@ static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev)
         u16 global_req_table;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
         id = dcaid_from_pcidev(pdev);
@@ -354,7 +355,7 @@ static int ioat2_dca_remove_requester(struct dca_provider *dca,
         u16 global_req_table;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
 
@@ -496,7 +497,7 @@ static int ioat3_dca_add_requester(struct dca_provider *dca, struct device *dev)
         u16 global_req_table;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
         id = dcaid_from_pcidev(pdev);
@@ -530,7 +531,7 @@ static int ioat3_dca_remove_requester(struct dca_provider *dca,
         u16 global_req_table;
 
         /* This implementation only supports PCI-Express */
-        if (dev->bus != &pci_bus_type)
+        if (!dev_is_pci(dev))
                 return -ENODEV;
         pdev = to_pci_dev(dev);
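All six dca.c hunks above make the same substitution. dev_is_pci() lives in <linux/pci.h> and, as of this kernel generation, encapsulates exactly the bus comparison the driver used to open-code, so the change is behavior-preserving:

/* From <linux/pci.h>, shown for reference only. */
static inline bool dev_is_pci(struct device *dev)
{
        return dev->bus == &pci_bus_type;
}

The helper reads better at call sites and keeps drivers from depending directly on pci_bus_type.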
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 4e3549a16132..940c1502a8b5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -947,7 +947,7 @@ msix:
         for (i = 0; i < msixcnt; i++)
                 device->msix_entries[i].entry = i;
 
-        err = pci_enable_msix(pdev, device->msix_entries, msixcnt);
+        err = pci_enable_msix_exact(pdev, device->msix_entries, msixcnt);
         if (err)
                 goto msi;
 
@@ -1222,7 +1222,6 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca)
         err = ioat_probe(device);
         if (err)
                 return err;
-        ioat_set_tcp_copy_break(4096);
         err = ioat_register(device);
         if (err)
                 return err;
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e982f00a9843..d63f68b1aa35 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -214,13 +214,6 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw,
 #define dump_desc_dbg(c, d) \
         ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; })
 
-static inline void ioat_set_tcp_copy_break(unsigned long copybreak)
-{
-        #ifdef CONFIG_NET_DMA
-        sysctl_tcp_dma_copybreak = copybreak;
-        #endif
-}
-
 static inline struct ioat_chan_common *
 ioat_chan_by_index(struct ioatdma_device *device, int index)
 {
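On the pci_enable_msix_exact() switch above: pci_enable_msix() could return a positive value meaning "only this many vectors are available", which every caller had to handle (or, as here, lump in with failure); pci_enable_msix_exact() returns only 0 or a negative errno. A hedged sketch of the resulting calling convention, with 'msixcnt' and the MSI fallback standing in for the ioat specifics:

/* Hedged sketch: with pci_enable_msix_exact() a positive "retry with
 * fewer vectors" return can no longer occur, so a plain errno check
 * is enough. 'pdev', 'entries' and 'msixcnt' are placeholders for the
 * driver's real state. */
static int example_setup_msix(struct pci_dev *pdev,
                              struct msix_entry *entries, int msixcnt)
{
        int i, err;

        for (i = 0; i < msixcnt; i++)
                entries[i].entry = i;

        err = pci_enable_msix_exact(pdev, entries, msixcnt);
        if (err)
                return err;     /* caller falls back to MSI, as ioat does */

        return 0;
}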
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 8d1058085eeb..695483e6be32 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -735,7 +735,8 @@ int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs)
          * called under bh_disabled so we need to trigger the timer
          * event directly
          */
-        if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) {
+        if (time_is_before_jiffies(chan->timer.expires)
+            && timer_pending(&chan->timer)) {
                 struct ioatdma_device *device = chan->device;
 
                 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
@@ -899,7 +900,6 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca)
         err = ioat_probe(device);
         if (err)
                 return err;
-        ioat_set_tcp_copy_break(2048);
 
         list_for_each_entry(c, &dma->channels, device_node) {
                 chan = to_chan_common(c);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index b9b38a1cf92f..895f869d6c2c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -740,7 +740,7 @@ ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
         return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
                     unsigned int src_cnt, size_t len,
                     enum sum_check_flags *result, unsigned long flags)
@@ -1091,7 +1091,7 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
         }
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                   unsigned int src_cnt, const unsigned char *scf, size_t len,
                   enum sum_check_flags *pqres, unsigned long flags)
@@ -1133,7 +1133,7 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
                         flags);
 }
 
-struct dma_async_tx_descriptor *
+static struct dma_async_tx_descriptor *
 ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
                      unsigned int src_cnt, size_t len,
                      enum sum_check_flags *result, unsigned long flags)
@@ -1655,7 +1655,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
         err = ioat_probe(device);
         if (err)
                 return err;
-        ioat_set_tcp_copy_break(262144);
 
         list_for_each_entry(c, &dma->channels, device_node) {
                 chan = to_chan_common(c);
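On the time_is_before_jiffies() change above: a raw 'jiffies > expires' comparison gives the wrong answer once the jiffies counter wraps. The macro from <linux/jiffies.h> expands to time_after(jiffies, expires), which compares via signed subtraction and stays correct across the wrap. A minimal illustration ('expires' stands in for chan->timer.expires):

#include <linux/jiffies.h>

/* Hedged illustration: true once 'expires' has passed, even if the
 * jiffies counter wrapped in between; the raw '>' comparison the old
 * code used fails near the wraparound point. */
static bool example_timer_overdue(unsigned long expires)
{
        return time_is_before_jiffies(expires);
}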
diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
deleted file mode 100644
index bb48a57c2fc1..000000000000
--- a/drivers/dma/iovlock.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
- * Portions based on net/core/datagram.c and copyrighted by their authors.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59
- * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called COPYING.
- */
-
-/*
- * This code allows the net stack to make use of a DMA engine for
- * skb to iovec copies.
- */
-
-#include <linux/dmaengine.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <net/tcp.h> /* for memcpy_toiovec */
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-static int num_pages_spanned(struct iovec *iov)
-{
-        return
-        ((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-        ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT);
-}
-
-/*
- * Pin down all the iovec pages needed for len bytes.
- * Return a struct dma_pinned_list to keep track of pages pinned down.
- *
- * We are allocating a single chunk of memory, and then carving it up into
- * 3 sections, the latter 2 whose size depends on the number of iovecs and the
- * total number of pages, respectively.
- */
-struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
-{
-        struct dma_pinned_list *local_list;
-        struct page **pages;
-        int i;
-        int ret;
-        int nr_iovecs = 0;
-        int iovec_len_used = 0;
-        int iovec_pages_used = 0;
-
-        /* don't pin down non-user-based iovecs */
-        if (segment_eq(get_fs(), KERNEL_DS))
-                return NULL;
-
-        /* determine how many iovecs/pages there are, up front */
-        do {
-                iovec_len_used += iov[nr_iovecs].iov_len;
-                iovec_pages_used += num_pages_spanned(&iov[nr_iovecs]);
-                nr_iovecs++;
-        } while (iovec_len_used < len);
-
-        /* single kmalloc for pinned list, page_list[], and the page arrays */
-        local_list = kmalloc(sizeof(*local_list)
-                + (nr_iovecs * sizeof (struct dma_page_list))
-                + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
-        if (!local_list)
-                goto out;
-
-        /* list of pages starts right after the page list array */
-        pages = (struct page **) &local_list->page_list[nr_iovecs];
-
-        local_list->nr_iovecs = 0;
-
-        for (i = 0; i < nr_iovecs; i++) {
-                struct dma_page_list *page_list = &local_list->page_list[i];
-
-                len -= iov[i].iov_len;
-
-                if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
-                        goto unpin;
-
-                page_list->nr_pages = num_pages_spanned(&iov[i]);
-                page_list->base_address = iov[i].iov_base;
-
-                page_list->pages = pages;
-                pages += page_list->nr_pages;
-
-                /* pin pages down */
-                down_read(&current->mm->mmap_sem);
-                ret = get_user_pages(
-                        current,
-                        current->mm,
-                        (unsigned long) iov[i].iov_base,
-                        page_list->nr_pages,
-                        1,      /* write */
-                        0,      /* force */
-                        page_list->pages,
-                        NULL);
-                up_read(&current->mm->mmap_sem);
-
-                if (ret != page_list->nr_pages)
-                        goto unpin;
-
-                local_list->nr_iovecs = i + 1;
-        }
-
-        return local_list;
-
-unpin:
-        dma_unpin_iovec_pages(local_list);
-out:
-        return NULL;
-}
-
-void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
-{
-        int i, j;
-
-        if (!pinned_list)
-                return;
-
-        for (i = 0; i < pinned_list->nr_iovecs; i++) {
-                struct dma_page_list *page_list = &pinned_list->page_list[i];
-                for (j = 0; j < page_list->nr_pages; j++) {
-                        set_page_dirty_lock(page_list->pages[j]);
-                        page_cache_release(page_list->pages[j]);
-                }
-        }
-
-        kfree(pinned_list);
-}
-
-
-/*
- * We have already pinned down the pages we will be using in the iovecs.
- * Each entry in iov array has corresponding entry in pinned_list->page_list.
- * Using array indexing to keep iov[] and page_list[] in sync.
- * Initial elements in iov array's iov->iov_len will be 0 if already copied into
- *   by another call.
- * iov array length remaining guaranteed to be bigger than len.
- */
-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
-        struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len)
-{
-        int iov_byte_offset;
-        int copy;
-        dma_cookie_t dma_cookie = 0;
-        int iovec_idx;
-        int page_idx;
-
-        if (!chan)
-                return memcpy_toiovec(iov, kdata, len);
-
-        iovec_idx = 0;
-        while (iovec_idx < pinned_list->nr_iovecs) {
-                struct dma_page_list *page_list;
-
-                /* skip already used-up iovecs */
-                while (!iov[iovec_idx].iov_len)
-                        iovec_idx++;
-
-                page_list = &pinned_list->page_list[iovec_idx];
-
-                iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-                page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-                         - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-                /* break up copies to not cross page boundary */
-                while (iov[iovec_idx].iov_len) {
-                        copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-                        copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-                        dma_cookie = dma_async_memcpy_buf_to_pg(chan,
-                                        page_list->pages[page_idx],
-                                        iov_byte_offset,
-                                        kdata,
-                                        copy);
-                        /* poll for a descriptor slot */
-                        if (unlikely(dma_cookie < 0)) {
-                                dma_async_issue_pending(chan);
-                                continue;
-                        }
-
-                        len -= copy;
-                        iov[iovec_idx].iov_len -= copy;
-                        iov[iovec_idx].iov_base += copy;
-
-                        if (!len)
-                                return dma_cookie;
-
-                        kdata += copy;
-                        iov_byte_offset = 0;
-                        page_idx++;
-                }
-                iovec_idx++;
-        }
-
-        /* really bad if we ever run out of iovecs */
-        BUG();
-        return -EFAULT;
-}
-
-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
-        struct dma_pinned_list *pinned_list, struct page *page,
-        unsigned int offset, size_t len)
-{
-        int iov_byte_offset;
-        int copy;
-        dma_cookie_t dma_cookie = 0;
-        int iovec_idx;
-        int page_idx;
-        int err;
-
-        /* this needs as-yet-unimplemented buf-to-buff, so punt. */
-        /* TODO: use dma for this */
-        if (!chan || !pinned_list) {
-                u8 *vaddr = kmap(page);
-                err = memcpy_toiovec(iov, vaddr + offset, len);
-                kunmap(page);
-                return err;
-        }
-
-        iovec_idx = 0;
-        while (iovec_idx < pinned_list->nr_iovecs) {
-                struct dma_page_list *page_list;
-
-                /* skip already used-up iovecs */
-                while (!iov[iovec_idx].iov_len)
-                        iovec_idx++;
-
-                page_list = &pinned_list->page_list[iovec_idx];
-
-                iov_byte_offset = ((unsigned long)iov[iovec_idx].iov_base & ~PAGE_MASK);
-                page_idx = (((unsigned long)iov[iovec_idx].iov_base & PAGE_MASK)
-                         - ((unsigned long)page_list->base_address & PAGE_MASK)) >> PAGE_SHIFT;
-
-                /* break up copies to not cross page boundary */
-                while (iov[iovec_idx].iov_len) {
-                        copy = min_t(int, PAGE_SIZE - iov_byte_offset, len);
-                        copy = min_t(int, copy, iov[iovec_idx].iov_len);
-
-                        dma_cookie = dma_async_memcpy_pg_to_pg(chan,
-                                        page_list->pages[page_idx],
-                                        iov_byte_offset,
-                                        page,
-                                        offset,
-                                        copy);
-                        /* poll for a descriptor slot */
-                        if (unlikely(dma_cookie < 0)) {
-                                dma_async_issue_pending(chan);
-                                continue;
-                        }
-
-                        len -= copy;
-                        iov[iovec_idx].iov_len -= copy;
-                        iov[iovec_idx].iov_base += copy;
-
-                        if (!len)
-                                return dma_cookie;
-
-                        offset += copy;
-                        iov_byte_offset = 0;
-                        page_idx++;
-                }
-                iovec_idx++;
-        }
-
-        /* really bad if we ever run out of iovecs */
-        BUG();
-        return -EFAULT;
-}
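A side note on the allocation pattern in the deleted dma_pin_iovec_pages(): one kmalloc() is sized for a fixed header plus two variable-length sections (the per-iovec page lists, then every page pointer), which are then carved out by pointer arithmetic. A hedged sketch of that layout technique, with illustrative types rather than the removed kernel structures:

/* Hedged sketch of the single-allocation layout: header, then
 * nr_iovecs page-list entries, then a flat array of page pointers.
 * 'example_*' names are hypothetical, not kernel types. */
struct example_page_list {
        int nr_pages;
        struct page **pages;
};

struct example_pinned_list {
        int nr_iovecs;
        struct example_page_list page_list[];
};

static struct example_pinned_list *example_alloc(int nr_iovecs, int nr_pages)
{
        struct example_pinned_list *list;
        struct page **pages;

        list = kmalloc(sizeof(*list)
                        + nr_iovecs * sizeof(struct example_page_list)
                        + nr_pages * sizeof(struct page *), GFP_KERNEL);
        if (!list)
                return NULL;

        /* the page-pointer array begins right after page_list[] */
        pages = (struct page **)&list->page_list[nr_iovecs];
        list->nr_iovecs = nr_iovecs;
        list->page_list[0].pages = pages;       /* real code advances this
                                                 * per iovec, as the deleted
                                                 * function did */
        return list;
}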
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 394cbc5c93e3..7938272f2edf 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -310,7 +310,8 @@ mv_xor_clean_slot(struct mv_xor_desc_slot *desc,
         return 0;
 }
 
-static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
+/* This function must be called with the mv_xor_chan spinlock held */
+static void mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
 {
         struct mv_xor_desc_slot *iter, *_iter;
         dma_cookie_t cookie = 0;
@@ -366,18 +367,13 @@ static void __mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
         mv_chan->dmachan.completed_cookie = cookie;
 }
 
-static void
-mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
-{
-        spin_lock_bh(&mv_chan->lock);
-        __mv_xor_slot_cleanup(mv_chan);
-        spin_unlock_bh(&mv_chan->lock);
-}
-
 static void mv_xor_tasklet(unsigned long data)
 {
         struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
+
+        spin_lock_bh(&chan->lock);
         mv_xor_slot_cleanup(chan);
+        spin_unlock_bh(&chan->lock);
 }
 
 static struct mv_xor_desc_slot *
@@ -656,9 +652,10 @@ static void mv_xor_free_chan_resources(struct dma_chan *chan)
         struct mv_xor_desc_slot *iter, *_iter;
         int in_use_descs = 0;
 
+        spin_lock_bh(&mv_chan->lock);
+
         mv_xor_slot_cleanup(mv_chan);
 
-        spin_lock_bh(&mv_chan->lock);
         list_for_each_entry_safe(iter, _iter, &mv_chan->chain,
                                         chain_node) {
                 in_use_descs++;
@@ -700,11 +697,12 @@ static enum dma_status mv_xor_status(struct dma_chan *chan,
         enum dma_status ret;
 
         ret = dma_cookie_status(chan, cookie, txstate);
-        if (ret == DMA_COMPLETE) {
-                mv_xor_clean_completed_slots(mv_chan);
+        if (ret == DMA_COMPLETE)
                 return ret;
-        }
+
+        spin_lock_bh(&mv_chan->lock);
         mv_xor_slot_cleanup(mv_chan);
+        spin_unlock_bh(&mv_chan->lock);
 
         return dma_cookie_status(chan, cookie, txstate);
 }
@@ -782,7 +780,7 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
 
 static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 {
-        int i;
+        int i, ret;
         void *src, *dest;
         dma_addr_t src_dma, dest_dma;
         struct dma_chan *dma_chan;
@@ -819,19 +817,44 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
 
         src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
                                  PAGE_SIZE, DMA_TO_DEVICE);
-        unmap->to_cnt = 1;
         unmap->addr[0] = src_dma;
+        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
+        if (ret) {
+                err = -ENOMEM;
+                goto free_resources;
+        }
+        unmap->to_cnt = 1;
+
         dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
                                   PAGE_SIZE, DMA_FROM_DEVICE);
-        unmap->from_cnt = 1;
         unmap->addr[1] = dest_dma;
+        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
+        if (ret) {
+                err = -ENOMEM;
+                goto free_resources;
+        }
+        unmap->from_cnt = 1;
         unmap->len = PAGE_SIZE;
 
         tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
                                     PAGE_SIZE, 0);
+        if (!tx) {
+                dev_err(dma_chan->device->dev,
+                        "Self-test cannot prepare operation, disabling\n");
+                err = -ENODEV;
+                goto free_resources;
+        }
+
         cookie = mv_xor_tx_submit(tx);
+        if (dma_submit_error(cookie)) {
+                dev_err(dma_chan->device->dev,
+                        "Self-test submit error, disabling\n");
+                err = -ENODEV;
+                goto free_resources;
+        }
+
         mv_xor_issue_pending(dma_chan);
         async_tx_ack(tx);
         msleep(1);
@@ -866,7 +889,7 @@ out:
 static int
 mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
 {
-        int i, src_idx;
+        int i, src_idx, ret;
         struct page *dest;
         struct page *xor_srcs[MV_XOR_NUM_SRC_TEST];
         dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
@@ -929,19 +952,42 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
                 unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
                                               0, PAGE_SIZE, DMA_TO_DEVICE);
                 dma_srcs[i] = unmap->addr[i];
+                ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[i]);
+                if (ret) {
+                        err = -ENOMEM;
+                        goto free_resources;
+                }
                 unmap->to_cnt++;
         }
 
         unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
                                       DMA_FROM_DEVICE);
         dest_dma = unmap->addr[src_count];
+        ret = dma_mapping_error(dma_chan->device->dev, unmap->addr[src_count]);
+        if (ret) {
+                err = -ENOMEM;
+                goto free_resources;
+        }
         unmap->from_cnt = 1;
         unmap->len = PAGE_SIZE;
 
         tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
                                  src_count, PAGE_SIZE, 0);
+        if (!tx) {
+                dev_err(dma_chan->device->dev,
+                        "Self-test cannot prepare operation, disabling\n");
+                err = -ENODEV;
+                goto free_resources;
+        }
+
         cookie = mv_xor_tx_submit(tx);
+        if (dma_submit_error(cookie)) {
+                dev_err(dma_chan->device->dev,
+                        "Self-test submit error, disabling\n");
+                err = -ENODEV;
+                goto free_resources;
+        }
+
         mv_xor_issue_pending(dma_chan);
         async_tx_ack(tx);
         msleep(8);
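The mv_xor hunks above add the two checks the DMA API requires: a dma_map_page() result must be validated with dma_mapping_error() before the address is used (streaming mappings can fail, e.g. under swiotlb or an IOMMU), and a submitted cookie must be checked with dma_submit_error(). A hedged distillation of the pattern, with 'dev', 'pg' and 'tx' as hypothetical placeholders for the driver's real state:

/* Hedged sketch of the error-checking pattern added above. */
static int example_map_and_submit(struct device *dev, struct page *pg,
                                  struct dma_async_tx_descriptor *tx)
{
        dma_addr_t addr;
        dma_cookie_t cookie;

        addr = dma_map_page(dev, pg, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, addr))
                return -ENOMEM; /* never use 'addr' if the mapping failed */

        cookie = dmaengine_submit(tx);
        if (dma_submit_error(cookie))
                return -ENODEV; /* descriptor was never queued */

        return 0;
}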