author     NeilBrown <neilb@suse.com>           2016-01-07 11:02:34 +1100
committer  Vinod Koul <vinod.koul@intel.com>    2016-01-07 11:06:18 +0530
commit     b02bab6b0f928d49dbfb03e1e4e9dd43647623d7 (patch)
tree       1d7648d55adc0d47d6da533f7a96a70aa9cf8ec8
parent     16605e8d50898ac88b5b504a7fbd63ecdcf37702 (diff)
async_tx: use GFP_NOWAIT rather than GFP_NOIO
These async_XX functions are called from md/raid5 in an atomic
section, between get_cpu() and put_cpu(), so they must not sleep.
Hence use GFP_NOWAIT rather than GFP_NOIO.
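As a rough illustration of the constraint (a minimal sketch, not the
md/raid5 code: example_atomic_alloc() and the 64-byte size are invented,
while get_cpu()/put_cpu(), kmalloc() and the GFP flags are real kernel
APIs):

#include <linux/gfp.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/smp.h>

/* Between get_cpu() and put_cpu() preemption is disabled, so sleeping
 * is a bug. GFP_NOIO may block in direct reclaim; GFP_NOWAIT fails
 * fast instead, and the caller must handle the NULL return.
 */
static void example_atomic_alloc(void)
{
	int cpu = get_cpu();			/* disables preemption */
	void *buf = kmalloc(64, GFP_NOWAIT);	/* must not sleep here */

	if (!buf)
		pr_debug("cpu %d: no memory, take the fallback path\n", cpu);

	kfree(buf);				/* kfree(NULL) is a no-op */
	put_cpu();				/* re-enables preemption */
}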
Dan Williams writes: Longer term async_tx needs to be merged into md
directly as we can allocate this unmap data statically per-stripe
rather than per request.
Fixes: 7476bd79fc01 ("async_pq: convert to dmaengine_unmap_data")
Cc: stable@vger.kernel.org (v3.13+)
Reported-and-tested-by: Stanislav Samsonov <slava@annapurnalabs.com>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
 crypto/async_tx/async_memcpy.c      | 2 +-
 crypto/async_tx/async_pq.c          | 4 ++--
 crypto/async_tx/async_raid6_recov.c | 4 ++--
 crypto/async_tx/async_xor.c         | 4 ++--
 4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index f8c0b8dbeb75..88bc8e6b2a54 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 5d355e0c2633..c0748bbd4c08 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
index 934a84981495..8fab6275ea1f 100644
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e1bce26cd4f9..da75777f2b3f 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
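The change is safe because every call site in the diff already treats a
NULL unmap as "no DMA offload": a failed GFP_NOWAIT allocation simply
steers execution onto the synchronous software path. A condensed sketch
of that pattern from async_memcpy() (comments added; the mapping and
descriptor-submit code is elided):

	struct dmaengine_unmap_data *unmap = NULL;

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);

	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
		/* offload path: map src/dest and submit a DMA descriptor */
	} else {
		/* fallback path: synchronous CPU copy, which never sleeps */
	}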