author		Yuan Kang <Yuan.Kang@freescale.com>	2012-06-22 19:48:49 -0500
committer	Herbert Xu <herbert@gondor.apana.org.au>	2012-06-27 14:42:05 +0800
commit		643b39b031f546c7c3c60ef360b8260aa2b32762 (patch)
tree		71658c6a049937d65b48ebe7bc9a159eb655885a /drivers/crypto
parent		b0e09bae37eeacb213d9baf8fcb4d48934a4ada5 (diff)
crypto: caam - chaining support
support chained scatterlists for aead, ablkcipher and ahash.
Signed-off-by: Yuan Kang <Yuan.Kang@freescale.com>
- fix dma unmap leak
- un-unlikely src == dst (drop the unlikely() annotations on the src/dst comparisons), due to experience with AF_ALG
Signed-off-by: Kudupudi Ugendreshwar <B38865@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
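
[Editorial note] Background for reading the diff below: a chained scatterlist links several scatterlist tables together by turning the last slot of one table into a zero-length chain entry whose page pointer addresses the next table. The detection added to __sg_count() keys off exactly that marker ((sg + 1)->length == 0), where the old code hit BUG(). The sketch below is purely illustrative (none of these names appear in the patch) and shows how a caller might build such a list with the standard kernel helpers:

	#include <linux/scatterlist.h>

	/*
	 * Illustrative only: build a scatterlist spanning two tables, the
	 * kind of input this patch teaches the CAAM driver to accept.
	 * sg_chain() converts table_a's last slot into a zero-length chain
	 * entry pointing at table_b -- the marker __sg_count() now
	 * recognizes instead of calling BUG().
	 */
	static void example_build_chained_sg(struct scatterlist table_a[2],
					     struct scatterlist table_b[1],
					     void *buf0, unsigned int len0,
					     void *buf1, unsigned int len1)
	{
		sg_init_table(table_a, 2);	/* slot 1 reserved for the chain */
		sg_set_buf(&table_a[0], buf0, len0);

		sg_init_table(table_b, 1);
		sg_set_buf(&table_b[0], buf1, len1);

		/* chain: table_a[1] now points at table_b */
		sg_chain(table_a, 2, table_b);
	}

Walking such a list entry by entry requires scatterwalk_sg_next() rather than plain pointer arithmetic, which is why the helpers below use it throughout.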
Diffstat (limited to 'drivers/crypto')
-rw-r--r--	drivers/crypto/caam/caamalg.c	| 114
-rw-r--r--	drivers/crypto/caam/caamhash.c	| 53
-rw-r--r--	drivers/crypto/caam/sg_sw_sec4.h	| 44
3 files changed, 147 insertions, 64 deletions
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 5ab480a12b5..0c1ea8492ef 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -654,8 +654,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 /*
  * aead_edesc - s/w-extended aead descriptor
  * @assoc_nents: number of segments in associated data (SPI+Seq) scatterlist
+ * @assoc_chained: if source is chained
  * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -664,8 +667,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
  */
 struct aead_edesc {
 	int assoc_nents;
+	bool assoc_chained;
 	int src_nents;
+	bool src_chained;
 	int dst_nents;
+	bool dst_chained;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
@@ -676,7 +682,9 @@ struct aead_edesc {
 /*
  * ablkcipher_edesc - s/w-extended ablkcipher descriptor
  * @src_nents: number of segments in input scatterlist
+ * @src_chained: if source is chained
  * @dst_nents: number of segments in output scatterlist
+ * @dst_chained: if destination is chained
  * @iv_dma: dma address of iv for checking continuity and link table
  * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
@@ -685,7 +693,9 @@ struct aead_edesc {
  */
 struct ablkcipher_edesc {
 	int src_nents;
+	bool src_chained;
 	int dst_nents;
+	bool dst_chained;
 	dma_addr_t iv_dma;
 	int sec4_sg_bytes;
 	dma_addr_t sec4_sg_dma;
@@ -694,15 +704,19 @@ struct aead_edesc {
 };

 static void caam_unmap(struct device *dev, struct scatterlist *src,
-		       struct scatterlist *dst, int src_nents, int dst_nents,
+		       struct scatterlist *dst, int src_nents,
+		       bool src_chained, int dst_nents, bool dst_chained,
 		       dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
 		       int sec4_sg_bytes)
 {
-	if (unlikely(dst != src)) {
-		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
-		dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+	if (dst != src) {
+		dma_unmap_sg_chained(dev, src, src_nents ? : 1, DMA_TO_DEVICE,
+				     src_chained);
+		dma_unmap_sg_chained(dev, dst, dst_nents ? : 1, DMA_FROM_DEVICE,
+				     dst_chained);
 	} else {
-		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+		dma_unmap_sg_chained(dev, src, src_nents ? : 1,
+				     DMA_BIDIRECTIONAL, src_chained);
 	}

 	if (iv_dma)
@@ -719,12 +733,13 @@ static void aead_unmap(struct device *dev,
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	int ivsize = crypto_aead_ivsize(aead);

-	dma_unmap_sg(dev, req->assoc, edesc->assoc_nents, DMA_TO_DEVICE);
+	dma_unmap_sg_chained(dev, req->assoc, edesc->assoc_nents,
+			     DMA_TO_DEVICE, edesc->assoc_chained);

 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
-		   edesc->sec4_sg_bytes);
+		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }

 static void ablkcipher_unmap(struct device *dev,
@@ -735,9 +750,9 @@ static void ablkcipher_unmap(struct device *dev,
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);

 	caam_unmap(dev, req->src, req->dst,
-		   edesc->src_nents, edesc->dst_nents,
-		   edesc->iv_dma, ivsize, edesc->sec4_sg_dma,
-		   edesc->sec4_sg_bytes);
+		   edesc->src_nents, edesc->src_chained, edesc->dst_nents,
+		   edesc->dst_chained, edesc->iv_dma, ivsize,
+		   edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }

 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
@@ -1128,25 +1143,26 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	dma_addr_t iv_dma = 0;
 	int sgc;
 	bool all_contig = true;
+	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int ivsize = crypto_aead_ivsize(aead);
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

-	assoc_nents = sg_count(req->assoc, req->assoclen);
-	src_nents = sg_count(req->src, req->cryptlen);
+	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen);
+		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

-	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-			 DMA_BIDIRECTIONAL);
+	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+				 DMA_BIDIRECTIONAL, assoc_chained);
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}

 	/* Check if data are contiguous */
@@ -1172,8 +1188,11 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
 	}

 	edesc->assoc_nents = assoc_nents;
+	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
 	edesc->iv_dma = iv_dma;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
@@ -1307,24 +1326,25 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	int sgc;
 	u32 contig = GIV_SRC_CONTIG | GIV_DST_CONTIG;
 	int ivsize = crypto_aead_ivsize(aead);
+	bool assoc_chained = false, src_chained = false, dst_chained = false;
 	int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes;

-	assoc_nents = sg_count(req->assoc, req->assoclen);
-	src_nents = sg_count(req->src, req->cryptlen);
+	assoc_nents = sg_count(req->assoc, req->assoclen, &assoc_chained);
+	src_nents = sg_count(req->src, req->cryptlen, &src_chained);

 	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->cryptlen);
+		dst_nents = sg_count(req->dst, req->cryptlen, &dst_chained);

-	sgc = dma_map_sg(jrdev, req->assoc, assoc_nents ? : 1,
-			 DMA_BIDIRECTIONAL);
+	sgc = dma_map_sg_chained(jrdev, req->assoc, assoc_nents ? : 1,
+				 DMA_BIDIRECTIONAL, assoc_chained);
 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-			 	 DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}

 	/* Check if data are contiguous */
@@ -1358,8 +1378,11 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
 	}

 	edesc->assoc_nents = assoc_nents;
+	edesc->assoc_chained = assoc_chained;
 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
 	edesc->iv_dma = iv_dma;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
@@ -1459,21 +1482,22 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	bool iv_contig = false;
 	int sgc;
 	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	bool src_chained = false, dst_chained = false;
 	int sec4_sg_index;

-	src_nents = sg_count(req->src, req->nbytes);
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);

-	if (unlikely(req->dst != req->src))
-		dst_nents = sg_count(req->dst, req->nbytes);
+	if (req->dst != req->src)
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);

 	if (likely(req->src == req->dst)) {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_BIDIRECTIONAL);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
 	} else {
-		sgc = dma_map_sg(jrdev, req->src, src_nents ? : 1,
-				 DMA_TO_DEVICE);
-		sgc = dma_map_sg(jrdev, req->dst, dst_nents ? : 1,
-				 DMA_FROM_DEVICE);
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
 	}

 	/*
@@ -1497,7 +1521,9 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 	}

 	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
 	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
 			 desc_bytes;
@@ -1510,7 +1536,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 		sec4_sg_index += 1 + src_nents;
 	}

-	if (unlikely(dst_nents)) {
+	if (dst_nents) {
 		sg_to_sec4_sg_last(req->dst, dst_nents,
 				   edesc->sec4_sg + sec4_sg_index, 0);
 	}
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 7dcf28f4814..895aaf2bca9 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -175,9 +175,10 @@ static inline dma_addr_t buf_map_to_sec4_sg(struct device *jrdev,
 /* Map req->src and put it in link table */
 static inline void src_map_to_sec4_sg(struct device *jrdev,
 				      struct scatterlist *src, int src_nents,
-				      struct sec4_sg_entry *sec4_sg)
+				      struct sec4_sg_entry *sec4_sg,
+				      bool chained)
 {
-	dma_map_sg(jrdev, src, src_nents, DMA_TO_DEVICE);
+	dma_map_sg_chained(jrdev, src, src_nents, DMA_TO_DEVICE, chained);
 	sg_to_sec4_sg_last(src, src_nents, sec4_sg, 0);
 }
@@ -563,6 +564,7 @@ badkey:
  * ahash_edesc - s/w-extended ahash descriptor
  * @dst_dma: physical mapped address of req->result
  * @sec4_sg_dma: physical mapped address of h/w link table
+ * @chained: if source is chained
  * @src_nents: number of segments in input scatterlist
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
  * @sec4_sg: pointer to h/w link table
@@ -571,6 +573,7 @@ badkey:
 struct ahash_edesc {
 	dma_addr_t dst_dma;
 	dma_addr_t sec4_sg_dma;
+	bool chained;
 	int src_nents;
 	int sec4_sg_bytes;
 	struct sec4_sg_entry *sec4_sg;
@@ -582,7 +585,8 @@ static inline void ahash_unmap(struct device *dev,
 			struct ahash_request *req, int dst_len)
 {
 	if (edesc->src_nents)
-		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+		dma_unmap_sg_chained(dev, req->src, edesc->src_nents,
+				     DMA_TO_DEVICE, edesc->chained);
 	if (edesc->dst_dma)
 		dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
@@ -775,6 +779,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 	dma_addr_t ptr = ctx->sh_desc_update_dma;
 	int src_nents, sec4_sg_bytes, sec4_sg_src_index;
 	struct ahash_edesc *edesc;
+	bool chained = false;
 	int ret = 0;
 	int sh_len;
@@ -783,7 +788,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 	to_hash = in_len - *next_buflen;

 	if (to_hash) {
-		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
+		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+				       &chained);
 		sec4_sg_src_index = 1 + (*buflen ? 1 : 0);
 		sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 				 sizeof(struct sec4_sg_entry);
@@ -801,6 +807,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 		}

 		edesc->src_nents = src_nents;
+		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
@@ -818,7 +825,8 @@ static int ahash_update_ctx(struct ahash_request *req)
 		if (src_nents) {
 			src_map_to_sec4_sg(jrdev, req->src, src_nents,
-					   edesc->sec4_sg + sec4_sg_src_index);
+					   edesc->sec4_sg + sec4_sg_src_index,
+					   chained);
 			if (*next_buflen) {
 				sg_copy_part(next_buf, req->src, to_hash -
 					     *buflen, req->nbytes);
@@ -958,10 +966,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	int src_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
+	bool chained = false;
 	int ret = 0;
 	int sh_len;

-	src_nents = __sg_count(req->src, req->nbytes);
+	src_nents = __sg_count(req->src, req->nbytes, &chained);
 	sec4_sg_src_index = 1 + (buflen ? 1 : 0);
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -979,6 +988,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

 	edesc->src_nents = src_nents;
+	edesc->chained = chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
@@ -993,7 +1003,7 @@ static int ahash_finup_ctx(struct ahash_request *req)
 						last_buflen);

 	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
-			   sec4_sg_src_index);
+			   sec4_sg_src_index, chained);

 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
 			  buflen + req->nbytes, LDST_SGF);
@@ -1030,12 +1040,14 @@ static int ahash_digest(struct ahash_request *req)
 	int src_nents, sec4_sg_bytes;
 	dma_addr_t src_dma;
 	struct ahash_edesc *edesc;
+	bool chained = false;
 	int ret = 0;
 	u32 options;
 	int sh_len;

-	src_nents = sg_count(req->src, req->nbytes);
-	dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
+	src_nents = sg_count(req->src, req->nbytes, &chained);
+	dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE,
+			   chained);
 	sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

 	/* allocate space for base edesc and hw desc commands, link tables */
@@ -1050,6 +1062,7 @@ static int ahash_digest(struct ahash_request *req)
 	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
 					    sec4_sg_bytes, DMA_TO_DEVICE);
 	edesc->src_nents = src_nents;
+	edesc->chained = chained;

 	sh_len = desc_len(sh_desc);
 	desc = edesc->hw_desc;
@@ -1157,6 +1170,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	struct ahash_edesc *edesc;
 	u32 *desc, *sh_desc = ctx->sh_desc_update_first;
 	dma_addr_t ptr = ctx->sh_desc_update_first_dma;
+	bool chained = false;
 	int ret = 0;
 	int sh_len;
@@ -1164,7 +1178,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	to_hash = in_len - *next_buflen;

 	if (to_hash) {
-		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen));
+		src_nents = __sg_count(req->src, req->nbytes - (*next_buflen),
+				       &chained);
 		sec4_sg_bytes = (1 + src_nents) *
 				 sizeof(struct sec4_sg_entry);
@@ -1181,6 +1196,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		}

 		edesc->src_nents = src_nents;
+		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
@@ -1191,7 +1207,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
 						    buf, *buflen);
 		src_map_to_sec4_sg(jrdev, req->src, src_nents,
-				   edesc->sec4_sg + 1);
+				   edesc->sec4_sg + 1, chained);
 		if (*next_buflen) {
 			sg_copy_part(next_buf, req->src, to_hash - *buflen,
 				     req->nbytes);
@@ -1258,10 +1274,11 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	int sec4_sg_bytes, sec4_sg_src_index, src_nents;
 	int digestsize = crypto_ahash_digestsize(ahash);
 	struct ahash_edesc *edesc;
+	bool chained = false;
 	int sh_len;
 	int ret = 0;

-	src_nents = __sg_count(req->src, req->nbytes);
+	src_nents = __sg_count(req->src, req->nbytes, &chained);
 	sec4_sg_src_index = 2;
 	sec4_sg_bytes = (sec4_sg_src_index + src_nents) *
 			 sizeof(struct sec4_sg_entry);
@@ -1279,6 +1296,7 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 	init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);

 	edesc->src_nents = src_nents;
+	edesc->chained = chained;
 	edesc->sec4_sg_bytes = sec4_sg_bytes;
 	edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 			 DESC_JOB_IO_LEN;
@@ -1289,7 +1307,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
 						state->buf_dma, buflen,
 						last_buflen);

-	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1);
+	src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
+			   chained);

 	append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
 			  req->nbytes, LDST_SGF);
@@ -1332,6 +1351,7 @@ static int ahash_update_first(struct ahash_request *req)
 	dma_addr_t src_dma;
 	u32 options;
 	struct ahash_edesc *edesc;
+	bool chained = false;
 	int ret = 0;
 	int sh_len;
@@ -1340,8 +1360,10 @@ static int ahash_update_first(struct ahash_request *req)
 	to_hash = req->nbytes - *next_buflen;

 	if (to_hash) {
-		src_nents = sg_count(req->src, req->nbytes - (*next_buflen));
-		dma_map_sg(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE);
+		src_nents = sg_count(req->src, req->nbytes - (*next_buflen),
+				     &chained);
+		dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+				   DMA_TO_DEVICE, chained);
 		sec4_sg_bytes = src_nents * sizeof(struct sec4_sg_entry);

 		/*
@@ -1357,6 +1379,7 @@ static int ahash_update_first(struct ahash_request *req)
 		}

 		edesc->src_nents = src_nents;
+		edesc->chained = chained;
 		edesc->sec4_sg_bytes = sec4_sg_bytes;
 		edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
 				 DESC_JOB_IO_LEN;
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index 2dda9e3a5e6..e0037c8ee24 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -37,7 +37,7 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
 		dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
 				   sg_dma_len(sg), offset);
 		sec4_sg_ptr++;
-		sg = sg_next(sg);
+		sg = scatterwalk_sg_next(sg);
 		sg_count--;
 	}
 	return sec4_sg_ptr - 1;
@@ -56,7 +56,8 @@ static inline void sg_to_sec4_sg_last(struct scatterlist *sg, int sg_count,
 }

 /* count number of elements in scatterlist */
-static inline int __sg_count(struct scatterlist *sg_list, int nbytes)
+static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
+			     bool *chained)
 {
 	struct scatterlist *sg = sg_list;
 	int sg_nents = 0;
@@ -65,7 +66,7 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes)
 		sg_nents++;
 		nbytes -= sg->length;
 		if (!sg_is_last(sg) && (sg + 1)->length == 0)
-			BUG(); /* Not support chaining */
+			*chained = true;
 		sg = scatterwalk_sg_next(sg);
 	}
@@ -73,9 +74,10 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes)
 }

 /* derive number of elements in scatterlist, but return 0 for 1 */
-static inline int sg_count(struct scatterlist *sg_list, int nbytes)
+static inline int sg_count(struct scatterlist *sg_list, int nbytes,
+			   bool *chained)
 {
-	int sg_nents = __sg_count(sg_list, nbytes);
+	int sg_nents = __sg_count(sg_list, nbytes, chained);

 	if (likely(sg_nents == 1))
 		return 0;
@@ -83,6 +85,38 @@ static inline int sg_count(struct scatterlist *sg_list, int nbytes)
 	return sg_nents;
 }

+static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
+			      unsigned int nents, enum dma_data_direction dir,
+			      bool chained)
+{
+	if (unlikely(chained)) {
+		int i;
+		for (i = 0; i < nents; i++) {
+			dma_map_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	} else {
+		dma_map_sg(dev, sg, nents, dir);
+	}
+	return nents;
+}
+
+static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
+				unsigned int nents, enum dma_data_direction dir,
+				bool chained)
+{
+	if (unlikely(chained)) {
+		int i;
+		for (i = 0; i < nents; i++) {
+			dma_unmap_sg(dev, sg, 1, dir);
+			sg = scatterwalk_sg_next(sg);
+		}
+	} else {
+		dma_unmap_sg(dev, sg, nents, dir);
+	}
+	return nents;
+}
+
 /* Copy from len bytes of sg to dest, starting from beginning */
 static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len)
 {
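
[Editorial note] On the two helpers added in sg_sw_sec4.h: dma_map_sg() cannot follow a chain entry into the next scatterlist table, so when chained is set the helpers walk the list with scatterwalk_sg_next() and map or unmap one entry at a time. Note they return the caller's nents unchanged rather than the mapped-entry count that dma_map_sg() would return. A hypothetical caller (the names mirror the driver, but this exact sequence appears nowhere in the patch) would pair them like this:

	/*
	 * Hypothetical usage of the helpers from sg_sw_sec4.h.
	 * sg_count() returns 0 for a single-entry list, so the
	 * "nents ? : 1" idiom seen throughout the driver maps at
	 * least one entry.
	 */
	static void example_map_for_caam(struct device *jrdev,
					 struct scatterlist *src, int nbytes)
	{
		bool chained = false;
		int nents = sg_count(src, nbytes, &chained);

		dma_map_sg_chained(jrdev, src, nents ? : 1, DMA_TO_DEVICE,
				   chained);

		/* ... build the sec4 link table and run the job ... */

		dma_unmap_sg_chained(jrdev, src, nents ? : 1, DMA_TO_DEVICE,
				     chained);
	}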