author		Linus Torvalds <torvalds@linux-foundation.org>	2020-04-01 14:47:40 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-04-01 14:47:40 -0700
commit		72f35423e8a6a2451c202f52cb8adb92b08592ec (patch)
tree		2cc5c715631a59d51b6445143e03a187e8e394f6 /drivers/crypto/caam/caamalg.c
parent		890f0b0d27dc400679b9a91d04ca44f5ee4c19c0 (diff)
parent		fcb90d51c375d09a034993cda262b68499e233a4 (diff)
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
 "API:
   - Fix out-of-sync IVs in self-test for IPsec AEAD algorithms

  Algorithms:
   - Use formally verified implementation of x86/curve25519

  Drivers:
   - Enhance hwrng support in caam
   - Use crypto_engine for skcipher/aead/rsa/hash in caam
   - Add Xilinx AES driver
   - Add uacce driver
   - Register zip engine to uacce in hisilicon
   - Add support for OCTEON TX CPT engine in marvell"
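
Of the driver items quoted above, it is the crypto_engine conversion that reshapes drivers/crypto/caam/caamalg.c in the diff below. For orientation, a minimal sketch of the contract a driver accepts when it adopts crypto_engine follows. The my_-prefixed names are hypothetical; the engine calls themselves (crypto_engine_alloc_init(), crypto_engine_start(), crypto_engine_exit(), the do_one_request hook, and the transfer/finalize helpers used in the diff) are the real interface from include/crypto/engine.h in this kernel generation.

#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>

/*
 * Per-tfm context: the engine core expects struct crypto_engine_ctx (which
 * carries the do_one_request callback) at offset 0 -- the same reason the
 * caam patch below places enginectx first in struct caam_ctx.
 */
struct my_ctx {
        struct crypto_engine_ctx enginectx;
        /* ... hardware/session state ... */
};

/*
 * Engine worker callback, invoked once per queued request.  Returning 0
 * means "submitted to hardware"; the driver reports the final result later
 * via crypto_finalize_skcipher_request().
 */
static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
        struct skcipher_request *req = skcipher_request_cast(areq);

        /* program the hardware with req here */
        return 0;
}

static int my_init_tfm(struct crypto_skcipher *tfm)
{
        struct my_ctx *ctx = crypto_skcipher_ctx(tfm);

        ctx->enginectx.op.do_one_request = my_do_one_request;
        return 0;
}

/* Probe-time wiring: one engine, i.e. one queue plus kthread, per device. */
static struct crypto_engine *my_engine_setup(struct device *dev)
{
        struct crypto_engine *engine = crypto_engine_alloc_init(dev, false);
        int ret;

        if (!engine)
                return ERR_PTR(-ENOMEM);

        ret = crypto_engine_start(engine);
        if (ret) {
                crypto_engine_exit(engine);
                return ERR_PTR(ret);
        }

        return engine;
}

On completion the driver calls crypto_finalize_skcipher_request(engine, req, err), which both completes the request and lets the engine dequeue the next backlogged one; that is exactly the bklog branch visible in skcipher_crypt_done() in the diff below.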
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
crypto: af_alg - bool type cosmetics
crypto: arm[64]/poly1305 - add artifact to .gitignore files
crypto: caam - limit single JD RNG output to maximum of 16 bytes
crypto: caam - enable prediction resistance in HRWNG
bus: fsl-mc: add api to retrieve mc version
crypto: caam - invalidate entropy register during RNG initialization
crypto: caam - check if RNG job failed
crypto: caam - simplify RNG implementation
crypto: caam - drop global context pointer and init_done
crypto: caam - use struct hwrng's .init for initialization
crypto: caam - allocate RNG instantiation descriptor with GFP_DMA
crypto: ccree - remove duplicated include from cc_aead.c
crypto: chelsio - remove set but not used variable 'adap'
crypto: marvell - enable OcteonTX cpt options for build
crypto: marvell - add the Virtual Function driver for CPT
crypto: marvell - add support for OCTEON TX CPT engine
crypto: marvell - create common Kconfig and Makefile for Marvell
crypto: arm/neon - memzero_explicit aes-cbc key
crypto: bcm - Use scnprintf() for avoiding potential buffer overflow
crypto: atmel-i2c - Fix wakeup fail
...
Diffstat (limited to 'drivers/crypto/caam/caamalg.c')
-rw-r--r--	drivers/crypto/caam/caamalg.c	415
1 file changed, 177 insertions(+), 238 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ef1a65f4fc92..b7bb7c30adeb 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -56,6 +56,7 @@
 #include "sg_sw_sec4.h"
 #include "key_gen.h"
 #include "caamalg_desc.h"
+#include <crypto/engine.h>
 
 /*
  * crypto alg
@@ -101,6 +102,7 @@ struct caam_skcipher_alg {
  * per-session context
  */
 struct caam_ctx {
+        struct crypto_engine_ctx enginectx;
         u32 sh_desc_enc[DESC_MAX_USED_LEN];
         u32 sh_desc_dec[DESC_MAX_USED_LEN];
         u8 key[CAAM_MAX_KEY_SIZE];
@@ -114,6 +116,14 @@ struct caam_ctx {
         unsigned int authsize;
 };
 
+struct caam_skcipher_req_ctx {
+        struct skcipher_edesc *edesc;
+};
+
+struct caam_aead_req_ctx {
+        struct aead_edesc *edesc;
+};
+
 static int aead_null_set_sh_desc(struct crypto_aead *aead)
 {
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
@@ -858,6 +868,7 @@ static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
  * @mapped_src_nents: number of segments in input h/w link table
  * @mapped_dst_nents: number of segments in output h/w link table
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -868,6 +879,7 @@ struct aead_edesc {
         int mapped_src_nents;
         int mapped_dst_nents;
         int sec4_sg_bytes;
+        bool bklog;
         dma_addr_t sec4_sg_dma;
         struct sec4_sg_entry *sec4_sg;
         u32 hw_desc[];
@@ -881,6 +893,7 @@ struct aead_edesc {
  * @mapped_dst_nents: number of segments in output h/w link table
  * @iv_dma: dma address of iv for checking continuity and link table
  * @sec4_sg_bytes: length of dma mapped sec4_sg space
+ * @bklog: stored to determine if the request needs backlog
  * @sec4_sg_dma: bus physical mapped address of h/w link table
  * @sec4_sg: pointer to h/w link table
  * @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -893,9 +906,10 @@ struct skcipher_edesc {
         int mapped_dst_nents;
         dma_addr_t iv_dma;
         int sec4_sg_bytes;
+        bool bklog;
         dma_addr_t sec4_sg_dma;
         struct sec4_sg_entry *sec4_sg;
-        u32 hw_desc[0];
+        u32 hw_desc[];
 };
 
 static void caam_unmap(struct device *dev, struct scatterlist *src,
@@ -941,37 +955,18 @@ static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
                    edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
 }
 
-static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                              void *context)
-{
-        struct aead_request *req = context;
-        struct aead_edesc *edesc;
-        int ecode = 0;
-
-        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
-        edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
-
-        if (err)
-                ecode = caam_jr_strstatus(jrdev, err);
-
-        aead_unmap(jrdev, edesc, req);
-
-        kfree(edesc);
-
-        aead_request_complete(req, ecode);
-}
-
-static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                              void *context)
+static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+                            void *context)
 {
         struct aead_request *req = context;
+        struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
         struct aead_edesc *edesc;
         int ecode = 0;
 
         dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-        edesc = container_of(desc, struct aead_edesc, hw_desc[0]);
+        edesc = rctx->edesc;
 
         if (err)
                 ecode = caam_jr_strstatus(jrdev, err);
@@ -980,61 +975,30 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
         kfree(edesc);
 
-        aead_request_complete(req, ecode);
-}
-
-static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                  void *context)
-{
-        struct skcipher_request *req = context;
-        struct skcipher_edesc *edesc;
-        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-        int ivsize = crypto_skcipher_ivsize(skcipher);
-        int ecode = 0;
-
-        dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
-
-        edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
-
-        if (err)
-                ecode = caam_jr_strstatus(jrdev, err);
-
-        skcipher_unmap(jrdev, edesc, req);
-
         /*
-         * The crypto API expects us to set the IV (req->iv) to the last
-         * ciphertext block (CBC mode) or last counter (CTR mode).
-         * This is used e.g. by the CTS mode.
+         * If no backlog flag, the completion of the request is done
+         * by CAAM, not crypto engine.
         */
-        if (ivsize && !ecode) {
-                memcpy(req->iv, (u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes,
-                       ivsize);
-                print_hex_dump_debug("dstiv  @"__stringify(__LINE__)": ",
-                                     DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
-                                     edesc->src_nents > 1 ? 100 : ivsize, 1);
-        }
-
-        caam_dump_sg("dst    @" __stringify(__LINE__)": ",
-                     DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
-                     edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
-
-        kfree(edesc);
-
-        skcipher_request_complete(req, ecode);
+        if (!edesc->bklog)
+                aead_request_complete(req, ecode);
+        else
+                crypto_finalize_aead_request(jrp->engine, req, ecode);
 }
 
-static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
-                                  void *context)
+static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
+                                void *context)
 {
         struct skcipher_request *req = context;
         struct skcipher_edesc *edesc;
+        struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
         struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+        struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev);
         int ivsize = crypto_skcipher_ivsize(skcipher);
         int ecode = 0;
 
         dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
 
-        edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
+        edesc = rctx->edesc;
 
         if (err)
                 ecode = caam_jr_strstatus(jrdev, err);
@@ -1060,7 +1024,14 @@ static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
 
         kfree(edesc);
 
-        skcipher_request_complete(req, ecode);
+        /*
+         * If no backlog flag, the completion of the request is done
+         * by CAAM, not crypto engine.
+         */
+        if (!edesc->bklog)
+                skcipher_request_complete(req, ecode);
+        else
+                crypto_finalize_skcipher_request(jrp->engine, req, ecode);
 }
 
 /*
@@ -1306,6 +1277,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
+        struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
         int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
@@ -1406,6 +1378,9 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
         edesc->mapped_dst_nents = mapped_dst_nents;
         edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                          desc_bytes;
+
+        rctx->edesc = edesc;
+
         *all_contig_ptr = !(mapped_src_nents > 1);
 
         sec4_sg_index = 0;
@@ -1436,41 +1411,34 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
         return edesc;
 }
 
-static int gcm_encrypt(struct aead_request *req)
+static int aead_enqueue_req(struct device *jrdev, struct aead_request *req)
 {
-        struct aead_edesc *edesc;
-        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-        struct device *jrdev = ctx->jrdev;
-        bool all_contig;
-        u32 *desc;
-        int ret = 0;
-
-        /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, true);
-        if (IS_ERR(edesc))
-                return PTR_ERR(edesc);
-
-        /* Create and submit job descriptor */
-        init_gcm_job(req, edesc, all_contig, true);
+        struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
+        struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+        struct aead_edesc *edesc = rctx->edesc;
+        u32 *desc = edesc->hw_desc;
+        int ret;
 
-        print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
-                             DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-                             desc_bytes(edesc->hw_desc), 1);
+        /*
+         * Only the backlog request are sent to crypto-engine since the others
+         * can be handled by CAAM, if free, especially since JR has up to 1024
+         * entries (more than the 10 entries from crypto-engine).
+         */
+        if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+                ret = crypto_transfer_aead_request_to_engine(jrpriv->engine,
                                                             req);
+        else
+                ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req);
 
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
                 aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
+                kfree(rctx->edesc);
         }
 
         return ret;
 }
 
-static int chachapoly_encrypt(struct aead_request *req)
+static inline int chachapoly_crypt(struct aead_request *req, bool encrypt)
 {
         struct aead_edesc *edesc;
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -1478,180 +1446,130 @@ static int chachapoly_encrypt(struct aead_request *req)
         struct device *jrdev = ctx->jrdev;
         bool all_contig;
         u32 *desc;
-        int ret;
 
         edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
-                                 true);
+                                 encrypt);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
         desc = edesc->hw_desc;
 
-        init_chachapoly_job(req, edesc, all_contig, true);
+        init_chachapoly_job(req, edesc, all_contig, encrypt);
         print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
                              1);
 
-        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
-        }
-
-        return ret;
+        return aead_enqueue_req(jrdev, req);
 }
 
-static int chachapoly_decrypt(struct aead_request *req)
+static int chachapoly_encrypt(struct aead_request *req)
 {
-        struct aead_edesc *edesc;
-        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-        struct device *jrdev = ctx->jrdev;
-        bool all_contig;
-        u32 *desc;
-        int ret;
-
-        edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
-                                 false);
-        if (IS_ERR(edesc))
-                return PTR_ERR(edesc);
-
-        desc = edesc->hw_desc;
-
-        init_chachapoly_job(req, edesc, all_contig, false);
-        print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
-                             DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
-                             1);
-
-        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
-        }
-
-        return ret;
+        return chachapoly_crypt(req, true);
 }
 
-static int ipsec_gcm_encrypt(struct aead_request *req)
+static int chachapoly_decrypt(struct aead_request *req)
 {
-        return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+        return chachapoly_crypt(req, false);
 }
 
-static int aead_encrypt(struct aead_request *req)
+static inline int aead_crypt(struct aead_request *req, bool encrypt)
 {
         struct aead_edesc *edesc;
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
         bool all_contig;
-        u32 *desc;
-        int ret = 0;
 
         /* allocate extended descriptor */
         edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-                                 &all_contig, true);
+                                 &all_contig, encrypt);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
         /* Create and submit job descriptor */
-        init_authenc_job(req, edesc, all_contig, true);
+        init_authenc_job(req, edesc, all_contig, encrypt);
 
         print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                              desc_bytes(edesc->hw_desc), 1);
 
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
-        }
+        return aead_enqueue_req(jrdev, req);
+}
 
-        return ret;
+static int aead_encrypt(struct aead_request *req)
+{
+        return aead_crypt(req, true);
 }
 
-static int gcm_decrypt(struct aead_request *req)
+static int aead_decrypt(struct aead_request *req)
 {
-        struct aead_edesc *edesc;
-        struct crypto_aead *aead = crypto_aead_reqtfm(req);
-        struct caam_ctx *ctx = crypto_aead_ctx(aead);
-        struct device *jrdev = ctx->jrdev;
-        bool all_contig;
-        u32 *desc;
-        int ret = 0;
+        return aead_crypt(req, false);
+}
 
-        /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, false);
-        if (IS_ERR(edesc))
-                return PTR_ERR(edesc);
+static int aead_do_one_req(struct crypto_engine *engine, void *areq)
+{
+        struct aead_request *req = aead_request_cast(areq);
+        struct caam_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req));
+        struct caam_aead_req_ctx *rctx = aead_request_ctx(req);
+        u32 *desc = rctx->edesc->hw_desc;
+        int ret;
 
-        /* Create and submit job descriptor*/
-        init_gcm_job(req, edesc, all_contig, false);
+        rctx->edesc->bklog = true;
 
-        print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
-                             DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-                             desc_bytes(edesc->hw_desc), 1);
+        ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req);
 
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
+        if (ret != -EINPROGRESS) {
+                aead_unmap(ctx->jrdev, rctx->edesc, req);
+                kfree(rctx->edesc);
         } else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
+                ret = 0;
         }
 
         return ret;
 }
 
-static int ipsec_gcm_decrypt(struct aead_request *req)
-{
-        return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
-}
-
-static int aead_decrypt(struct aead_request *req)
+static inline int gcm_crypt(struct aead_request *req, bool encrypt)
 {
         struct aead_edesc *edesc;
         struct crypto_aead *aead = crypto_aead_reqtfm(req);
         struct caam_ctx *ctx = crypto_aead_ctx(aead);
         struct device *jrdev = ctx->jrdev;
         bool all_contig;
-        u32 *desc;
-        int ret = 0;
-
-        caam_dump_sg("dec src@" __stringify(__LINE__)": ",
-                     DUMP_PREFIX_ADDRESS, 16, 4, req->src,
-                     req->assoclen + req->cryptlen, 1);
 
         /* allocate extended descriptor */
-        edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN,
-                                 &all_contig, false);
+        edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig,
+                                 encrypt);
         if (IS_ERR(edesc))
                 return PTR_ERR(edesc);
 
-        /* Create and submit job descriptor*/
-        init_authenc_job(req, edesc, all_contig, false);
+        /* Create and submit job descriptor */
+        init_gcm_job(req, edesc, all_contig, encrypt);
 
         print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                              desc_bytes(edesc->hw_desc), 1);
 
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
-                aead_unmap(jrdev, edesc, req);
-                kfree(edesc);
-        }
+        return aead_enqueue_req(jrdev, req);
+}
 
-        return ret;
+static int gcm_encrypt(struct aead_request *req)
+{
+        return gcm_crypt(req, true);
+}
+
+static int gcm_decrypt(struct aead_request *req)
+{
+        return gcm_crypt(req, false);
+}
+
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+        return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+        return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req);
 }
 
 /*
@@ -1662,6 +1580,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
 {
         struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
         struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+        struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
         struct device *jrdev = ctx->jrdev;
         gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
                        GFP_KERNEL : GFP_ATOMIC;
@@ -1760,6 +1679,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
         edesc->sec4_sg_bytes = sec4_sg_bytes;
         edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
                                                   desc_bytes);
+        rctx->edesc = edesc;
 
         /* Make sure IV is located in a DMAable area */
         if (ivsize) {
@@ -1815,49 +1735,35 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
         return edesc;
 }
 
-static int skcipher_encrypt(struct skcipher_request *req)
+static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
 {
-        struct skcipher_edesc *edesc;
-        struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
-        struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
-        struct device *jrdev = ctx->jrdev;
-        u32 *desc;
-        int ret = 0;
-
-        if (!req->cryptlen)
-                return 0;
-
-        /* allocate extended descriptor */
-        edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
-        if (IS_ERR(edesc))
-                return PTR_ERR(edesc);
-
-        /* Create and submit job descriptor*/
-        init_skcipher_job(req, edesc, true);
+        struct skcipher_request *req = skcipher_request_cast(areq);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
+        struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req);
+        u32 *desc = rctx->edesc->hw_desc;
+        int ret;
 
-        print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
-                             DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
-                             desc_bytes(edesc->hw_desc), 1);
+        rctx->edesc->bklog = true;
 
-        desc = edesc->hw_desc;
-        ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
+        ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req);
 
-        if (!ret) {
-                ret = -EINPROGRESS;
+        if (ret != -EINPROGRESS) {
+                skcipher_unmap(ctx->jrdev, rctx->edesc, req);
+                kfree(rctx->edesc);
         } else {
-                skcipher_unmap(jrdev, edesc, req);
-                kfree(edesc);
+                ret = 0;
         }
 
         return ret;
 }
 
-static int skcipher_decrypt(struct skcipher_request *req)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
 {
         struct skcipher_edesc *edesc;
         struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
         struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
         struct device *jrdev = ctx->jrdev;
+        struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);
         u32 *desc;
         int ret = 0;
 
@@ -1870,17 +1776,25 @@ static int skcipher_decrypt(struct skcipher_request *req)
                 return PTR_ERR(edesc);
 
         /* Create and submit job descriptor*/
-        init_skcipher_job(req, edesc, false);
-        desc = edesc->hw_desc;
+        init_skcipher_job(req, edesc, encrypt);
 
         print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ",
                              DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
                              desc_bytes(edesc->hw_desc), 1);
 
-        ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
-        if (!ret) {
-                ret = -EINPROGRESS;
-        } else {
+        desc = edesc->hw_desc;
+        /*
+         * Only the backlog request are sent to crypto-engine since the others
+         * can be handled by CAAM, if free, especially since JR has up to 1024
+         * entries (more than the 10 entries from crypto-engine).
+         */
+        if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+                ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine,
                                                                 req);
+        else
+                ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req);
+
+        if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
                 skcipher_unmap(jrdev, edesc, req);
                 kfree(edesc);
         }
@@ -1888,6 +1802,16 @@ static int skcipher_decrypt(struct skcipher_request *req)
         return ret;
 }
 
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+        return skcipher_crypt(req, true);
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+        return skcipher_crypt(req, false);
+}
+
 static struct caam_skcipher_alg driver_algs[] = {
         {
                 .skcipher = {
@@ -3391,6 +3315,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 {
         dma_addr_t dma_addr;
         struct caam_drv_private *priv;
+        const size_t sh_desc_enc_offset = offsetof(struct caam_ctx,
+                                                   sh_desc_enc);
 
         ctx->jrdev = caam_jr_alloc();
         if (IS_ERR(ctx->jrdev)) {
@@ -3406,7 +3332,8 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 
         dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc,
                                         offsetof(struct caam_ctx,
-                                                 sh_desc_enc_dma),
+                                                 sh_desc_enc_dma) -
+                                        sh_desc_enc_offset,
                                         ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
         if (dma_mapping_error(ctx->jrdev, dma_addr)) {
                 dev_err(ctx->jrdev, "unable to map key, shared descriptors\n");
@@ -3416,8 +3343,10 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
 
         ctx->sh_desc_enc_dma = dma_addr;
         ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
-                                                   sh_desc_dec);
-        ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
+                                                   sh_desc_dec) -
+                                          sh_desc_enc_offset;
+        ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) -
+                       sh_desc_enc_offset;
 
         /* copy descriptor header template value */
         ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
@@ -3431,6 +3360,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
         struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
         struct caam_skcipher_alg *caam_alg =
                 container_of(alg, typeof(*caam_alg), skcipher);
+        struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+        crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+
+        ctx->enginectx.op.do_one_request = skcipher_do_one_req;
 
         return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
                                 false);
@@ -3443,13 +3377,18 @@ static int caam_aead_init(struct crypto_aead *tfm)
                 container_of(alg, struct caam_aead_alg, aead);
         struct caam_ctx *ctx = crypto_aead_ctx(tfm);
 
+        crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx));
+
+        ctx->enginectx.op.do_one_request = aead_do_one_req;
+
         return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp);
 }
 
 static void caam_exit_common(struct caam_ctx *ctx)
 {
         dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma,
-                               offsetof(struct caam_ctx, sh_desc_enc_dma),
+                               offsetof(struct caam_ctx, sh_desc_enc_dma) -
+                               offsetof(struct caam_ctx, sh_desc_enc),
                                ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
 
         caam_jr_free(ctx->jrdev);
 }
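
The dispatch rule this diff introduces (only CRYPTO_TFM_REQ_MAY_BACKLOG requests travel through crypto-engine; everything else goes straight to the job ring) is controlled entirely by a flag the API user sets. A caller-side sketch for context, with hypothetical names (my_result, my_cb, my_encrypt_one) and "cbc(aes)" picked only as an example algorithm:

#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct my_result {
        struct completion done;
        int err;
};

static void my_cb(struct crypto_async_request *areq, int err)
{
        struct my_result *res = areq->data;

        /* a backlogged request signals -EINPROGRESS once it enters the
         * hardware queue; only the later invocation carries the result */
        if (err == -EINPROGRESS)
                return;

        res->err = err;
        complete(&res->done);
}

static int my_encrypt_one(struct scatterlist *sg, unsigned int len,
                          const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
        struct skcipher_request *req;
        struct my_result res;
        int ret;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                ret = -ENOMEM;
                goto out_tfm;
        }

        ret = crypto_skcipher_setkey(tfm, key, keylen);
        if (ret)
                goto out_req;

        init_completion(&res.done);

        /*
         * MAY_BACKLOG is the bit the caam code above tests: with it set, a
         * saturated device queues the request (the crypto-engine path) and
         * -EBUSY means "accepted, backlogged"; without it, -EBUSY means the
         * request was dropped and may be retried.
         */
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                      CRYPTO_TFM_REQ_MAY_SLEEP, my_cb, &res);
        skcipher_request_set_crypt(req, sg, sg, len, iv);

        ret = crypto_skcipher_encrypt(req);
        if (ret == -EINPROGRESS || ret == -EBUSY) {
                wait_for_completion(&res.done);
                ret = res.err;
        }

out_req:
        skcipher_request_free(req);
out_tfm:
        crypto_free_skcipher(tfm);
        return ret;
}

The 1024-versus-10 comment in the diff explains the split: the job ring can absorb far more in-flight requests than the engine's software queue, so the driver only pays the engine's serialization cost when the caller has explicitly asked for backlog semantics.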