author    | SrujanaChalla <schalla@marvell.com>      | 2020-03-13 17:17:05 +0530
committer | Herbert Xu <herbert@gondor.apana.org.au> | 2020-03-20 14:36:51 +1100
commit    | 655ff1a1a727f9b83df317c4ad3b2f4a48f6206c (patch)
tree      | 72fefb2a5c3f701ae659fbac146d5423c4d861a2
parent    | 82ff493eb77cfd28fbfea6ce29bdaaffe238c0b2 (diff)
crypto: marvell - create common Kconfig and Makefile for Marvell
Creates common Kconfig and Makefile for Marvell crypto drivers.
Signed-off-by: SrujanaChalla <schalla@marvell.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
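
With this layout, drivers/crypto/marvell/ becomes a shared home for Marvell crypto drivers: the new hidden CRYPTO_DEV_MARVELL symbol controls the directory descent from drivers/crypto/Makefile, and each sub-driver selects it from the new drivers/crypto/marvell/Kconfig. A later sub-driver would hook in the same way; the sketch below uses a hypothetical CRYPTO_DEV_MARVELL_FOO option and foo/ directory (not part of this patch) purely to illustrate the pattern:

    # drivers/crypto/marvell/Kconfig (hypothetical follow-on entry, sketch only)
    config CRYPTO_DEV_MARVELL_FOO
            tristate "Marvell FOO accelerator driver (example)"
            select CRYPTO_DEV_MARVELL   # makes drivers/crypto/Makefile descend into marvell/
            help
              Illustrative only: an additional Marvell engine driver would add
              an entry like this next to CRYPTO_DEV_MARVELL_CESA.

    # drivers/crypto/marvell/Makefile (hypothetical follow-on line, sketch only)
    obj-$(CONFIG_CRYPTO_DEV_MARVELL_FOO) += foo/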
-rw-r--r-- | drivers/crypto/Kconfig                                                               | 15
-rw-r--r-- | drivers/crypto/Makefile                                                              |  2
-rw-r--r-- | drivers/crypto/marvell/Kconfig                                                       | 21
-rw-r--r-- | drivers/crypto/marvell/Makefile                                                      |  6
-rw-r--r-- | drivers/crypto/marvell/cesa/Makefile                                                 |  3
-rw-r--r-- | drivers/crypto/marvell/cesa/cesa.c (renamed from drivers/crypto/marvell/cesa.c)      |  0
-rw-r--r-- | drivers/crypto/marvell/cesa/cesa.h (renamed from drivers/crypto/marvell/cesa.h)      |  5
-rw-r--r-- | drivers/crypto/marvell/cesa/cipher.c (renamed from drivers/crypto/marvell/cipher.c)  | 15
-rw-r--r-- | drivers/crypto/marvell/cesa/hash.c (renamed from drivers/crypto/marvell/hash.c)      | 38
-rw-r--r-- | drivers/crypto/marvell/cesa/tdma.c (renamed from drivers/crypto/marvell/tdma.c)      | 10
10 files changed, 69 insertions, 46 deletions
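
Because CRYPTO_DEV_MARVELL has no prompt, existing configurations keep working: enabling the CESA option selects the new symbol automatically, and the resulting fragment looks roughly like this (a sketch; the exact values depend on the rest of the configuration):

    CONFIG_CRYPTO_DEV_MARVELL=m
    CONFIG_CRYPTO_DEV_MARVELL_CESA=m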
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index f81d4cf86143..2c887e4d005a 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -233,20 +233,6 @@ config CRYPTO_CRC32_S390
 
 	  It is available with IBM z13 or later.
 
-config CRYPTO_DEV_MARVELL_CESA
-	tristate "Marvell's Cryptographic Engine driver"
-	depends on PLAT_ORION || ARCH_MVEBU
-	select CRYPTO_LIB_AES
-	select CRYPTO_LIB_DES
-	select CRYPTO_SKCIPHER
-	select CRYPTO_HASH
-	select SRAM
-	help
-	  This driver allows you to utilize the Cryptographic Engines and
-	  Security Accelerator (CESA) which can be found on MVEBU and ORION
-	  platforms.
-	  This driver supports CPU offload through DMA transfers.
-
 config CRYPTO_DEV_NIAGARA2
 	tristate "Niagara2 Stream Processing Unit driver"
 	select CRYPTO_LIB_DES
@@ -606,6 +592,7 @@ config CRYPTO_DEV_MXS_DCP
 source "drivers/crypto/qat/Kconfig"
 source "drivers/crypto/cavium/cpt/Kconfig"
 source "drivers/crypto/cavium/nitrox/Kconfig"
+source "drivers/crypto/marvell/Kconfig"
 
 config CRYPTO_DEV_CAVIUM_ZIP
 	tristate "Cavium ZIP driver"
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d505d16ec00e..944ed7226e37 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
 obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
 obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
 obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell/
+obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/
 obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/
 obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o
 obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
diff --git a/drivers/crypto/marvell/Kconfig b/drivers/crypto/marvell/Kconfig
new file mode 100644
index 000000000000..8262b14c6287
--- /dev/null
+++ b/drivers/crypto/marvell/Kconfig
@@ -0,0 +1,21 @@
+#
+# Marvell crypto drivers configuration
+#
+
+config CRYPTO_DEV_MARVELL
+	tristate
+
+config CRYPTO_DEV_MARVELL_CESA
+	tristate "Marvell's Cryptographic Engine driver"
+	depends on PLAT_ORION || ARCH_MVEBU
+	select CRYPTO_LIB_AES
+	select CRYPTO_LIB_DES
+	select CRYPTO_SKCIPHER
+	select CRYPTO_HASH
+	select SRAM
+	select CRYPTO_DEV_MARVELL
+	help
+	  This driver allows you to utilize the Cryptographic Engines and
+	  Security Accelerator (CESA) which can be found on MVEBU and ORION
+	  platforms.
+	  This driver supports CPU offload through DMA transfers.
diff --git a/drivers/crypto/marvell/Makefile b/drivers/crypto/marvell/Makefile
index b27cab65e696..2030b0b55e91 100644
--- a/drivers/crypto/marvell/Makefile
+++ b/drivers/crypto/marvell/Makefile
@@ -1,3 +1,3 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
-marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += cesa/
diff --git a/drivers/crypto/marvell/cesa/Makefile b/drivers/crypto/marvell/cesa/Makefile
new file mode 100644
index 000000000000..b27cab65e696
--- /dev/null
+++ b/drivers/crypto/marvell/cesa/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_MARVELL_CESA) += marvell-cesa.o
+marvell-cesa-objs := cesa.o cipher.o hash.o tdma.o
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index 8a5f0b0bdf77..8a5f0b0bdf77 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa/cesa.h
index f1ed3b85c0d2..e8632d5f343f 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa/cesa.h
@@ -436,7 +436,7 @@ struct mv_cesa_dev {
  * @queue:	fifo of the pending crypto requests
  * @load:	engine load counter, useful for load balancing
  * @chain:	list of the current tdma descriptors being processed
- *           by this engine.
+ *		by this engine.
  * @complete_queue:	fifo of the processed requests by the engine
  *
  * Structure storing CESA engine information.
@@ -467,7 +467,7 @@ struct mv_cesa_engine {
  * @step:	launch the crypto operation on the next chunk
  * @cleanup:	cleanup the crypto request (release associated data)
  * @complete:	complete the request, i.e copy result or context from sram when
- *            needed.
+ *		needed.
  */
 struct mv_cesa_req_ops {
 	int (*process)(struct crypto_async_request *req, u32 status);
@@ -734,6 +734,7 @@ static inline struct mv_cesa_engine *mv_cesa_select_engine(int weight)
 	for (i = 0; i < cesa_dev->caps->nengines; i++) {
 		struct mv_cesa_engine *engine = cesa_dev->engines + i;
 		u32 load = atomic_read(&engine->load);
+
 		if (load < min_load) {
 			min_load = load;
 			selected = engine;
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cesa/cipher.c
index c24f34a48cef..f133c2ccb5ae 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cesa/cipher.c
@@ -106,8 +106,8 @@ static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
 
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
 	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
 
@@ -178,6 +178,7 @@ static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
 {
 	struct skcipher_request *skreq = skcipher_request_cast(req);
 	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
+
 	creq->base.engine = engine;
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
@@ -336,7 +337,8 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	do {
 		struct mv_cesa_op_ctx *op;
 
-		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx, flags);
+		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
+					flags);
 		if (IS_ERR(op)) {
 			ret = PTR_ERR(op);
 			goto err_free_tdma;
@@ -365,9 +367,10 @@ static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
 	} while (mv_cesa_skcipher_req_iter_next_op(&iter));
 
 	/* Add output data for IV */
-	ret = mv_cesa_dma_add_result_op(&basereq->chain, CESA_SA_CFG_SRAM_OFFSET,
-				    CESA_SA_DATA_SRAM_OFFSET,
-				    CESA_TDMA_SRC_IN_SRAM, flags);
+	ret = mv_cesa_dma_add_result_op(&basereq->chain,
+					CESA_SA_CFG_SRAM_OFFSET,
+					CESA_SA_DATA_SRAM_OFFSET,
+					CESA_TDMA_SRC_IN_SRAM, flags);
 
 	if (ret)
 		goto err_free_tdma;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/cesa/hash.c
index a2b35fb0fb89..b971284332b6 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/cesa/hash.c
@@ -141,9 +141,11 @@ static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
 
 	if (creq->algo_le) {
 		__le64 bits = cpu_to_le64(creq->len << 3);
+
 		memcpy(buf + padlen, &bits, sizeof(bits));
 	} else {
 		__be64 bits = cpu_to_be64(creq->len << 3);
+
 		memcpy(buf + padlen, &bits, sizeof(bits));
 	}
 
@@ -168,7 +170,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 	if (!sreq->offset) {
 		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 		for (i = 0; i < digsize / 4; i++)
-			writel_relaxed(creq->state[i], engine->regs + CESA_IVDIG(i));
+			writel_relaxed(creq->state[i],
+				       engine->regs + CESA_IVDIG(i));
 	}
 
 	if (creq->cache_ptr)
@@ -245,8 +248,8 @@ static void mv_cesa_ahash_std_step(struct ahash_request *req)
 
 	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
 	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
 
@@ -329,11 +332,12 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));
 
 	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
-	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_RESULT) {
+	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
+	    CESA_TDMA_RESULT) {
 		__le32 *data = NULL;
 
 		/*
-		 * Result is already in the correct endianess when the SA is
+		 * Result is already in the correct endianness when the SA is
 		 * used
 		 */
 		data = creq->base.chain.last->op->ctx.hash.hash;
@@ -347,9 +351,9 @@ static void mv_cesa_ahash_complete(struct crypto_async_request *req)
 						       CESA_IVDIG(i));
 	if (creq->last_req) {
 		/*
-		* Hardware's MD5 digest is in little endian format, but
-		* SHA in big endian format
-		*/
+		 * Hardware's MD5 digest is in little endian format, but
+		 * SHA in big endian format
+		 */
 		if (creq->algo_le) {
 			__le32 *result = (void *)ahashreq->result;
 
@@ -439,7 +443,8 @@ static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
 	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
 	bool cached = false;
 
-	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE && !creq->last_req) {
+	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
+	    !creq->last_req) {
 		cached = true;
 
 		if (!req->nbytes)
@@ -648,7 +653,8 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
 			if (!mv_cesa_ahash_req_iter_next_op(&iter))
 				break;
 
-			op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
+			op = mv_cesa_dma_add_frag(&basereq->chain,
+						  &creq->op_tmpl,
 						  frag_len, flags);
 			if (IS_ERR(op)) {
 				ret = PTR_ERR(op);
@@ -920,7 +926,7 @@ struct ahash_alg mv_md5_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
 
@@ -990,7 +996,7 @@ struct ahash_alg mv_sha1_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
 
@@ -1063,7 +1069,7 @@ struct ahash_alg mv_sha256_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
 			.cra_init = mv_cesa_ahash_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
 
@@ -1297,7 +1303,7 @@ struct ahash_alg mv_ahmac_md5_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
 
@@ -1367,7 +1373,7 @@ struct ahash_alg mv_ahmac_sha1_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
 
@@ -1437,6 +1443,6 @@ struct ahash_alg mv_ahmac_sha256_alg = {
 			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
 			.cra_init = mv_cesa_ahmac_cra_init,
 			.cra_module = THIS_MODULE,
-		 }
+		}
 	}
 };
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/cesa/tdma.c
index 45939d53e8d6..b81ee276fe0e 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/cesa/tdma.c
@@ -50,8 +50,8 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
 			 engine->regs + CESA_SA_CFG);
 	writel_relaxed(dreq->chain.first->cur_dma,
 		       engine->regs + CESA_TDMA_NEXT_ADDR);
-	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
-	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
+	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
+		CESA_SA_CMD_EN_CESA_SA_ACCL0);
 	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
 }
 
@@ -175,8 +175,10 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 			break;
 		}
 
-		/* Save the last request in error to engine->req, so that the core
-		 * knows which request was fautly */
+		/*
+		 * Save the last request in error to engine->req, so that the core
+		 * knows which request was fautly
+		 */
 		if (res) {
 			spin_lock_bh(&engine->lock);
 			engine->req = req;
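
Since the relocated objects now live one directory deeper, a quick compile check of just the moved code can be done with kbuild's directory target from an already configured tree (a sketch; assumes CRYPTO_DEV_MARVELL_CESA is enabled in .config):

    make drivers/crypto/marvell/cesa/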