Diffstat (limited to 'drivers/crypto/ccree/cc_hash.c')
-rw-r--r-- drivers/crypto/ccree/cc_hash.c | 228
1 file changed, 91 insertions(+), 137 deletions(-)
diff --git a/drivers/crypto/ccree/cc_hash.c b/drivers/crypto/ccree/cc_hash.c
index 912e5ce5079d..d5310783af15 100644
--- a/drivers/crypto/ccree/cc_hash.c
+++ b/drivers/crypto/ccree/cc_hash.c
@@ -20,8 +20,8 @@
 #define CC_SM3_HASH_LEN_SIZE 8
 
 struct cc_hash_handle {
-	cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM*/
-	cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
+	u32 digest_len_sram_addr; /* const value in SRAM*/
+	u32 larval_digest_sram_addr; /* const value in SRAM */
 	struct list_head hash_list;
 };
 
@@ -39,12 +39,19 @@ static const u32 cc_sha256_init[] = {
 	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
 static const u32 cc_digest_len_sha512_init[] = {
 	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
-static u64 cc_sha384_init[] = {
-	SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
-	SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
-static u64 cc_sha512_init[] = {
-	SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
-	SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
+
+/*
+ * Due to the way the HW works, every double word in the SHA384 and SHA512
+ * larval hashes must be stored in hi/lo order
+ */
+#define hilo(x) upper_32_bits(x), lower_32_bits(x)
+static const u32 cc_sha384_init[] = {
+	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
+	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
+static const u32 cc_sha512_init[] = {
+	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
+	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };
+
 static const u32 cc_sm3_init[] = {
 	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
 	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
@@ -342,7 +349,6 @@ static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
 	/* Get final MAC result */
 	hw_desc_init(&desc[idx]);
 	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
-	/* TODO */
 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 		      NS_BIT, 1);
 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -422,8 +428,7 @@ static int cc_hash_digest(struct ahash_request *req)
 	bool is_hmac = ctx->is_hmac;
 	struct cc_crypto_req cc_req = {};
 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
-	cc_sram_addr_t larval_digest_addr =
-		cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
+	u32 larval_digest_addr;
 	int idx = 0;
 	int rc = 0;
 	gfp_t flags = cc_gfp_flags(&req->base);
@@ -465,6 +470,8 @@ static int cc_hash_digest(struct ahash_request *req)
 		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
 			     ctx->inter_digestsize, NS_BIT);
 	} else {
+		larval_digest_addr = cc_larval_digest_addr(ctx->drvdata,
+							   ctx->hash_mode);
 		set_din_sram(&desc[idx], larval_digest_addr,
 			     ctx->inter_digestsize);
 	}
@@ -726,7 +733,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 	int digestsize = 0;
 	int i, idx = 0, rc = 0;
 	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
-	cc_sram_addr_t larval_addr;
+	u32 larval_addr;
 	struct device *dev;
 
 	ctx = crypto_ahash_ctx(ahash);
@@ -752,7 +759,7 @@ static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
 		return -ENOMEM;
 
 	ctx->key_params.key_dma_addr =
-		dma_map_single(dev, (void *)ctx->key_params.key, keylen,
+		dma_map_single(dev, ctx->key_params.key, keylen,
 			       DMA_TO_DEVICE);
 	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
 		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
@@ -1067,8 +1074,8 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
 	ctx->key_params.keylen = 0;
 
 	ctx->digest_buff_dma_addr =
-		dma_map_single(dev, (void *)ctx->digest_buff,
-			       sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
+		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
+			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
 		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
 			sizeof(ctx->digest_buff), ctx->digest_buff);
@@ -1079,7 +1086,7 @@ static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
 		&ctx->digest_buff_dma_addr);
 
 	ctx->opad_tmp_keys_dma_addr =
-		dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
+		dma_map_single(dev, ctx->opad_tmp_keys_buff,
 			       sizeof(ctx->opad_tmp_keys_buff),
 			       DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
@@ -1196,8 +1203,8 @@ static int cc_mac_update(struct ahash_request *req)
 	idx++;
 
 	/* Setup request structure */
-	cc_req.user_cb = (void *)cc_update_complete;
-	cc_req.user_arg = (void *)req;
+	cc_req.user_cb = cc_update_complete;
+	cc_req.user_arg = req;
 
 	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
 	if (rc != -EINPROGRESS && rc != -EBUSY) {
@@ -1254,8 +1261,8 @@ static int cc_mac_final(struct ahash_request *req)
 	}
 
 	/* Setup request structure */
-	cc_req.user_cb = (void *)cc_hash_complete;
-	cc_req.user_arg = (void *)req;
+	cc_req.user_cb = cc_hash_complete;
+	cc_req.user_arg = req;
 
 	if (state->xcbc_count && rem_cnt == 0) {
 		/* Load key for ECB decryption */
@@ -1311,7 +1318,6 @@ static int cc_mac_final(struct ahash_request *req)
 
 	/* Get final MAC result */
 	hw_desc_init(&desc[idx]);
-	/* TODO */
 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 		      NS_BIT, 1);
 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1369,8 +1375,8 @@ static int cc_mac_finup(struct ahash_request *req)
 	}
 
 	/* Setup request structure */
-	cc_req.user_cb = (void *)cc_hash_complete;
-	cc_req.user_arg = (void *)req;
+	cc_req.user_cb = cc_hash_complete;
+	cc_req.user_arg = req;
 
 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
 		key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1393,7 +1399,6 @@ static int cc_mac_finup(struct ahash_request *req)
 
 	/* Get final MAC result */
 	hw_desc_init(&desc[idx]);
-	/* TODO */
 	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
 		      NS_BIT, 1);
 	set_queue_last_ind(ctx->drvdata, &desc[idx]);
@@ -1448,8 +1453,8 @@ static int cc_mac_digest(struct ahash_request *req)
 	}
 
 	/* Setup request structure */
-	cc_req.user_cb = (void *)cc_digest_complete;
-	cc_req.user_arg = (void *)req;
+	cc_req.user_cb = cc_digest_complete;
+	cc_req.user_arg = req;
 
 	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
 		key_len = CC_AES_128_BIT_KEY_SIZE;
@@ -1820,7 +1825,7 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
 	struct crypto_alg *alg;
 	struct ahash_alg *halg;
 
-	t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
+	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
 	if (!t_crypto_alg)
 		return ERR_PTR(-ENOMEM);
 
@@ -1857,104 +1862,85 @@ static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
 	return t_crypto_alg;
 }
 
+static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
+			     unsigned int size, u32 *sram_buff_ofs)
+{
+	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+	unsigned int larval_seq_len = 0;
+	int rc;
+
+	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
+			 larval_seq, &larval_seq_len);
+	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	if (rc)
+		return rc;
+
+	*sram_buff_ofs += size;
+	return 0;
+}
+
 int cc_init_hash_sram(struct cc_drvdata *drvdata)
 {
 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
-	cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
-	unsigned int larval_seq_len = 0;
-	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
+	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
 	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
 	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
 	int rc = 0;
 
 	/* Copy-to-sram digest-len */
-	cc_set_sram_desc(cc_digest_len_init, sram_buff_ofs,
-			 ARRAY_SIZE(cc_digest_len_init), larval_seq,
-			 &larval_seq_len);
-	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
+			       sizeof(cc_digest_len_init), &sram_buff_ofs);
 	if (rc)
 		goto init_digest_const_err;
 
-	sram_buff_ofs += sizeof(cc_digest_len_init);
-	larval_seq_len = 0;
-
 	if (large_sha_supported) {
 		/* Copy-to-sram digest-len for sha384/512 */
-		cc_set_sram_desc(cc_digest_len_sha512_init, sram_buff_ofs,
-				 ARRAY_SIZE(cc_digest_len_sha512_init),
-				 larval_seq, &larval_seq_len);
-		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
+				       sizeof(cc_digest_len_sha512_init),
+				       &sram_buff_ofs);
 		if (rc)
 			goto init_digest_const_err;
-
-		sram_buff_ofs += sizeof(cc_digest_len_sha512_init);
-		larval_seq_len = 0;
 	}
 
 	/* The initial digests offset */
 	hash_handle->larval_digest_sram_addr = sram_buff_ofs;
 
 	/* Copy-to-sram initial SHA* digests */
-	cc_set_sram_desc(cc_md5_init, sram_buff_ofs, ARRAY_SIZE(cc_md5_init),
-			 larval_seq, &larval_seq_len);
-	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
			       &sram_buff_ofs);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(cc_md5_init);
-	larval_seq_len = 0;
 
-	cc_set_sram_desc(cc_sha1_init, sram_buff_ofs,
-			 ARRAY_SIZE(cc_sha1_init), larval_seq,
-			 &larval_seq_len);
-	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
+			       &sram_buff_ofs);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(cc_sha1_init);
-	larval_seq_len = 0;
 
-	cc_set_sram_desc(cc_sha224_init, sram_buff_ofs,
-			 ARRAY_SIZE(cc_sha224_init), larval_seq,
-			 &larval_seq_len);
-	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
			       &sram_buff_ofs);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(cc_sha224_init);
-	larval_seq_len = 0;
 
-	cc_set_sram_desc(cc_sha256_init, sram_buff_ofs,
-			 ARRAY_SIZE(cc_sha256_init), larval_seq,
-			 &larval_seq_len);
-	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
			       &sram_buff_ofs);
 	if (rc)
 		goto init_digest_const_err;
-	sram_buff_ofs += sizeof(cc_sha256_init);
-	larval_seq_len = 0;
 
 	if (sm3_supported) {
-		cc_set_sram_desc(cc_sm3_init, sram_buff_ofs,
-				 ARRAY_SIZE(cc_sm3_init), larval_seq,
-				 &larval_seq_len);
-		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
+				       sizeof(cc_sm3_init), &sram_buff_ofs);
 		if (rc)
 			goto init_digest_const_err;
-		sram_buff_ofs += sizeof(cc_sm3_init);
-		larval_seq_len = 0;
 	}
 
 	if (large_sha_supported) {
-		cc_set_sram_desc((u32 *)cc_sha384_init, sram_buff_ofs,
-				 (ARRAY_SIZE(cc_sha384_init) * 2), larval_seq,
-				 &larval_seq_len);
-		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
				       sizeof(cc_sha384_init), &sram_buff_ofs);
 		if (rc)
 			goto init_digest_const_err;
-		sram_buff_ofs += sizeof(cc_sha384_init);
-		larval_seq_len = 0;
 
-		cc_set_sram_desc((u32 *)cc_sha512_init, sram_buff_ofs,
-				 (ARRAY_SIZE(cc_sha512_init) * 2), larval_seq,
-				 &larval_seq_len);
-		rc = send_request_init(drvdata, larval_seq, larval_seq_len);
+		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
				       sizeof(cc_sha512_init), &sram_buff_ofs);
 		if (rc)
 			goto init_digest_const_err;
 	}
@@ -1963,38 +1949,16 @@ init_digest_const_err:
 	return rc;
 }
 
-static void __init cc_swap_dwords(u32 *buf, unsigned long size)
-{
-	int i;
-	u32 tmp;
-
-	for (i = 0; i < size; i += 2) {
-		tmp = buf[i];
-		buf[i] = buf[i + 1];
-		buf[i + 1] = tmp;
-	}
-}
-
-/*
- * Due to the way the HW works we need to swap every
- * double word in the SHA384 and SHA512 larval hashes
- */
-void __init cc_hash_global_init(void)
-{
-	cc_swap_dwords((u32 *)&cc_sha384_init, (ARRAY_SIZE(cc_sha384_init) * 2));
-	cc_swap_dwords((u32 *)&cc_sha512_init, (ARRAY_SIZE(cc_sha512_init) * 2));
-}
-
 int cc_hash_alloc(struct cc_drvdata *drvdata)
 {
 	struct cc_hash_handle *hash_handle;
-	cc_sram_addr_t sram_buff;
+	u32 sram_buff;
 	u32 sram_size_to_alloc;
 	struct device *dev = drvdata_to_dev(drvdata);
 	int rc = 0;
 	int alg;
 
-	hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
+	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
 	if (!hash_handle)
 		return -ENOMEM;
 
@@ -2016,7 +1980,6 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
 
 	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
 	if (sram_buff == NULL_SRAM_ADDR) {
-		dev_err(dev, "SRAM pool exhausted\n");
 		rc = -ENOMEM;
 		goto fail;
 	}
@@ -2056,12 +2019,10 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
 		if (rc) {
 			dev_err(dev, "%s alg registration failed\n",
 				driver_hash[alg].driver_name);
-			kfree(t_alg);
 			goto fail;
-		} else {
-			list_add_tail(&t_alg->entry,
-				      &hash_handle->hash_list);
 		}
+
+		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
 	}
 
 	if (hw_mode == DRV_CIPHER_XCBC_MAC || hw_mode == DRV_CIPHER_CMAC)
@@ -2081,18 +2042,16 @@ int cc_hash_alloc(struct cc_drvdata *drvdata)
 		if (rc) {
 			dev_err(dev, "%s alg registration failed\n",
 				driver_hash[alg].driver_name);
-			kfree(t_alg);
 			goto fail;
-		} else {
-			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
 		}
+
+		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
 	}
 
 	return 0;
 
 fail:
-	kfree(drvdata->hash_handle);
-	drvdata->hash_handle = NULL;
+	cc_hash_free(drvdata);
 	return rc;
 }
 
@@ -2101,17 +2060,12 @@ int cc_hash_free(struct cc_drvdata *drvdata)
 {
 	struct cc_hash_alg *t_hash_alg, *hash_n;
 	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
 
-	if (hash_handle) {
-		list_for_each_entry_safe(t_hash_alg, hash_n,
-					 &hash_handle->hash_list, entry) {
-			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
-			list_del(&t_hash_alg->entry);
-			kfree(t_hash_alg);
-		}
-
-		kfree(hash_handle);
-		drvdata->hash_handle = NULL;
+	list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
+				 entry) {
+		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+		list_del(&t_hash_alg->entry);
 	}
+
 	return 0;
 }
 
@@ -2272,22 +2226,23 @@ static const void *cc_larval_digest(struct device *dev, u32 mode)
 	}
 }
 
-/*!
- * Gets the address of the initial digest in SRAM
+/**
+ * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
  * according to the given hash mode
  *
- * \param drvdata
- * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
+ * @drvdata: Associated device driver context
+ * @mode: The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256
 *
- * \return u32 The address of the initial digest in SRAM
+ * Return:
+ *	The address of the initial digest in SRAM
 */
-cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
+u32 cc_larval_digest_addr(void *drvdata, u32 mode)
 {
 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
 	struct device *dev = drvdata_to_dev(_drvdata);
 	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
-	cc_sram_addr_t addr;
+	u32 addr;
 
 	switch (mode) {
 	case DRV_HASH_NULL:
@@ -2339,12 +2294,11 @@ cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
 	return hash_handle->larval_digest_sram_addr;
 }
 
-cc_sram_addr_t
-cc_digest_len_addr(void *drvdata, u32 mode)
+u32 cc_digest_len_addr(void *drvdata, u32 mode)
 {
 	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
 	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
-	cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
+	u32 digest_len_addr = hash_handle->digest_len_sram_addr;
 
 	switch (mode) {
 	case DRV_HASH_SHA1: