Diffstat (limited to 'drivers/crypto/caam')
-rw-r--r--  drivers/crypto/caam/caamalg.c      | 384
-rw-r--r--  drivers/crypto/caam/caamrng.c      |  17
-rw-r--r--  drivers/crypto/caam/compat.h       |   1
-rw-r--r--  drivers/crypto/caam/ctrl.c         |  61
-rw-r--r--  drivers/crypto/caam/ctrl.h         |   2
-rw-r--r--  drivers/crypto/caam/desc_constr.h  |  27
-rw-r--r--  drivers/crypto/caam/error.c        |  10
-rw-r--r--  drivers/crypto/caam/regs.h         |   4
8 files changed, 389 insertions(+), 117 deletions(-)
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index b71f2fd749df..5f891254db73 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -66,10 +66,14 @@
/* length of descriptors text */
#define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
-#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 16 * CAAM_CMD_SZ)
-#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 21 * CAAM_CMD_SZ)
+#define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+#define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 18 * CAAM_CMD_SZ)
#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_ENC_LEN (DESC_AEAD_NULL_BASE + 14 * CAAM_CMD_SZ)
+#define DESC_AEAD_NULL_DEC_LEN (DESC_AEAD_NULL_BASE + 17 * CAAM_CMD_SZ)
+
#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
@@ -104,27 +108,14 @@ static inline void append_dec_op1(u32 *desc, u32 type)
}
/*
- * Wait for completion of class 1 key loading before allowing
- * error propagation
- */
-static inline void append_dec_shr_done(u32 *desc)
-{
- u32 *jump_cmd;
-
- jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
- set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-}
-
-/*
* For aead functions, read payload and write payload,
* both of which are specified in req->src and req->dst
*/
static inline void aead_append_src_dst(u32 *desc, u32 msg_type)
{
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH |
KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH);
- append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
}
/*
@@ -211,9 +202,196 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
append_key_aead(desc, ctx, keys_fit_inline);
set_jump_tgt_here(desc, key_jump_cmd);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
+}
+
+static int aead_null_set_sh_desc(struct crypto_aead *aead)
+{
+ struct aead_tfm *tfm = &aead->base.crt_aead;
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *jrdev = ctx->jrdev;
+ bool keys_fit_inline = false;
+ u32 *key_jump_cmd, *jump_cmd, *read_move_cmd, *write_move_cmd;
+ u32 *desc;
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_ENC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ /* aead_encrypt shared descriptor */
+ desc = ctx->sh_desc_enc;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* cryptlen = seqoutlen - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
+
+ /*
+ * NULL encryption; IV is zero
+ * assoclen = (assoclen + cryptlen) - cryptlen
+ */
+ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH3 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH3 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+
+ /* Write ICV */
+ append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null enc shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (DESC_AEAD_NULL_DEC_LEN + DESC_JOB_IO_LEN +
+ ctx->split_key_pad_len <= CAAM_DESC_BYTES_MAX)
+ keys_fit_inline = true;
+
+ desc = ctx->sh_desc_dec;
+
+ /* aead_decrypt shared descriptor */
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Skip if already shared */
+ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+ if (keys_fit_inline)
+ append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len,
+ ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ set_jump_tgt_here(desc, key_jump_cmd);
+
+ /* Class 2 operation */
+ append_operation(desc, ctx->class2_alg_type |
+ OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
+
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
+ append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
+ ctx->authsize + tfm->ivsize);
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
+ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
+ append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
+
+ /* read assoc before reading payload */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG |
+ KEY_VLF);
+
+ /* Prepare to read and write cryptlen bytes */
+ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ);
+ append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ);
+
+ /*
+ * MOVE_LEN opcode is not available in all SEC HW revisions,
+ * thus need to do some magic, i.e. self-patch the descriptor
+ * buffer.
+ */
+ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF |
+ MOVE_DEST_MATH2 |
+ (0x6 << MOVE_LEN_SHIFT));
+ write_move_cmd = append_move(desc, MOVE_SRC_MATH2 |
+ MOVE_DEST_DESCBUF |
+ MOVE_WAITCOMP |
+ (0x8 << MOVE_LEN_SHIFT));
+
+ /* Read and write cryptlen bytes */
+ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1);
+
+ /*
+ * Insert a NOP here, since we need at least 4 instructions between
+ * code patching the descriptor buffer and the location being patched.
+ */
+ jump_cmd = append_jump(desc, JUMP_TEST_ALL);
+ set_jump_tgt_here(desc, jump_cmd);
+ set_move_tgt_here(desc, read_move_cmd);
+ set_move_tgt_here(desc, write_move_cmd);
+ append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO |
+ MOVE_AUX_LS);
+ append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+ /* Load ICV */
+ append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
+
+ ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
+ desc_bytes(desc),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
+ dev_err(jrdev, "unable to map shared descriptor\n");
+ return -ENOMEM;
+ }
+#ifdef DEBUG
+ print_hex_dump(KERN_ERR,
+ "aead null dec shdesc@"__stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc,
+ desc_bytes(desc), 1);
+#endif
+
+ return 0;
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -222,13 +400,16 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
struct caam_ctx *ctx = crypto_aead_ctx(aead);
struct device *jrdev = ctx->jrdev;
bool keys_fit_inline = false;
- u32 *key_jump_cmd, *jump_cmd;
u32 geniv, moveiv;
u32 *desc;
- if (!ctx->enckeylen || !ctx->authsize)
+ if (!ctx->authsize)
return 0;
+ /* NULL encryption / decryption */
+ if (!ctx->enckeylen)
+ return aead_null_set_sh_desc(aead);
+
/*
* Job Descriptor and Shared Descriptors
* must all fit into the 64-word Descriptor h/w Buffer
@@ -253,7 +434,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* assoclen + cryptlen = seqinlen - ivsize */
append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize);
- /* assoclen + cryptlen = (assoclen + cryptlen) - cryptlen */
+ /* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ);
/* read assoc before reading payload */
@@ -296,30 +477,18 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
CAAM_DESC_BYTES_MAX)
keys_fit_inline = true;
- desc = ctx->sh_desc_dec;
-
/* aead_decrypt shared descriptor */
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- append_key_aead(desc, ctx, keys_fit_inline);
+ desc = ctx->sh_desc_dec;
- /* Only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
- set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
+ init_sh_desc_key_aead(desc, ctx, keys_fit_inline);
/* Class 2 operation */
append_operation(desc, ctx->class2_alg_type |
OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON);
- /* assoclen + cryptlen = seqinlen - ivsize */
+ /* assoclen + cryptlen = seqinlen - ivsize - authsize */
append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM,
- ctx->authsize + tfm->ivsize)
+ ctx->authsize + tfm->ivsize);
/* assoclen = (assoclen + cryptlen) - cryptlen */
append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ);
append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ);
@@ -340,7 +509,6 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Load ICV */
append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 |
FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV);
- append_dec_shr_done(desc);
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
@@ -532,7 +700,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
struct ablkcipher_tfm *tfm = &ablkcipher->base.crt_ablkcipher;
struct device *jrdev = ctx->jrdev;
int ret = 0;
- u32 *key_jump_cmd, *jump_cmd;
+ u32 *key_jump_cmd;
u32 *desc;
#ifdef DEBUG
@@ -563,9 +731,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
- /* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
-
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
LDST_CLASS_1_CCB | tfm->ivsize);
@@ -603,11 +768,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->enckeylen, CLASS_1 |
KEY_DEST_CLASS_REG);
- /* For aead, only propagate error immediately if shared */
- jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
- set_jump_tgt_here(desc, jump_cmd);
/* load IV */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -619,9 +780,6 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* Perform operation */
ablkcipher_append_src_dst(desc);
- /* Wait for key to load before allowing propagating error */
- append_dec_shr_done(desc);
-
ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
desc_bytes(desc),
DMA_TO_DEVICE);
@@ -1459,6 +1617,11 @@ static int aead_givencrypt(struct aead_givcrypt_request *areq)
return ret;
}
+static int aead_null_givencrypt(struct aead_givcrypt_request *areq)
+{
+ return aead_encrypt(&areq->areq);
+}
+
/*
* allocate and map the ablkcipher extended descriptor for ablkcipher
*/
@@ -1648,6 +1811,124 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
+ .name = "authenc(hmac(md5),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-md5-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha1),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha1-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha224),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha224-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha256),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha256-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha384),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha384-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC,
+ },
+ {
+ .name = "authenc(hmac(sha512),ecb(cipher_null))",
+ .driver_name = "authenc-hmac-sha512-ecb-cipher_null-caam",
+ .blocksize = NULL_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_null_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = NULL_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .class1_alg_type = 0,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
@@ -2099,6 +2380,11 @@ static void caam_cra_exit(struct crypto_tfm *tfm)
dma_unmap_single(ctx->jrdev, ctx->sh_desc_givenc_dma,
desc_bytes(ctx->sh_desc_givenc),
DMA_TO_DEVICE);
+ if (ctx->key_dma &&
+ !dma_mapping_error(ctx->jrdev, ctx->key_dma))
+ dma_unmap_single(ctx->jrdev, ctx->key_dma,
+ ctx->enckeylen + ctx->split_key_pad_len,
+ DMA_TO_DEVICE);
caam_jr_free(ctx->jrdev);
}
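The inline-key decision that gates append_key_as_imm() vs. append_key() above is a pure byte-budget check: the shared descriptor, the job descriptor I/O commands, and the padded split key must together fit the 64-word descriptor buffer. A minimal user-space sketch of that decision (job_io_len is passed in as a caller-supplied assumption, not taken from the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>

#define CAAM_CMD_SZ         sizeof(uint32_t)      /* one descriptor word */
#define CAAM_DESC_BYTES_MAX (CAAM_CMD_SZ * 64)    /* 64-word h/w buffer  */

/* true: inline the split key into the shared descriptor;
 * false: reference it through a DMA pointer instead. */
static bool keys_fit_inline(size_t sh_desc_len, size_t job_io_len,
			    size_t split_key_pad_len)
{
	return sh_desc_len + job_io_len + split_key_pad_len
		<= CAAM_DESC_BYTES_MAX;
}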
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index 28486b19fc36..3529b54048c9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -76,7 +76,7 @@ struct caam_rng_ctx {
struct buf_data bufs[2];
};
-static struct caam_rng_ctx rng_ctx;
+static struct caam_rng_ctx *rng_ctx;
static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
@@ -137,7 +137,7 @@ static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
- struct caam_rng_ctx *ctx = &rng_ctx;
+ struct caam_rng_ctx *ctx = rng_ctx;
struct buf_data *bd = &ctx->bufs[ctx->current_buf];
int next_buf_idx, copied_idx;
int err;
@@ -237,12 +237,12 @@ static void caam_cleanup(struct hwrng *rng)
struct buf_data *bd;
for (i = 0; i < 2; i++) {
- bd = &rng_ctx.bufs[i];
+ bd = &rng_ctx->bufs[i];
if (atomic_read(&bd->empty) == BUF_PENDING)
wait_for_completion(&bd->filled);
}
- rng_unmap_ctx(&rng_ctx);
+ rng_unmap_ctx(rng_ctx);
}
static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
@@ -273,8 +273,9 @@ static struct hwrng caam_rng = {
static void __exit caam_rng_exit(void)
{
- caam_jr_free(rng_ctx.jrdev);
+ caam_jr_free(rng_ctx->jrdev);
hwrng_unregister(&caam_rng);
+ kfree(rng_ctx);
}
static int __init caam_rng_init(void)
@@ -286,8 +287,10 @@ static int __init caam_rng_init(void)
pr_err("Job Ring Device allocation for transform failed\n");
return PTR_ERR(dev);
}
-
- caam_init_rng(&rng_ctx, dev);
+ rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
+ if (!rng_ctx)
+ return -ENOMEM;
+ caam_init_rng(rng_ctx, dev);
dev_info(dev, "registering rng-caam\n");
return hwrng_register(&caam_rng);
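Note that the -ENOMEM return above leaves the job ring from caam_jr_alloc() held. A sketch of the same init with full unwinding, using only helpers already visible in this file (an illustration, not the committed code):

static int __init caam_rng_init_sketch(void)
{
	struct device *dev = caam_jr_alloc();
	int err;

	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(dev);
	}

	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA);
	if (!rng_ctx) {
		err = -ENOMEM;
		goto free_jr;
	}
	caam_init_rng(rng_ctx, dev);

	dev_info(dev, "registering rng-caam\n");
	err = hwrng_register(&caam_rng);
	if (err)
		goto free_ctx;
	return 0;

free_ctx:
	kfree(rng_ctx);
free_jr:
	caam_jr_free(dev);
	return err;
}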
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 762aeff626ac..f227922cea38 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -26,6 +26,7 @@
#include <net/xfrm.h>
#include <crypto/algapi.h>
+#include <crypto/null.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
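The new <crypto/null.h> include supplies the null-cipher constants referenced by the caamalg.c templates above (NULL_BLOCK_SIZE, NULL_IV_SIZE). For reference, the mainline header defines them as:

#define NULL_KEY_SIZE     0
#define NULL_BLOCK_SIZE   1
#define NULL_DIGEST_SIZE  0
#define NULL_IV_SIZE      0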
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 63fb1af2c431..1c38f86bf63a 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -14,7 +14,6 @@
#include "jr.h"
#include "desc_constr.h"
#include "error.h"
-#include "ctrl.h"
/*
* Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -352,32 +351,17 @@ static void kick_trng(struct platform_device *pdev, int ent_delay)
/**
* caam_get_era() - Return the ERA of the SEC on SoC, based
- * on the SEC_VID register.
- * Returns the ERA number (1..4) or -ENOTSUPP if the ERA is unknown.
- * @caam_id - the value of the SEC_VID register
+ * on "sec-era" propery in the DTS. This property is updated by u-boot.
**/
-int caam_get_era(u64 caam_id)
+int caam_get_era(void)
{
- struct sec_vid *sec_vid = (struct sec_vid *)&caam_id;
- static const struct {
- u16 ip_id;
- u8 maj_rev;
- u8 era;
- } caam_eras[] = {
- {0x0A10, 1, 1},
- {0x0A10, 2, 2},
- {0x0A12, 1, 3},
- {0x0A14, 1, 3},
- {0x0A14, 2, 4},
- {0x0A16, 1, 4},
- {0x0A11, 1, 4}
- };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(caam_eras); i++)
- if (caam_eras[i].ip_id == sec_vid->ip_id &&
- caam_eras[i].maj_rev == sec_vid->maj_rev)
- return caam_eras[i].era;
+ struct device_node *caam_node;
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = (uint32_t *)of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ return prop ? *prop : -ENOTSUPP;
+ }
return -ENOTSUPP;
}
@@ -443,13 +427,10 @@ static int caam_probe(struct platform_device *pdev)
* for all, then go probe each one.
*/
rspec = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring")
- rspec++;
- if (!rspec) {
- /* for backward compatible with device trees */
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring")
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
rspec++;
- }
ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
GFP_KERNEL);
@@ -460,18 +441,9 @@ static int caam_probe(struct platform_device *pdev)
ring = 0;
ctrlpriv->total_jobrs = 0;
- for_each_compatible_node(np, NULL, "fsl,sec-v4.0-job-ring") {
- ctrlpriv->jrpdev[ring] =
- of_platform_device_create(np, NULL, dev);
- if (!ctrlpriv->jrpdev[ring]) {
- pr_warn("JR%d Platform device creation error\n", ring);
- continue;
- }
- ctrlpriv->total_jobrs++;
- ring++;
- }
- if (!ring) {
- for_each_compatible_node(np, NULL, "fsl,sec4.0-job-ring") {
+ for_each_available_child_of_node(nprop, np)
+ if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
+ of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
ctrlpriv->jrpdev[ring] =
of_platform_device_create(np, NULL, dev);
if (!ctrlpriv->jrpdev[ring]) {
@@ -482,7 +454,6 @@ static int caam_probe(struct platform_device *pdev)
ctrlpriv->total_jobrs++;
ring++;
}
- }
/* Check to see if QI present. If so, enable */
ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
@@ -564,7 +535,7 @@ static int caam_probe(struct platform_device *pdev)
/* Report "alive" for developer to see */
dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
- caam_get_era(caam_id));
+ caam_get_era());
dev_info(dev, "job rings = %d, qi = %d\n",
ctrlpriv->total_jobrs, ctrlpriv->qi_present);
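Two things worth noting about the new caam_get_era(): the raw *prop dereference assumes a big-endian CPU (device-tree cells are stored as fdt32), and returning from inside for_each_compatible_node() keeps a reference on the matched node. A sketch of an equivalent lookup that lets of_property_read_u32() handle the byte order and balances the reference (illustration only, not the committed code):

static int caam_get_era_sketch(void)
{
	struct device_node *np;
	u32 era;
	int ret;

	for_each_compatible_node(np, NULL, "fsl,sec-v4.0") {
		ret = of_property_read_u32(np, "fsl,sec-era", &era);
		of_node_put(np);	/* drop the ref the iterator holds */
		return ret ? -ENOTSUPP : era;
	}

	return -ENOTSUPP;
}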
diff --git a/drivers/crypto/caam/ctrl.h b/drivers/crypto/caam/ctrl.h
index 980d44eaaf40..cac5402a46eb 100644
--- a/drivers/crypto/caam/ctrl.h
+++ b/drivers/crypto/caam/ctrl.h
@@ -8,6 +8,6 @@
#define CTRL_H
/* Prototypes for backend-level services exposed to APIs */
-int caam_get_era(u64 caam_id);
+int caam_get_era(void);
#endif /* CTRL_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index cd5f678847ce..7eec20bb3849 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -155,21 +155,29 @@ static inline void append_cmd_data(u32 *desc, void *data, int len,
append_data(desc, data, len);
}
-static inline u32 *append_jump(u32 *desc, u32 options)
-{
- u32 *cmd = desc_end(desc);
-
- PRINT_POS;
- append_cmd(desc, CMD_JUMP | options);
-
- return cmd;
+#define APPEND_CMD_RET(cmd, op) \
+static inline u32 *append_##cmd(u32 *desc, u32 options) \
+{ \
+ u32 *cmd = desc_end(desc); \
+ PRINT_POS; \
+ append_cmd(desc, CMD_##op | options); \
+ return cmd; \
}
+APPEND_CMD_RET(jump, JUMP)
+APPEND_CMD_RET(move, MOVE)
static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd)
{
*jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc));
}
+static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd)
+{
+ *move_cmd &= ~MOVE_OFFSET_MASK;
+ *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) &
+ MOVE_OFFSET_MASK);
+}
+
#define APPEND_CMD(cmd, op) \
static inline void append_##cmd(u32 *desc, u32 options) \
{ \
@@ -177,7 +185,6 @@ static inline void append_##cmd(u32 *desc, u32 options) \
append_cmd(desc, CMD_##op | options); \
}
APPEND_CMD(operation, OPERATION)
-APPEND_CMD(move, MOVE)
#define APPEND_CMD_LEN(cmd, op) \
static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
@@ -328,7 +335,7 @@ append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
do { \
APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
append_cmd(desc, data); \
-} while (0);
+} while (0)
#define append_math_add_imm_u32(desc, dest, src0, src1, data) \
APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
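set_move_tgt_here() writes a byte offset into the MOVE command: desc_len() counts 4-byte words, hence the extra "+ 2" in the shift. A standalone worked example (the MOVE_OFFSET_SHIFT/MOVE_OFFSET_MASK values are assumptions for illustration, not quoted from desc.h):

#include <stdint.h>
#include <stdio.h>

#define MOVE_OFFSET_SHIFT 8                        /* assumed */
#define MOVE_OFFSET_MASK  (0xffu << MOVE_OFFSET_SHIFT)

int main(void)
{
	uint32_t move_cmd = 0x78000006;    /* hypothetical MOVE command */
	unsigned int desc_words = 13;      /* current descriptor length */

	/* words -> bytes (<< 2), then into the OFFSET field */
	move_cmd &= ~MOVE_OFFSET_MASK;
	move_cmd |= (desc_words << (MOVE_OFFSET_SHIFT + 2)) &
		    MOVE_OFFSET_MASK;

	printf("patched MOVE: 0x%08x (byte offset %u)\n",
	       move_cmd, desc_words * 4);
	return 0;
}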
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 9f25f5296029..0eabd81e1a90 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -16,9 +16,13 @@
char *tmp; \
\
tmp = kmalloc(sizeof(format) + max_alloc, GFP_ATOMIC); \
- sprintf(tmp, format, param); \
- strcat(str, tmp); \
- kfree(tmp); \
+ if (likely(tmp)) { \
+ sprintf(tmp, format, param); \
+ strcat(str, tmp); \
+ kfree(tmp); \
+ } else { \
+ strcat(str, "kmalloc failure in SPRINTFCAT"); \
+ } \
}
static void report_jump_idx(u32 status, char *outstr)
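The hardened macro can also be written as a helper around kasprintf(), which sizes its own allocation; a sketch for the common u32 case (the driver keeps the macro form so the format and parameter types stay generic):

#include <linux/slab.h>
#include <linux/string.h>

static void sprintfcat_u32(char *str, const char *fmt, u32 param)
{
	char *tmp = kasprintf(GFP_ATOMIC, fmt, param);

	if (likely(tmp)) {
		strcat(str, tmp);
		kfree(tmp);
	} else {
		strcat(str, "kmalloc failure in SPRINTFCAT");
	}
}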
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index d50174f45b21..cbde8b95a6f8 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -74,10 +74,10 @@
#endif
#else
#ifdef __LITTLE_ENDIAN
-#define wr_reg32(reg, data) __raw_writel(reg, data)
+#define wr_reg32(reg, data) __raw_writel(data, reg)
#define rd_reg32(reg) __raw_readl(reg)
#ifdef CONFIG_64BIT
-#define wr_reg64(reg, data) __raw_writeq(reg, data)
+#define wr_reg64(reg, data) __raw_writeq(data, reg)
#define rd_reg64(reg) __raw_readq(reg)
#endif
#endif
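The regs.h change fixes transposed arguments: the kernel's raw MMIO accessors take the value first and the __iomem address second. Minimal usage sketch:

#include <linux/io.h>

static inline void caam_wr_example(void __iomem *reg)
{
	__raw_writel(0x12345678, reg);	/* value, then address */
	(void)__raw_readl(reg);		/* address only        */
}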