path: root/drivers/crypto/nx
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-09-07 14:31:18 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-07 14:31:18 -0700
commit     6be48f2940af9ea8d93c23a0dd8e322672c92efd (patch)
tree       1bdc85a9d3fd0c19e108ea27a29a83ef2b44f5d0 /drivers/crypto/nx
parent     0ffb01d9def22f1954e99529b7e4ded497b2e88b (diff)
parent     68411521cc6055edc6274e03ab3210a5893533ba (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
 "Here is the crypto update for 3.12:

   - Added MODULE_SOFTDEP to allow pre-loading of modules.
   - Reinstated crct10dif driver using the module softdep feature.
   - Allow via rng driver to be auto-loaded.
   - Split large input data when necessary in nx.
   - Handle zero length messages correctly for GCM/XCBC in nx.
   - Handle SHA-2 chunks bigger than block size properly in nx.
   - Handle unaligned lengths in omap-aes.
   - Added SHA384/SHA512 to omap-sham.
   - Added OMAP5/AM43XX SHAM support.
   - Added OMAP4 TRNG support.
   - Misc fixes"

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (66 commits)
  Reinstate "crypto: crct10dif - Wrap crc_t10dif function all to use crypto transform framework"
  hwrng: via - Add MODULE_DEVICE_TABLE
  crypto: fcrypt - Fix bitoperation for compilation with clang
  crypto: nx - fix SHA-2 for chunks bigger than block size
  crypto: nx - fix GCM for zero length messages
  crypto: nx - fix XCBC for zero length messages
  crypto: nx - fix limits to sg lists for AES-CCM
  crypto: nx - fix limits to sg lists for AES-XCBC
  crypto: nx - fix limits to sg lists for AES-GCM
  crypto: nx - fix limits to sg lists for AES-CTR
  crypto: nx - fix limits to sg lists for AES-CBC
  crypto: nx - fix limits to sg lists for AES-ECB
  crypto: nx - add offset to nx_build_sg_lists()
  padata - Register hotcpu notifier after initialization
  padata - share code between CPU_ONLINE and CPU_DOWN_FAILED, same to CPU_DOWN_PREPARE and CPU_UP_CANCELED
  hwrng: omap - reorder OMAP TRNG driver code
  crypto: omap-sham - correct dma burst size
  crypto: omap-sham - Enable Polling mode if DMA fails
  crypto: tegra-aes - bitwise vs logical and
  crypto: sahara - checking the wrong variable
  ...
Diffstat (limited to 'drivers/crypto/nx')
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c   |  57
-rw-r--r--  drivers/crypto/nx/nx-aes-ccm.c   | 283
-rw-r--r--  drivers/crypto/nx/nx-aes-ctr.c   |  52
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c   |  50
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c   | 296
-rw-r--r--  drivers/crypto/nx/nx-aes-xcbc.c  | 205
-rw-r--r--  drivers/crypto/nx/nx-sha256.c    | 124
-rw-r--r--  drivers/crypto/nx/nx-sha512.c    | 131
-rw-r--r--  drivers/crypto/nx/nx.c           |  35
-rw-r--r--  drivers/crypto/nx/nx.h           |   3
10 files changed, 894 insertions, 342 deletions
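
The common thread in the nx hunks below is the "fix limits to sg lists" series: rather than rejecting requests larger than the coprocessor's databytelen, each AES routine now splits the work into chunks bounded by both databytelen and the scatter/gather list capacity, looping until the whole request is done. Here is a minimal standalone sketch of that chunking arithmetic, not driver code; DATABYTELEN, PAGE_SIZE_SIM, MAX_SG_ENTRIES and process_chunk() are illustrative placeholders for the values the real driver reads from the device tree and for the nx_hcall_sync() round trip.

#include <stdio.h>
#include <stdint.h>

/* Illustrative limits only -- the real values come from the NX device
 * tree properties (ap->databytelen, max_sg_len) in the driver. */
#define DATABYTELEN     (64 * 1024)   /* max bytes per coprocessor call  */
#define PAGE_SIZE_SIM   4096          /* bytes addressable per sg entry  */
#define MAX_SG_ENTRIES  32            /* sg entries that fit on one page */
#define AES_BLOCK_SIZE  16

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Stand-in for one nx_hcall_sync() round trip on [offset, offset+len). */
static void process_chunk(uint64_t offset, uint64_t len)
{
	printf("chunk: offset=%llu len=%llu\n",
	       (unsigned long long)offset, (unsigned long long)len);
}

/* Split a request of nbytes into chunks the coprocessor can accept,
 * mirroring the do/while loops added to the nx AES routines. */
static void crypt_in_chunks(uint64_t nbytes)
{
	uint64_t processed = 0, to_process;

	do {
		to_process = min_u64(nbytes - processed, DATABYTELEN);
		to_process = min_u64(to_process,
				     (uint64_t)PAGE_SIZE_SIM * (MAX_SG_ENTRIES - 1));
		/* block ciphers must submit whole AES blocks per call */
		to_process &= ~(uint64_t)(AES_BLOCK_SIZE - 1);

		process_chunk(processed, to_process);
		processed += to_process;
	} while (processed < nbytes);
}

int main(void)
{
	crypt_in_chunks(300 * 1024);  /* larger than any single-call limit */
	return 0;
}

In the actual driver the loop also carries the running IV/MAC state between calls via the NX_FDM_INTERMEDIATE and NX_FDM_CONTINUATION flags, as the cbc/ccm/gcm hunks below show.
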
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index 35d483f8db66..cc00b52306ba 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -70,35 +70,52 @@ static int cbc_aes_nx_crypt(struct blkcipher_desc *desc,
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
int rc;
- if (nbytes > nx_ctx->ap->databytelen)
- return -EINVAL;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
- csbcpb->cpb.aes_cbc.iv);
- if (rc)
- goto out;
-
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
-
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
+ do {
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ processed, csbcpb->cpb.aes_cbc.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
diff --git a/drivers/crypto/nx/nx-aes-ccm.c b/drivers/crypto/nx/nx-aes-ccm.c
index ef5eae6d1400..5ecd4c2414aa 100644
--- a/drivers/crypto/nx/nx-aes-ccm.c
+++ b/drivers/crypto/nx/nx-aes-ccm.c
@@ -179,13 +179,26 @@ static int generate_pat(u8 *iv,
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
unsigned int iauth_len = 0;
- struct vio_pfo_op *op = NULL;
u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL;
int rc;
/* zero the ctr value */
memset(iv + 15 - iv[0], 0, iv[0] + 1);
+ /* page 78 of nx_wb.pdf has,
+ * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes
+ * in length. If a full message is used, the AES CCA implementation
+ * restricts the maximum AAD length to 2^32 -1 bytes.
+ * If partial messages are used, the implementation supports
+ * 2^64 -1 bytes maximum AAD length.
+ *
+ * However, in the cryptoapi's aead_request structure,
+ * assoclen is an unsigned int, thus it cannot hold a length
+ * value greater than 2^32 - 1.
+ * Thus the AAD is further constrained by this and is never
+ * greater than 2^32.
+ */
+
if (!req->assoclen) {
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
} else if (req->assoclen <= 14) {
@@ -195,7 +208,46 @@ static int generate_pat(u8 *iv,
b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0;
b1 = nx_ctx->priv.ccm.iauth_tag;
iauth_len = req->assoclen;
+ } else if (req->assoclen <= 65280) {
+ /* if associated data is less than (2^16 - 2^8), we construct
+ * B1 differently and feed in the associated data to a CCA
+ * operation */
+ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+ b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+ iauth_len = 14;
+ } else {
+ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
+ b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
+ iauth_len = 10;
+ }
+
+ /* generate B0 */
+ rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
+ if (rc)
+ return rc;
+
+ /* generate B1:
+ * add control info for associated data
+ * RFC 3610 and NIST Special Publication 800-38C
+ */
+ if (b1) {
+ memset(b1, 0, 16);
+ if (req->assoclen <= 65280) {
+ *(u16 *)b1 = (u16)req->assoclen;
+ scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
+ iauth_len, SCATTERWALK_FROM_SG);
+ } else {
+ *(u16 *)b1 = (u16)(0xfffe);
+ *(u32 *)&b1[2] = (u32)req->assoclen;
+ scatterwalk_map_and_copy(b1 + 6, req->assoc, 0,
+ iauth_len, SCATTERWALK_FROM_SG);
+ }
+ }
+ /* now copy any remaining AAD to scatterlist and call nx... */
+ if (!req->assoclen) {
+ return rc;
+ } else if (req->assoclen <= 14) {
nx_insg = nx_build_sg_list(nx_insg, b1, 16, nx_ctx->ap->sglen);
nx_outsg = nx_build_sg_list(nx_outsg, tmp, 16,
nx_ctx->ap->sglen);
@@ -210,56 +262,74 @@ static int generate_pat(u8 *iv,
NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT;
NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE;
- op = &nx_ctx->op;
result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac;
- } else if (req->assoclen <= 65280) {
- /* if associated data is less than (2^16 - 2^8), we construct
- * B1 differently and feed in the associated data to a CCA
- * operation */
- b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0;
- b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1;
- iauth_len = 14;
- /* remaining assoc data must have scatterlist built for it */
- nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen,
- req->assoc, iauth_len,
- req->assoclen - iauth_len);
- nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
- sizeof(struct nx_sg);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
- op = &nx_ctx->op_aead;
- result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
} else {
- /* if associated data is less than (2^32), we construct B1
- * differently yet again and feed in the associated data to a
- * CCA operation */
- pr_err("associated data len is %u bytes (returning -EINVAL)\n",
- req->assoclen);
- rc = -EINVAL;
- }
+ u32 max_sg_len;
+ unsigned int processed = 0, to_process;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ processed += iauth_len;
+
+ do {
+ to_process = min_t(u32, req->assoclen - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < req->assoclen) {
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) |=
+ NX_FDM_INTERMEDIATE;
+ } else {
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) &=
+ ~NX_FDM_INTERMEDIATE;
+ }
+
+ nx_insg = nx_walk_and_build(nx_ctx->in_sg,
+ nx_ctx->ap->sglen,
+ req->assoc, processed,
+ to_process);
+
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) *
+ sizeof(struct nx_sg);
- rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0);
- if (rc)
- goto done;
+ result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
- if (b1) {
- memset(b1, 0, 16);
- *(u16 *)b1 = (u16)req->assoclen;
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
- scatterwalk_map_and_copy(b1 + 2, req->assoc, 0,
- iauth_len, SCATTERWALK_FROM_SG);
+ memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0,
+ nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0,
+ AES_BLOCK_SIZE);
- rc = nx_hcall_sync(nx_ctx, op,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto done;
+ NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION;
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen,
+ &(nx_ctx->stats->aes_bytes));
- memcpy(out, result, AES_BLOCK_SIZE);
+ processed += to_process;
+ } while (processed < req->assoclen);
+
+ result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0;
}
-done:
+
+ memcpy(out, result, AES_BLOCK_SIZE);
+
return rc;
}
@@ -271,10 +341,12 @@ static int ccm_nx_decrypt(struct aead_request *req,
unsigned int nbytes = req->cryptlen;
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
struct nx_ccm_priv *priv = &nx_ctx->priv.ccm;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
int rc = -1;
- if (nbytes > nx_ctx->ap->databytelen)
- return -EINVAL;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
nbytes -= authsize;
@@ -288,26 +360,61 @@ static int ccm_nx_decrypt(struct aead_request *req,
if (rc)
goto out;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
- csbcpb->cpb.aes_ccm.iv_or_ctr);
- if (rc)
- goto out;
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
- NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ do {
+
+ /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+ * update. This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ to_process, processed,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ if (rc)
+ goto out;
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
+ /* for partial completion, copy following for next
+ * entry into loop...
+ */
+ memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_s0,
+ csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ /* update stats */
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
authsize) ? -EBADMSG : 0;
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -318,38 +425,76 @@ static int ccm_nx_encrypt(struct aead_request *req,
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
unsigned int nbytes = req->cryptlen;
unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
int rc = -1;
- if (nbytes > nx_ctx->ap->databytelen)
- return -EINVAL;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes,
csbcpb->cpb.aes_ccm.in_pat_or_b0);
if (rc)
goto out;
- rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, nbytes,
- csbcpb->cpb.aes_ccm.iv_or_ctr);
- if (rc)
- goto out;
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+ /* to process: the AES_BLOCK_SIZE data chunk to process in this
+ * update. This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+ rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src,
+ to_process, processed,
+ csbcpb->cpb.aes_ccm.iv_or_ctr);
+ if (rc)
+ goto out;
- NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ /* for partial completion, copy following for next
+ * entry into loop...
+ */
+ memcpy(desc->info, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0,
+ csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ccm.in_s0,
+ csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE);
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ /* update stats */
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+
+ } while (processed < nbytes);
/* copy out the auth tag */
scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac,
req->dst, nbytes, authsize,
SCATTERWALK_TO_SG);
+
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
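
The generate_pat() rework above keeps the two B1 layouts the driver already used: for AAD up to 65280 bytes (2^16 - 2^8) the length goes in the first two bytes of B1, and for anything larger a 0xfffe marker plus a 4-byte length is used, per RFC 3610. Below is a portable sketch of just that header encoding; the driver itself relies on native-endian stores, which produce these bytes on big-endian POWER.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* RFC 3610 AAD length header, as built at the front of block B1 in
 * generate_pat() above. Returns the header size in bytes; the first
 * AAD bytes follow immediately after it inside B1. */
static unsigned int encode_aad_len(uint8_t b1[16], uint32_t assoclen)
{
	memset(b1, 0, 16);
	if (assoclen <= 65280) {		/* up to 2^16 - 2^8: 2-byte form */
		b1[0] = assoclen >> 8;
		b1[1] = assoclen & 0xff;
		return 2;
	}
	b1[0] = 0xff;				/* 0xfffe marker, 4-byte form */
	b1[1] = 0xfe;
	b1[2] = assoclen >> 24;
	b1[3] = (assoclen >> 16) & 0xff;
	b1[4] = (assoclen >> 8) & 0xff;
	b1[5] = assoclen & 0xff;
	return 6;
}

int main(void)
{
	uint8_t b1[16];
	unsigned int i, n;

	n = encode_aad_len(b1, 20);		/* small AAD: 2-byte header */
	printf("header bytes: %u\n", n);
	n = encode_aad_len(b1, 1 << 20);	/* large AAD: 6-byte header */
	for (i = 0; i < n; i++)
		printf("%02x ", b1[i]);
	printf("\n");
	return 0;
}
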
diff --git a/drivers/crypto/nx/nx-aes-ctr.c b/drivers/crypto/nx/nx-aes-ctr.c
index b6286f14680b..a37d009dc75c 100644
--- a/drivers/crypto/nx/nx-aes-ctr.c
+++ b/drivers/crypto/nx/nx-aes-ctr.c
@@ -88,30 +88,48 @@ static int ctr_aes_nx_crypt(struct blkcipher_desc *desc,
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
int rc;
- if (nbytes > nx_ctx->ap->databytelen)
- return -EINVAL;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes,
- csbcpb->cpb.aes_ctr.iv);
- if (rc)
- goto out;
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
+ do {
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ processed, csbcpb->cpb.aes_ctr.iv);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
+ processed += to_process;
+ } while (processed < nbytes);
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index 7bbc9a81da21..85a8d23cf29d 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -70,34 +70,52 @@ static int ecb_aes_nx_crypt(struct blkcipher_desc *desc,
{
struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ unsigned long irq_flags;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
int rc;
- if (nbytes > nx_ctx->ap->databytelen)
- return -EINVAL;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
- rc = nx_build_sg_lists(nx_ctx, desc, dst, src, nbytes, NULL);
- if (rc)
- goto out;
+ do {
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
+ rc = nx_build_sg_lists(nx_ctx, desc, dst, src, to_process,
+ processed, NULL);
+ if (rc)
+ goto out;
+
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ processed += to_process;
+ } while (processed < nbytes);
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index 6cca6c392b00..025d9a8d5b19 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -125,38 +125,187 @@ static int nx_gca(struct nx_crypto_ctx *nx_ctx,
struct aead_request *req,
u8 *out)
{
+ int rc;
struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
- int rc = -EINVAL;
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_ctx->in_sg;
+ unsigned int nbytes = req->assoclen;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
- if (req->assoclen > nx_ctx->ap->databytelen)
- goto out;
-
- if (req->assoclen <= AES_BLOCK_SIZE) {
+ if (nbytes <= AES_BLOCK_SIZE) {
scatterwalk_start(&walk, req->assoc);
- scatterwalk_copychunks(out, &walk, req->assoclen,
- SCATTERWALK_FROM_SG);
+ scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
-
- rc = 0;
- goto out;
+ return 0;
}
- nx_sg = nx_walk_and_build(nx_sg, nx_ctx->ap->sglen, req->assoc, 0,
- req->assoclen);
- nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg);
+ NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+ /*
+ * to_process: the data chunk to process in this update.
+ * This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;
+
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
+ req->assoc, processed, to_process);
+ nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
+ * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ return rc;
+
+ memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
+ csbcpb_aead->cpb.aes_gca.out_pat,
+ AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+ memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+
+ return rc;
+}
+
+static int gmac(struct aead_request *req, struct blkcipher_desc *desc)
+{
+ int rc;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *nx_sg;
+ unsigned int nbytes = req->assoclen;
+ unsigned int processed = 0, to_process;
+ u32 max_sg_len;
+
+ /* Set GMAC mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;
+
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ /* Copy IV */
+ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, desc->info, AES_BLOCK_SIZE);
+
+ do {
+ /*
+ * to_process: the data chunk to process in this update.
+ * This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
+
+ nx_sg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen,
+ req->assoc, processed, to_process);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
+ * sizeof(struct nx_sg);
+
+ csbcpb->cpb.aes_gcm.bit_length_data = 0;
+ csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_s0,
+ csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+
+out:
+ /* Restore GCM mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+ return rc;
+}
+
+static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc,
+ int enc)
+{
+ int rc;
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ char out[AES_BLOCK_SIZE];
+ struct nx_sg *in_sg, *out_sg;
+
+ /* For scenarios where the input message is zero length, AES CTR mode
+ * may be used. Set the source data to be a single block (16B) of all
+ * zeros, and set the input IV value to be the same as the GMAC IV
+ * value. - nx_wb 4.8.1.3 */
+
+ /* Change to ECB mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
+ sizeof(csbcpb->cpb.aes_ecb.key));
+ if (enc)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ /* Encrypt the counter/IV */
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) desc->info,
+ AES_BLOCK_SIZE, nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, sizeof(out),
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
if (rc)
goto out;
-
atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes));
- memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
+ /* Copy out the auth tag */
+ memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
+ /* Restore XCBC mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
+
+ /*
+ * ECB key uses the same region that GCM AAD and counter, so it's safe
+ * to just fill it with zeroes.
+ */
+ memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));
+
return rc;
}
@@ -166,88 +315,104 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct blkcipher_desc desc;
unsigned int nbytes = req->cryptlen;
+ unsigned int processed = 0, to_process;
+ unsigned long irq_flags;
+ u32 max_sg_len;
int rc = -EINVAL;
- if (nbytes > nx_ctx->ap->databytelen)
- goto out;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
desc.info = nx_ctx->priv.gcm.iv;
/* initialize the counter */
*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
- /* For scenarios where the input message is zero length, AES CTR mode
- * may be used. Set the source data to be a single block (16B) of all
- * zeros, and set the input IV value to be the same as the GMAC IV
- * value. - nx_wb 4.8.1.3 */
if (nbytes == 0) {
- char src[AES_BLOCK_SIZE] = {};
- struct scatterlist sg;
-
- desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
- if (IS_ERR(desc.tfm)) {
- rc = -ENOMEM;
+ if (req->assoclen == 0)
+ rc = gcm_empty(req, &desc, enc);
+ else
+ rc = gmac(req, &desc);
+ if (rc)
goto out;
- }
-
- crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
- NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
- NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);
-
- sg_init_one(&sg, src, AES_BLOCK_SIZE);
- if (enc)
- crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
- AES_BLOCK_SIZE);
else
- crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
- AES_BLOCK_SIZE);
- crypto_free_blkcipher(desc.tfm);
-
- rc = 0;
- goto out;
+ goto mac;
}
- desc.tfm = (struct crypto_blkcipher *)req->base.tfm;
-
+ /* Process associated data */
csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;
-
if (req->assoclen) {
rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
if (rc)
goto out;
}
- if (enc)
+ /* Set flags for encryption */
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
+ if (enc) {
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
- else
+ } else {
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
+ }
- csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+ /* page_limit: number of sg entries that fit on one page */
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+ /*
+ * to_process: the data chunk to process in this update.
+ * This value is bound by sg list limits.
+ */
+ to_process = min_t(u64, nbytes - processed,
+ nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+
+ if ((to_process + processed) < nbytes)
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ else
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;
- rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
- csbcpb->cpb.aes_gcm.iv_or_cnt);
- if (rc)
- goto out;
+ csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
+ desc.tfm = (struct crypto_blkcipher *) req->base.tfm;
+ rc = nx_build_sg_lists(nx_ctx, &desc, req->dst,
+ req->src, to_process, processed,
+ csbcpb->cpb.aes_gcm.iv_or_cnt);
+ if (rc)
+ goto out;
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
- atomic_inc(&(nx_ctx->stats->aes_ops));
- atomic64_add(csbcpb->csb.processed_byte_count,
- &(nx_ctx->stats->aes_bytes));
+ memcpy(desc.info, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
+ csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_gcm.in_s0,
+ csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+ atomic64_add(csbcpb->csb.processed_byte_count,
+ &(nx_ctx->stats->aes_bytes));
+
+ processed += to_process;
+ } while (processed < nbytes);
+mac:
if (enc) {
/* copy out the auth tag */
scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
req->dst, nbytes,
crypto_aead_authsize(crypto_aead_reqtfm(req)),
SCATTERWALK_TO_SG);
- } else if (req->assoclen) {
+ } else {
u8 *itag = nx_ctx->priv.gcm.iauth_tag;
u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;
- scatterwalk_map_and_copy(itag, req->dst, nbytes,
+ scatterwalk_map_and_copy(itag, req->src, nbytes,
crypto_aead_authsize(crypto_aead_reqtfm(req)),
SCATTERWALK_FROM_SG);
rc = memcmp(itag, otag,
@@ -255,6 +420,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
-EBADMSG : 0;
}
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
diff --git a/drivers/crypto/nx/nx-aes-xcbc.c b/drivers/crypto/nx/nx-aes-xcbc.c
index 93923e4628c0..03c4bf57d066 100644
--- a/drivers/crypto/nx/nx-aes-xcbc.c
+++ b/drivers/crypto/nx/nx-aes-xcbc.c
@@ -56,6 +56,77 @@ static int nx_xcbc_set_key(struct crypto_shash *desc,
return 0;
}
+/*
+ * Based on RFC 3566, for a zero-length message:
+ *
+ * n = 1
+ * K1 = E(K, 0x01010101010101010101010101010101)
+ * K3 = E(K, 0x03030303030303030303030303030303)
+ * E[0] = 0x00000000000000000000000000000000
+ * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
+ * E[1] = (K1, M[1] ^ E[0] ^ K3)
+ * Tag = M[1]
+ */
+static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
+{
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg, *out_sg;
+ u8 keys[2][AES_BLOCK_SIZE];
+ u8 key[32];
+ int rc = 0;
+
+ /* Change to ECB mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
+ memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
+ memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
+
+ /* K1 and K3 base patterns */
+ memset(keys[0], 0x01, sizeof(keys[0]));
+ memset(keys[1], 0x03, sizeof(keys[1]));
+
+ /* Generate K1 and K3 encrypting the patterns */
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
+ nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+ /* XOr K3 with the padding for a 0 length message */
+ keys[1][0] ^= 0x80;
+
+ /* Encrypt the final result */
+ memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
+ nx_ctx->ap->sglen);
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
+ nx_ctx->ap->sglen);
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+ atomic_inc(&(nx_ctx->stats->aes_ops));
+
+out:
+ /* Restore XCBC mode */
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+ memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
+ NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
+
+ return rc;
+}
+
static int nx_xcbc_init(struct shash_desc *desc)
{
struct xcbc_state *sctx = shash_desc_ctx(desc);
@@ -88,76 +159,99 @@ static int nx_xcbc_update(struct shash_desc *desc,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg;
- u32 to_process, leftover;
+ u32 to_process, leftover, total;
+ u32 max_sg_len;
+ unsigned long irq_flags;
int rc = 0;
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously and we're updating again,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.aes_xcbc.cv,
- csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
- }
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+
+ total = sctx->count + len;
/* 2 cases for total data len:
* 1: <= AES_BLOCK_SIZE: copy into state, return 0
* 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if (len + sctx->count <= AES_BLOCK_SIZE) {
+ if (total <= AES_BLOCK_SIZE) {
memcpy(sctx->buffer + sctx->count, data, len);
sctx->count += len;
goto out;
}
- /* to_process: the AES_BLOCK_SIZE data chunk to process in this
- * update */
- to_process = (sctx->count + len) & ~(AES_BLOCK_SIZE - 1);
- leftover = (sctx->count + len) & (AES_BLOCK_SIZE - 1);
-
- /* the hardware will not accept a 0 byte operation for this algorithm
- * and the operation MUST be finalized to be correct. So if we happen
- * to get an update that falls on a block sized boundary, we must
- * save off the last block to finalize with later. */
- if (!leftover) {
- to_process -= AES_BLOCK_SIZE;
- leftover = AES_BLOCK_SIZE;
- }
-
- if (sctx->count) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buffer,
- sctx->count, nx_ctx->ap->sglen);
- in_sg = nx_build_sg_list(in_sg, (u8 *)data,
- to_process - sctx->count,
- nx_ctx->ap->sglen);
+ in_sg = nx_ctx->in_sg;
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+
+ /* to_process: the AES_BLOCK_SIZE data chunk to process in this
+ * update */
+ to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+ leftover = total - to_process;
+
+ /* the hardware will not accept a 0 byte operation for this
+ * algorithm and the operation MUST be finalized to be correct.
+ * So if we happen to get an update that falls on a block sized
+ * boundary, we must save off the last block to finalize with
+ * later. */
+ if (!leftover) {
+ to_process -= AES_BLOCK_SIZE;
+ leftover = AES_BLOCK_SIZE;
+ }
+
+ if (sctx->count) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ (u8 *) sctx->buffer,
+ sctx->count,
+ max_sg_len);
+ }
+ in_sg = nx_build_sg_list(in_sg,
+ (u8 *) data,
+ to_process - sctx->count,
+ max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
- } else {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data, to_process,
- nx_ctx->ap->sglen);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ /* we've hit the nx chip previously and we're updating again,
+ * so copy over the partial digest */
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ memcpy(csbcpb->cpb.aes_xcbc.cv,
+ csbcpb->cpb.aes_xcbc.out_cv_mac,
+ AES_BLOCK_SIZE);
+ }
+
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
+ atomic_inc(&(nx_ctx->stats->aes_ops));
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- atomic_inc(&(nx_ctx->stats->aes_ops));
+ total -= to_process;
+ data += to_process - sctx->count;
+ sctx->count = 0;
+ in_sg = nx_ctx->in_sg;
+ } while (leftover > AES_BLOCK_SIZE);
/* copy the leftover back into the state struct */
- memcpy(sctx->buffer, data + len - leftover, leftover);
+ memcpy(sctx->buffer, data, leftover);
sctx->count = leftover;
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -167,21 +261,23 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
+ unsigned long irq_flags;
int rc = 0;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
memcpy(csbcpb->cpb.aes_xcbc.cv,
csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
} else if (sctx->count == 0) {
- /* we've never seen an update, so this is a 0 byte op. The
- * hardware cannot handle a 0 byte op, so just copy out the
- * known 0 byte result. This is cheaper than allocating a
- * software context to do a 0 byte op */
- u8 data[] = { 0x75, 0xf0, 0x25, 0x1d, 0x52, 0x8a, 0xc0, 0x1c,
- 0x45, 0x73, 0xdf, 0xd5, 0x84, 0xd7, 0x9f, 0x29 };
- memcpy(out, data, sizeof(data));
+ /*
+ * we've never seen an update, so this is a 0 byte op. The
+ * hardware cannot handle a 0 byte op, so just ECB to
+ * generate the hash.
+ */
+ rc = nx_xcbc_empty(desc, out);
goto out;
}
@@ -211,6 +307,7 @@ static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
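
A recurring subtlety in the xcbc update path above (and in the SHA paths that follow): the coprocessor rejects zero-byte operations and the MAC must be finalized explicitly, so when an update lands exactly on a block boundary the last full block is held back in the state buffer for the final call. Here is a minimal arithmetic-only sketch of that bookkeeping, assuming a 16-byte block and ignoring the actual crypto.

#include <stdio.h>
#include <stdint.h>

#define BLOCK 16

/* Given the bytes buffered so far plus the new data, decide how much to
 * hand to the coprocessor now and how much to keep for finalization.
 * Mirrors the to_process/leftover logic in nx_xcbc_update(). */
static void split_update(uint32_t buffered, uint32_t len,
			 uint32_t *to_process, uint32_t *leftover)
{
	uint32_t total = buffered + len;

	*to_process = total & ~(BLOCK - 1);	/* whole blocks only */
	*leftover   = total & (BLOCK - 1);

	/* A zero-byte leftover would mean finalizing with nothing left to
	 * submit, which the hardware refuses; keep one block back. */
	if (*leftover == 0 && *to_process > 0) {
		*to_process -= BLOCK;
		*leftover = BLOCK;
	}
}

int main(void)
{
	uint32_t tp, lo;

	split_update(0, 64, &tp, &lo);	/* exactly 4 blocks */
	printf("64B update    -> process %u, hold back %u\n", tp, lo);

	split_update(10, 30, &tp, &lo);	/* 40B total, not block aligned */
	printf("10+30B update -> process %u, hold back %u\n", tp, lo);
	return 0;
}
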
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 67024f2f0b78..da0b24a7633f 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -55,71 +55,91 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
- u64 to_process, leftover;
+ u64 to_process, leftover, total;
+ u32 max_sg_len;
+ unsigned long irq_flags;
int rc = 0;
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously and we're updating again,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha256.input_partial_digest,
- csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
- }
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
/* 2 cases for total data len:
- * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
- * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
+ * 1: < SHA256_BLOCK_SIZE: copy into state, return 0
+ * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if (len + sctx->count < SHA256_BLOCK_SIZE) {
+ total = sctx->count + len;
+ if (total < SHA256_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count, data, len);
sctx->count += len;
goto out;
}
- /* to_process: the SHA256_BLOCK_SIZE data chunk to process in this
- * update */
- to_process = (sctx->count + len) & ~(SHA256_BLOCK_SIZE - 1);
- leftover = (sctx->count + len) & (SHA256_BLOCK_SIZE - 1);
-
- if (sctx->count) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
- sctx->count, nx_ctx->ap->sglen);
- in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+ in_sg = nx_ctx->in_sg;
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+ /*
+ * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+ * this update. This value is also restricted by the sg list
+ * limits.
+ */
+ to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(SHA256_BLOCK_SIZE - 1);
+ leftover = total - to_process;
+
+ if (sctx->count) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ (u8 *) sctx->buf,
+ sctx->count, max_sg_len);
+ }
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
to_process - sctx->count,
- nx_ctx->ap->sglen);
+ max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
- } else {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
- to_process, nx_ctx->ap->sglen);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha256.input_partial_digest,
+ csbcpb->cpb.sha256.message_digest,
+ SHA256_DIGEST_SIZE);
+ }
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
- atomic_inc(&(nx_ctx->stats->sha256_ops));
+ atomic_inc(&(nx_ctx->stats->sha256_ops));
+ csbcpb->cpb.sha256.message_bit_length += (u64)
+ (csbcpb->cpb.sha256.spbc * 8);
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
+ total -= to_process;
+ data += to_process - sctx->count;
+ sctx->count = 0;
+ in_sg = nx_ctx->in_sg;
+ } while (leftover >= SHA256_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
- memcpy(sctx->buf, data + len - leftover, leftover);
+ memcpy(sctx->buf, data, leftover);
sctx->count = leftover;
-
- csbcpb->cpb.sha256.message_bit_length += (u64)
- (csbcpb->cpb.sha256.spbc * 8);
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -129,8 +149,13 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
+ u32 max_sg_len;
+ unsigned long irq_flags;
int rc;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
@@ -146,9 +171,9 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha256.message_bit_length += (u64)(sctx->count * 8);
in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
- sctx->count, nx_ctx->ap->sglen);
+ sctx->count, max_sg_len);
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA256_DIGEST_SIZE,
- nx_ctx->ap->sglen);
+ max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -168,6 +193,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
&(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -177,6 +203,9 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct sha256_state *octx = out;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
octx->count = sctx->count +
(csbcpb->cpb.sha256.message_bit_length / 8);
@@ -199,6 +228,7 @@ static int nx_sha256_export(struct shash_desc *desc, void *out)
octx->state[7] = SHA256_H7;
}
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
@@ -208,6 +238,9 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
const struct sha256_state *ictx = in;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
@@ -222,6 +255,7 @@ static int nx_sha256_import(struct shash_desc *desc, const void *in)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
}
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 08eee1122349..4ae5b0f221d5 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -55,73 +55,93 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg;
- u64 to_process, leftover, spbc_bits;
+ u64 to_process, leftover, total, spbc_bits;
+ u32 max_sg_len;
+ unsigned long irq_flags;
int rc = 0;
- if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
- /* we've hit the nx chip previously and we're updating again,
- * so copy over the partial digest */
- memcpy(csbcpb->cpb.sha512.input_partial_digest,
- csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
- }
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
/* 2 cases for total data len:
- * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
- * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
+ * 1: < SHA512_BLOCK_SIZE: copy into state, return 0
+ * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
+ total = sctx->count[0] + len;
+ if (total < SHA512_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count[0], data, len);
sctx->count[0] += len;
goto out;
}
- /* to_process: the SHA512_BLOCK_SIZE data chunk to process in this
- * update */
- to_process = (sctx->count[0] + len) & ~(SHA512_BLOCK_SIZE - 1);
- leftover = (sctx->count[0] + len) & (SHA512_BLOCK_SIZE - 1);
-
- if (sctx->count[0]) {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buf,
- sctx->count[0], nx_ctx->ap->sglen);
- in_sg = nx_build_sg_list(in_sg, (u8 *)data,
+ in_sg = nx_ctx->in_sg;
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
+ nx_ctx->ap->sglen);
+
+ do {
+ /*
+ * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+ * this update. This value is also restricted by the sg list
+ * limits.
+ */
+ to_process = min_t(u64, total, nx_ctx->ap->databytelen);
+ to_process = min_t(u64, to_process,
+ NX_PAGE_SIZE * (max_sg_len - 1));
+ to_process = to_process & ~(SHA512_BLOCK_SIZE - 1);
+ leftover = total - to_process;
+
+ if (sctx->count[0]) {
+ in_sg = nx_build_sg_list(nx_ctx->in_sg,
+ (u8 *) sctx->buf,
+ sctx->count[0], max_sg_len);
+ }
+ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
to_process - sctx->count[0],
- nx_ctx->ap->sglen);
- nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
- sizeof(struct nx_sg);
- } else {
- in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)data,
- to_process, nx_ctx->ap->sglen);
+ max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
sizeof(struct nx_sg);
- }
- NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
+ /*
+ * we've hit the nx chip previously and we're updating
+ * again, so copy over the partial digest.
+ */
+ memcpy(csbcpb->cpb.sha512.input_partial_digest,
+ csbcpb->cpb.sha512.message_digest,
+ SHA512_DIGEST_SIZE);
+ }
- if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
- rc = -EINVAL;
- goto out;
- }
-
- rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
- desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
- if (rc)
- goto out;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
+ desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
+ if (rc)
+ goto out;
+
+ atomic_inc(&(nx_ctx->stats->sha512_ops));
+ spbc_bits = csbcpb->cpb.sha512.spbc * 8;
+ csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
+ if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
+ csbcpb->cpb.sha512.message_bit_length_hi++;
+
+ /* everything after the first update is continuation */
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
- atomic_inc(&(nx_ctx->stats->sha512_ops));
+ total -= to_process;
+ data += to_process - sctx->count[0];
+ sctx->count[0] = 0;
+ in_sg = nx_ctx->in_sg;
+ } while (leftover >= SHA512_BLOCK_SIZE);
/* copy the leftover back into the state struct */
if (leftover)
- memcpy(sctx->buf, data + len - leftover, leftover);
+ memcpy(sctx->buf, data, leftover);
sctx->count[0] = leftover;
-
- spbc_bits = csbcpb->cpb.sha512.spbc * 8;
- csbcpb->cpb.sha512.message_bit_length_lo += spbc_bits;
- if (csbcpb->cpb.sha512.message_bit_length_lo < spbc_bits)
- csbcpb->cpb.sha512.message_bit_length_hi++;
-
- /* everything after the first update is continuation */
- NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -131,9 +151,15 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct nx_sg *in_sg, *out_sg;
+ u32 max_sg_len;
u64 count0;
+ unsigned long irq_flags;
int rc;
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+ max_sg_len = min_t(u32, nx_driver.of.max_sg_len, nx_ctx->ap->sglen);
+
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
@@ -152,9 +178,9 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
csbcpb->cpb.sha512.message_bit_length_hi++;
in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, sctx->count[0],
- nx_ctx->ap->sglen);
+ max_sg_len);
out_sg = nx_build_sg_list(nx_ctx->out_sg, out, SHA512_DIGEST_SIZE,
- nx_ctx->ap->sglen);
+ max_sg_len);
nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
@@ -174,6 +200,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return rc;
}
@@ -183,6 +210,9 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
struct sha512_state *octx = out;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
/* move message_bit_length (128 bits) into count and convert its value
* to bytes */
@@ -214,6 +244,7 @@ static int nx_sha512_export(struct shash_desc *desc, void *out)
octx->state[7] = SHA512_H7;
}
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
@@ -223,6 +254,9 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
const struct sha512_state *ictx = in;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
sctx->count[0] = ictx->count[0] & 0x3f;
@@ -240,6 +274,7 @@ static int nx_sha512_import(struct shash_desc *desc, const void *in)
NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
}
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
return 0;
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index bbdab6e5ccf0..5533fe31c90d 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -61,8 +61,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
do {
rc = vio_h_cop_sync(viodev, op);
- } while ((rc == -EBUSY && !may_sleep && retries--) ||
- (rc == -EBUSY && may_sleep && cond_resched()));
+ } while (rc == -EBUSY && !may_sleep && retries--);
if (rc) {
dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d "
@@ -114,13 +113,29 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
* have been described (or @sgmax elements have been written), the
* loop ends. min_t is used to ensure @end_addr falls on the same page
* as sg_addr, if not, we need to create another nx_sg element for the
- * data on the next page */
+ * data on the next page.
+ *
+ * Also when using vmalloc'ed data, every time that a system page
+ * boundary is crossed the physical address needs to be re-calculated.
+ */
for (sg = sg_head; sg_len < len; sg++) {
+ u64 next_page;
+
sg->addr = sg_addr;
- sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr);
- sg->len = sg_addr - sg->addr;
+ sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE),
+ end_addr);
+
+ next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE;
+ sg->len = min_t(u64, sg_addr, next_page) - sg->addr;
sg_len += sg->len;
+ if (sg_addr >= next_page &&
+ is_vmalloc_addr(start_addr + sg_len)) {
+ sg_addr = page_to_phys(vmalloc_to_page(
+ start_addr + sg_len));
+ end_addr = sg_addr + len - sg_len;
+ }
+
if ((sg - sg_head) == sgmax) {
pr_err("nx: scatter/gather list overflow, pid: %d\n",
current->pid);
@@ -196,6 +211,8 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* @dst: destination scatterlist
* @src: source scatterlist
* @nbytes: length of data described in the scatterlists
+ * @offset: number of bytes to fast-forward past at the beginning of
+ * scatterlists.
* @iv: destination for the iv data, if the algorithm requires it
*
* This is common code shared by all the AES algorithms. It uses the block
@@ -207,6 +224,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes,
+ unsigned int offset,
u8 *iv)
{
struct nx_sg *nx_insg = nx_ctx->in_sg;
@@ -215,8 +233,10 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
if (iv)
memcpy(iv, desc->info, AES_BLOCK_SIZE);
- nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
- nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
+ nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
+ offset, nbytes);
+ nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
+ offset, nbytes);
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
@@ -235,6 +255,7 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
*/
void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function)
{
+ spin_lock_init(&nx_ctx->lock);
memset(nx_ctx->kmem, 0, nx_ctx->kmem_len);
nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT;
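
The nx_build_sg_list() change above caps each scatter/gather entry at a system page boundary so that the physical address of vmalloc'ed data can be re-resolved page by page. Here is a simplified, arithmetic-only sketch of that per-page splitting; there is no physical-address lookup, and PAGE and max_entries are illustrative stand-ins for the kernel's PAGE_SIZE and the device's sg limit.

#include <stdio.h>
#include <stdint.h>

#define PAGE 4096

/* Split [addr, addr+len) into entries that never cross a page boundary,
 * the same walk nx_build_sg_list() performs before resolving each page's
 * physical address. Returns the number of entries produced. */
static unsigned int split_at_pages(uint64_t addr, uint64_t len,
				   unsigned int max_entries)
{
	uint64_t end = addr + len;
	unsigned int n = 0;

	while (addr < end && n < max_entries) {
		uint64_t next_page = (addr & ~(uint64_t)(PAGE - 1)) + PAGE;
		uint64_t stop = next_page < end ? next_page : end;

		printf("entry %u: addr=0x%llx len=%llu\n", n,
		       (unsigned long long)addr,
		       (unsigned long long)(stop - addr));
		addr = stop;
		n++;
	}
	return n;
}

int main(void)
{
	/* A 10000-byte buffer starting 100 bytes into a page spans 3 pages. */
	split_at_pages(0x10000 + 100, 10000, 32);
	return 0;
}
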
diff --git a/drivers/crypto/nx/nx.h b/drivers/crypto/nx/nx.h
index 3232b182dd28..befda07ca1da 100644
--- a/drivers/crypto/nx/nx.h
+++ b/drivers/crypto/nx/nx.h
@@ -117,6 +117,7 @@ struct nx_ctr_priv {
};
struct nx_crypto_ctx {
+ spinlock_t lock; /* synchronize access to the context */
void *kmem; /* unaligned, kmalloc'd buffer */
size_t kmem_len; /* length of kmem */
struct nx_csbcpb *csbcpb; /* aligned page given to phyp @ hcall time */
@@ -155,7 +156,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int, u32);
int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
struct scatterlist *, struct scatterlist *, unsigned int,
- u8 *);
+ unsigned int, u8 *);
struct nx_sg *nx_walk_and_build(struct nx_sg *, unsigned int,
struct scatterlist *, unsigned int,
unsigned int);