author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 13:33:26 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 13:33:26 -0800
commit    e3aa91a7cb21a595169b20c64f63ca39a91a0c43 (patch)
tree      6a92a2e595629949a45336c770c2408abba8444d /drivers/crypto/nx/nx.c
parent    78a45c6f067824cf5d0a9fedea7339ac2e28603c (diff)
parent    8606813a6c8997fd3bb805186056d78670eb86ca (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:

 - The crypto API is now documented :)
 - Disallow arbitrary module loading through crypto API.
 - Allow get request with empty driver name through crypto_user.
 - Allow speed testing of arbitrary hash functions.
 - Add caam support for ctr(aes), gcm(aes) and their derivatives.
 - nx now supports concurrent hashing properly.
 - Add sahara support for SHA1/256.
 - Add ARM64 version of CRC32.
 - Misc fixes.

* git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (77 commits)
  crypto: tcrypt - Allow speed testing of arbitrary hash functions
  crypto: af_alg - add user space interface for AEAD
  crypto: qat - fix problem with coalescing enable logic
  crypto: sahara - add support for SHA1/256
  crypto: sahara - replace tasklets with kthread
  crypto: sahara - add support for i.MX53
  crypto: sahara - fix spinlock initialization
  crypto: arm - replace memset by memzero_explicit
  crypto: powerpc - replace memset by memzero_explicit
  crypto: sha - replace memset by memzero_explicit
  crypto: sparc - replace memset by memzero_explicit
  crypto: algif_skcipher - initialize upon init request
  crypto: algif_skcipher - removed unneeded code
  crypto: algif_skcipher - Fixed blocking recvmsg
  crypto: drbg - use memzero_explicit() for clearing sensitive data
  crypto: drbg - use MODULE_ALIAS_CRYPTO
  crypto: include crypto- module prefix in template
  crypto: user - add MODULE_ALIAS
  crypto: sha-mb - remove a bogus NULL check
  crypto: qat - Fix 64 bytes requests
  ...
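A recurring item in this batch is the memset -> memzero_explicit conversion
for sensitive buffers. A plain memset() on a buffer that is dead afterwards
may legally be removed by the compiler's dead-store elimination, while
memzero_explicit() guarantees the clear actually happens. A minimal sketch of
the pattern (use_key() and do_crypto() are hypothetical, not part of this
merge):

    #include <linux/types.h>
    #include <linux/string.h>

    static void use_key(const u8 *key, size_t len);

    static void do_crypto(void)
    {
            u8 key[32];

            use_key(key, sizeof(key));

            /* memset(key, 0, sizeof(key)) could be elided here, since
             * 'key' is never read again; memzero_explicit() includes a
             * compiler barrier so the zeroing is guaranteed to happen. */
            memzero_explicit(key, sizeof(key));
    }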
Diffstat (limited to 'drivers/crypto/nx/nx.c')
-rw-r--r--  drivers/crypto/nx/nx.c | 127
 1 file changed, 108 insertions(+), 19 deletions(-)
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index 5533fe31c90d..a392465d3e3f 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -90,7 +90,7 @@ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx,
*/
struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
u8 *start_addr,
- unsigned int len,
+ unsigned int *len,
u32 sgmax)
{
unsigned int sg_len = 0;
@@ -106,7 +106,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
else
sg_addr = __pa(sg_addr);
- end_addr = sg_addr + len;
+ end_addr = sg_addr + *len;
/* each iteration will write one struct nx_sg element and add the
* length of data described by that element to sg_len. Once @len bytes
@@ -118,7 +118,7 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
* Also when using vmalloc'ed data, every time that a system page
* boundary is crossed the physical address needs to be re-calculated.
*/
- for (sg = sg_head; sg_len < len; sg++) {
+ for (sg = sg_head; sg_len < *len; sg++) {
u64 next_page;
sg->addr = sg_addr;
@@ -133,15 +133,17 @@ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head,
is_vmalloc_addr(start_addr + sg_len)) {
sg_addr = page_to_phys(vmalloc_to_page(
start_addr + sg_len));
- end_addr = sg_addr + len - sg_len;
+ end_addr = sg_addr + *len - sg_len;
}
if ((sg - sg_head) == sgmax) {
pr_err("nx: scatter/gather list overflow, pid: %d\n",
current->pid);
- return NULL;
+ sg++;
+ break;
}
}
+ *len = sg_len;
/* return the moved sg_head pointer */
return sg;
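The hunks above change nx_build_sg_list() to take @len by reference: instead
of returning NULL when the list would overflow, it now stops at @sgmax
entries and writes back through *len how many bytes were actually described,
so the caller can process the remainder in a later operation. A standalone
userspace model of this clamp-and-report pattern (names and the fixed @chunk
splitting are illustrative only; the real function deals in physical
addresses and page boundaries):

    struct sg_ent {
            unsigned long addr;
            unsigned int len;
    };

    /* Describe up to *len bytes of buf in at most sgmax entries; on
     * return, *len holds the number of bytes actually covered. */
    static struct sg_ent *build_list(struct sg_ent *head, unsigned char *buf,
                                     unsigned int *len, unsigned int sgmax,
                                     unsigned int chunk)
    {
            struct sg_ent *sg = head;
            unsigned int done = 0;

            while (done < *len) {
                    if ((unsigned int)(sg - head) == sgmax)
                            break;  /* list full: clamp instead of failing */
                    sg->addr = (unsigned long)(buf + done);
                    sg->len = (*len - done < chunk) ? *len - done : chunk;
                    done += sg->len;
                    sg++;
            }
            *len = done;
            return sg;      /* one past the last entry written, as above */
    }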
@@ -160,11 +162,11 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
unsigned int sglen,
struct scatterlist *sg_src,
unsigned int start,
- unsigned int src_len)
+ unsigned int *src_len)
{
struct scatter_walk walk;
struct nx_sg *nx_sg = nx_dst;
- unsigned int n, offset = 0, len = src_len;
+ unsigned int n, offset = 0, len = *src_len;
char *dst;
/* we need to fast forward through @start bytes first */
@@ -182,27 +184,101 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
* element we're currently looking at */
scatterwalk_advance(&walk, start - offset);
- while (len && nx_sg) {
+ while (len && (nx_sg - nx_dst) < sglen) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
- scatterwalk_start(&walk, sg_next(walk.sg));
+ /* When the scatterlist is chained, scatterwalk_sg_next()
+ * handles it properly */
+ scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
dst = scatterwalk_map(&walk);
- nx_sg = nx_build_sg_list(nx_sg, dst, n, sglen);
+ nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst));
len -= n;
scatterwalk_unmap(dst);
scatterwalk_advance(&walk, n);
scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len);
}
+ /* update to_process */
+ *src_len -= len;
/* return the moved destination pointer */
return nx_sg;
}
/**
+ * trim_sg_list - trim the scatter/gather list so it stays within bounds
+ * @sg: sg list head
+ * @end: sg list end
+ * @delta: number of bytes to crop from the tail in order to bound the list
+ */
+static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
+{
+ while (delta && end > sg) {
+ struct nx_sg *last = end - 1;
+
+ if (last->len > delta) {
+ last->len -= delta;
+ delta = 0;
+ } else {
+ end--;
+ delta -= last->len;
+ }
+ }
+ return (sg - end) * sizeof(struct nx_sg);
+}
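trim_sg_list() walks backwards from the tail, dropping or shrinking entries
until @delta bytes are gone, then returns the list size in bytes as a
negative number (the sign convention used for op.inlen/op.outlen below). A
standalone walkthrough with made-up values and a simplified struct layout:

    #include <stdio.h>

    struct nx_sg {
            unsigned long addr;
            unsigned int len;
    };

    /* Standalone copy of the trimming logic above, for illustration. */
    static long trim(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
    {
            while (delta && end > sg) {
                    struct nx_sg *last = end - 1;

                    if (last->len > delta) {
                            last->len -= delta;
                            delta = 0;
                    } else {
                            end--;
                            delta -= last->len;
                    }
            }
            return (sg - end) * (long)sizeof(struct nx_sg);
    }

    int main(void)
    {
            struct nx_sg list[] = { { 0, 4096 }, { 0, 4096 }, { 0, 100 } };

            /* Cropping 612 bytes drops the 100-byte tail entry and shrinks
             * the middle one to 3584; two entries remain, so the result is
             * -2 * sizeof(struct nx_sg). */
            printf("%ld\n", trim(list, list + 3, 612));
            return 0;
    }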
+
+/**
+ * nx_sha_build_sg_list - walk and build the sg list for the SHA modes,
+ * using the right bounds and limits.
+ * @nx_ctx: NX crypto context for the lists we're building
+ * @nx_in_outsg: current in or out sg list
+ * @op_len: current op_len, used in order to build a sg list
+ * @nbytes: number of bytes to be processed
+ * @offset: buf offset
+ * @mode: SHA256 or SHA512
+ */
+int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+ struct nx_sg *nx_in_outsg,
+ s64 *op_len,
+ unsigned int *nbytes,
+ u8 *offset,
+ u32 mode)
+{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
+ struct nx_sg *nx_insg = nx_in_outsg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+ nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+
+ switch (mode) {
+ case NX_DS_SHA256:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+ break;
+ case NX_DS_SHA512:
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+ break;
+ default:
+ return -EINVAL;
+ }
+ *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+
+ return 0;
+}
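The delta computation above relies on the block size being a power of two:
n & ~(block - 1) rounds n down to a whole number of blocks, so delta is the
sub-block tail that trim_sg_list() crops and the driver carries over into
the next operation. With made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int n = 200, block = 64;       /* 64 = SHA256_BLOCK_SIZE */
            unsigned int whole = n & ~(block - 1);  /* 192: three full blocks */

            printf("delta = %u\n", n - whole);      /* prints 8 */
            return 0;
    }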
+
+/**
* nx_build_sg_lists - walk the input scatterlists and build arrays of NX
* scatterlists based on them.
*
@@ -223,26 +299,39 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src,
- unsigned int nbytes,
+ unsigned int *nbytes,
unsigned int offset,
u8 *iv)
{
+ unsigned int delta = 0;
+ unsigned int total = *nbytes;
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
+ unsigned int max_sg_len;
+
+ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
if (iv)
memcpy(iv, desc->info, AES_BLOCK_SIZE);
- nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src,
- offset, nbytes);
- nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst,
- offset, nbytes);
+ *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+
+ nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst,
+ offset, nbytes);
+ nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src,
+ offset, nbytes);
+
+ if (*nbytes < total)
+ delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1));
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
- nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
- nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
+ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
return 0;
}
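As the comment in the hunk above notes, op.inlen and op.outlen are
deliberately negative: trim_sg_list() returns (sg - end) * sizeof(struct
nx_sg), so the sign tells phyp the parameter is a scatter/gather list rather
than a flat buffer, while the magnitude is the list size in bytes. For a
two-entry list (assuming sizeof(struct nx_sg) == 16):

    op.inlen = (head - end) * sizeof(struct nx_sg);  /* -2 * 16 = -32 */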
@@ -540,10 +629,10 @@ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode)
/* we need an extra page for csbcpb_aead for these modes */
if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM)
- nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
else
- nx_ctx->kmem_len = (3 * NX_PAGE_SIZE) +
+ nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) +
sizeof(struct nx_csbcpb);
nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL);