author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-05 17:33:35 +0000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-05 17:33:35 +0000
commit     cfb4b571e8b56b65d1a893bda5153647fda823b9 (patch)
tree       d507f6ab8f0eb8bc52ad7b9371d442ecafef35d4
parent     6992ca0dd017ebaa2bf8e9dcc49f1c3b7033b082 (diff)
parent     55d0a513a0e202c68af2c8f4b1e923a345227bbb (diff)
Merge tag 's390-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull more s390 updates from Vasily Gorbik:
 "The second round of s390 fixes and features for 5.6:

  - Add KPROBES_ON_FTRACE support

  - Add EP11 AES secure keys support

  - PAES rework and prerequisites for paes-s390 ciphers selftests

  - Fix page table upgrade for hugetlbfs"

* tag 's390-5.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/pkey/zcrypt: Support EP11 AES secure keys
  s390/zcrypt: extend EP11 card and queue sysfs attributes
  s390/zcrypt: add new low level ep11 functions support file
  s390/zcrypt: ep11 structs rework, export zcrypt_send_ep11_cprb
  s390/zcrypt: enable card/domain autoselect on ep11 cprbs
  s390/crypto: enable clear key values for paes ciphers
  s390/pkey: Add support for key blob with clear key value
  s390/crypto: Rework on paes implementation
  s390: support KPROBES_ON_FTRACE
  s390/mm: fix dynamic pagetable upgrade for hugetlbfs
-rw-r--r--  Documentation/features/debug/kprobes-on-ftrace/arch-support.txt | 2
-rw-r--r--  arch/s390/Kconfig | 1
-rw-r--r--  arch/s390/crypto/paes_s390.c | 230
-rw-r--r--  arch/s390/include/asm/kprobes.h | 1
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/uapi/asm/pkey.h | 69
-rw-r--r--  arch/s390/include/uapi/asm/zcrypt.h | 32
-rw-r--r--  arch/s390/kernel/ftrace.c | 80
-rw-r--r--  arch/s390/kernel/kprobes.c | 61
-rw-r--r--  arch/s390/kernel/mcount.S | 6
-rw-r--r--  arch/s390/mm/hugetlbpage.c | 100
-rw-r--r--  drivers/s390/crypto/Makefile | 3
-rw-r--r--  drivers/s390/crypto/pkey_api.c | 470
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c | 27
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h | 1
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.h | 1
-rw-r--r--  drivers/s390/crypto/zcrypt_cex4.c | 273
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c | 1293
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.h | 124
19 files changed, 2494 insertions(+), 282 deletions(-)
diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 4fae0464ddff..32b297295fff 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -24,7 +24,7 @@
| parisc: | ok |
| powerpc: | ok |
| riscv: | TODO |
- | s390: | TODO |
+ | s390: | ok |
| sh: | TODO |
| sparc: | TODO |
| um: | TODO |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 734996784d03..8abe77536d9d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -156,6 +156,7 @@ config S390
select HAVE_KERNEL_UNCOMPRESSED
select HAVE_KERNEL_XZ
select HAVE_KPROBES
+ select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_KVM
select HAVE_LIVEPATCH
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index e2a85783f804..f3caeb17c85b 100644
--- a/arch/s390/crypto/paes_s390.c
+++ b/arch/s390/crypto/paes_s390.c
@@ -5,7 +5,7 @@
* s390 implementation of the AES Cipher Algorithm with protected keys.
*
* s390 Version:
- * Copyright IBM Corp. 2017,2019
+ * Copyright IBM Corp. 2017,2020
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
*/
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
@@ -32,11 +33,11 @@
* is called. As paes can handle different kinds of key blobs
* and padding is also possible, the limits need to be generous.
*/
-#define PAES_MIN_KEYSIZE 64
-#define PAES_MAX_KEYSIZE 256
+#define PAES_MIN_KEYSIZE 16
+#define PAES_MAX_KEYSIZE 320
static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
@@ -53,19 +54,46 @@ struct key_blob {
unsigned int keylen;
};
-static inline int _copy_key_to_kb(struct key_blob *kb,
- const u8 *key,
- unsigned int keylen)
-{
- if (keylen <= sizeof(kb->keybuf))
+static inline int _key_to_kb(struct key_blob *kb,
+ const u8 *key,
+ unsigned int keylen)
+{
+ struct clearkey_header {
+ u8 type;
+ u8 res0[3];
+ u8 version;
+ u8 res1[3];
+ u32 keytype;
+ u32 len;
+ } __packed * h;
+
+ switch (keylen) {
+ case 16:
+ case 24:
+ case 32:
+ /* clear key value, prepare pkey clear key token in keybuf */
+ memset(kb->keybuf, 0, sizeof(kb->keybuf));
+ h = (struct clearkey_header *) kb->keybuf;
+ h->version = 0x02; /* TOKVER_CLEAR_KEY */
+ h->keytype = (keylen - 8) >> 3;
+ h->len = keylen;
+ memcpy(kb->keybuf + sizeof(*h), key, keylen);
+ kb->keylen = sizeof(*h) + keylen;
kb->key = kb->keybuf;
- else {
- kb->key = kmalloc(keylen, GFP_KERNEL);
- if (!kb->key)
- return -ENOMEM;
+ break;
+ default:
+ /* other key material, let pkey handle this */
+ if (keylen <= sizeof(kb->keybuf))
+ kb->key = kb->keybuf;
+ else {
+ kb->key = kmalloc(keylen, GFP_KERNEL);
+ if (!kb->key)
+ return -ENOMEM;
+ }
+ memcpy(kb->key, key, keylen);
+ kb->keylen = keylen;
+ break;
}
- memcpy(kb->key, key, keylen);
- kb->keylen = keylen;
return 0;
}
@@ -82,16 +110,18 @@ static inline void _free_kb_keybuf(struct key_blob *kb)
struct s390_paes_ctx {
struct key_blob kb;
struct pkey_protkey pk;
+ spinlock_t pk_lock;
unsigned long fc;
};
struct s390_pxts_ctx {
struct key_blob kb[2];
struct pkey_protkey pk[2];
+ spinlock_t pk_lock;
unsigned long fc;
};
-static inline int __paes_convert_key(struct key_blob *kb,
+static inline int __paes_keyblob2pkey(struct key_blob *kb,
struct pkey_protkey *pk)
{
int i, ret;
@@ -106,22 +136,18 @@ static inline int __paes_convert_key(struct key_blob *kb,
return ret;
}
-static int __paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
{
- unsigned long fc;
+ struct pkey_protkey pkey;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_keyblob2pkey(&ctx->kb, &pkey))
return -EINVAL;
- /* Pick the correct function code based on the protected key type */
- fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
- (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
-
- /* Check if the function code is available */
- ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(&ctx->pk, &pkey, sizeof(pkey));
+ spin_unlock_bh(&ctx->pk_lock);
- return ctx->fc ? 0 : -EINVAL;
+ return 0;
}
static int ecb_paes_init(struct crypto_skcipher *tfm)
@@ -129,6 +155,7 @@ static int ecb_paes_init(struct crypto_skcipher *tfm)
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -140,6 +167,24 @@ static void ecb_paes_exit(struct crypto_skcipher *tfm)
_free_kb_keybuf(&ctx->kb);
}
+static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx)
+{
+ unsigned long fc;
+
+ if (__paes_convert_key(ctx))
+ return -EINVAL;
+
+ /* Pick the correct function code based on the protected key type */
+ fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 :
+ (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KM_PAES_256 : 0;
+
+ /* Check if the function code is available */
+ ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
+
+ return ctx->fc ? 0 : -EINVAL;
+}
+
static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -147,11 +192,11 @@ static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
- rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+ rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
- return __paes_set_key(ctx);
+ return __ecb_paes_set_key(ctx);
}
static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
@@ -161,18 +206,31 @@ static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier)
struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret;
+ struct {
+ u8 key[MAXPROTKEYSIZE];
+ } param;
ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
- k = cpacf_km(ctx->fc | modifier, ctx->pk.protkey,
+ k = cpacf_km(ctx->fc | modifier, &param,
walk.dst.virt.addr, walk.src.virt.addr, n);
if (k)
ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
- if (__paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
@@ -210,6 +268,7 @@ static int cbc_paes_init(struct crypto_skcipher *tfm)
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -221,11 +280,11 @@ static void cbc_paes_exit(struct crypto_skcipher *tfm)
_free_kb_keybuf(&ctx->kb);
}
-static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_convert_key(ctx))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -246,7 +305,7 @@ static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
- rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+ rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
@@ -268,8 +327,12 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
+
memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
+ spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
while ((nbytes = walk.nbytes) != 0) {
/* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
@@ -280,9 +343,11 @@ static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier)
ret = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
- if (__cbc_paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
return ret;
@@ -322,6 +387,7 @@ static int xts_paes_init(struct crypto_skcipher *tfm)
ctx->kb[0].key = NULL;
ctx->kb[1].key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -334,12 +400,27 @@ static void xts_paes_exit(struct crypto_skcipher *tfm)
_free_kb_keybuf(&ctx->kb[1]);
}
-static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx)
+{
+ struct pkey_protkey pkey0, pkey1;
+
+ if (__paes_keyblob2pkey(&ctx->kb[0], &pkey0) ||
+ __paes_keyblob2pkey(&ctx->kb[1], &pkey1))
+ return -EINVAL;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(&ctx->pk[0], &pkey0, sizeof(pkey0));
+ memcpy(&ctx->pk[1], &pkey1, sizeof(pkey1));
+ spin_unlock_bh(&ctx->pk_lock);
+
+ return 0;
+}
+
+static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb[0], &ctx->pk[0]) ||
- __paes_convert_key(&ctx->kb[1], &ctx->pk[1]))
+ if (__xts_paes_convert_key(ctx))
return -EINVAL;
if (ctx->pk[0].type != ctx->pk[1].type)
@@ -371,10 +452,10 @@ static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
_free_kb_keybuf(&ctx->kb[0]);
_free_kb_keybuf(&ctx->kb[1]);
- rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
+ rc = _key_to_kb(&ctx->kb[0], in_key, key_len);
if (rc)
return rc;
- rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
+ rc = _key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
if (rc)
return rc;
@@ -416,15 +497,17 @@ static int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier)
ret = skcipher_walk_virt(&walk, req, false);
if (ret)
return ret;
+
keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64;
offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 16 : 0;
-retry:
+
memset(&pcc_param, 0, sizeof(pcc_param));
memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
+ spin_lock_bh(&ctx->pk_lock);
memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen);
- cpacf_pcc(ctx->fc, pcc_param.key + offset);
-
memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen);
+ spin_unlock_bh(&ctx->pk_lock);
+ cpacf_pcc(ctx->fc, pcc_param.key + offset);
memcpy(xts_param.init, pcc_param.xts, 16);
while ((nbytes = walk.nbytes) != 0) {
@@ -435,11 +518,15 @@ retry:
if (k)
ret = skcipher_walk_done(&walk, nbytes - k);
if (k < n) {
- if (__xts_paes_set_key(ctx) != 0)
+ if (__xts_paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
- goto retry;
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(xts_param.key + offset,
+ ctx->pk[0].protkey, keylen);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
+
return ret;
}
@@ -476,6 +563,7 @@ static int ctr_paes_init(struct crypto_skcipher *tfm)
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
ctx->kb.key = NULL;
+ spin_lock_init(&ctx->pk_lock);
return 0;
}
@@ -487,11 +575,11 @@ static void ctr_paes_exit(struct crypto_skcipher *tfm)
_free_kb_keybuf(&ctx->kb);
}
-static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
{
unsigned long fc;
- if (__paes_convert_key(&ctx->kb, &ctx->pk))
+ if (__paes_convert_key(ctx))
return -EINVAL;
/* Pick the correct function code based on the protected key type */
@@ -513,7 +601,7 @@ static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm);
_free_kb_keybuf(&ctx->kb);
- rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
+ rc = _key_to_kb(&ctx->kb, in_key, key_len);
if (rc)
return rc;
@@ -543,49 +631,65 @@ static int ctr_paes_crypt(struct skcipher_request *req)
struct skcipher_walk walk;
unsigned int nbytes, n, k;
int ret, locked;
-
- locked = spin_trylock(&ctrblk_lock);
+ struct {
+ u8 key[MAXPROTKEYSIZE];
+ } param;
ret = skcipher_walk_virt(&walk, req, false);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
+
+ locked = mutex_trylock(&ctrblk_lock);
+
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE;
if (nbytes >= 2*AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk.iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
- k = cpacf_kmctr(ctx->fc, ctx->pk.protkey, walk.dst.virt.addr,
+ k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr,
walk.src.virt.addr, n, ctrptr);
if (k) {
if (ctrptr == ctrblk)
memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, nbytes - n);
+ ret = skcipher_walk_done(&walk, nbytes - k);
}
if (k < n) {
- if (__ctr_paes_set_key(ctx) != 0) {
+ if (__paes_convert_key(ctx)) {
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
return skcipher_walk_done(&walk, -EIO);
}
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
}
if (locked)
- spin_unlock(&ctrblk_lock);
+ mutex_unlock(&ctrblk_lock);
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
if (nbytes) {
while (1) {
- if (cpacf_kmctr(ctx->fc, ctx->pk.protkey, buf,
+ if (cpacf_kmctr(ctx->fc, &param, buf,
walk.src.virt.addr, AES_BLOCK_SIZE,
walk.iv) == AES_BLOCK_SIZE)
break;
- if (__ctr_paes_set_key(ctx) != 0)
+ if (__paes_convert_key(ctx))
return skcipher_walk_done(&walk, -EIO);
+ spin_lock_bh(&ctx->pk_lock);
+ memcpy(param.key, ctx->pk.protkey, MAXPROTKEYSIZE);
+ spin_unlock_bh(&ctx->pk_lock);
}
memcpy(walk.dst.virt.addr, buf, nbytes);
crypto_inc(walk.iv, AES_BLOCK_SIZE);
- ret = skcipher_walk_done(&walk, 0);
+ ret = skcipher_walk_done(&walk, nbytes);
}
return ret;
@@ -618,12 +722,12 @@ static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg)
static void paes_s390_fini(void)
{
- if (ctrblk)
- free_page((unsigned long) ctrblk);
__crypto_unregister_skcipher(&ctr_paes_alg);
__crypto_unregister_skcipher(&xts_paes_alg);
__crypto_unregister_skcipher(&cbc_paes_alg);
__crypto_unregister_skcipher(&ecb_paes_alg);
+ if (ctrblk)
+ free_page((unsigned long) ctrblk);
}
static int __init paes_s390_init(void)
@@ -661,14 +765,14 @@ static int __init paes_s390_init(void)
if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) ||
cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) {
- ret = crypto_register_skcipher(&ctr_paes_alg);
- if (ret)
- goto out_err;
ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
if (!ctrblk) {
ret = -ENOMEM;
goto out_err;
}
+ ret = crypto_register_skcipher(&ctr_paes_alg);
+ if (ret)
+ goto out_err;
}
return 0;
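
A quick way to exercise the new clear-key path of the reworked paes code above is the kernel crypto userspace API: with PAES_MIN_KEYSIZE lowered to 16, a raw 16/24/32-byte AES key set on one of the paes skciphers is wrapped into a TOKVER_CLEAR_KEY pkey token by _key_to_kb(). A minimal sketch, not part of the patch, assuming CONFIG_CRYPTO_PAES_S390 and CONFIG_CRYPTO_USER_API_SKCIPHER are enabled and the machine has the required CPACF/PCKMO facilities:

/*
 * Hedged example, not part of the patch: set a raw 32-byte clear key on
 * "ecb(paes)" via AF_ALG. The reworked _key_to_kb() wraps it into a
 * TOKVER_CLEAR_KEY token; with the old 64-byte PAES_MIN_KEYSIZE this
 * setkey would have been rejected.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "skcipher",
		.salg_name   = "ecb(paes)",
	};
	unsigned char key[32] = { 0 };	/* 16, 24 or 32 bytes are accepted now */
	int tfmfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfmfd < 0 || bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		perror("ecb(paes)");
		return 1;
	}
	if (setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key)) < 0) {
		perror("ALG_SET_KEY");
		return 1;
	}
	puts("clear key accepted by paes");
	close(tfmfd);
	return 0;
}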
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index b106aa29bf55..09cdb632a490 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -54,7 +54,6 @@ typedef u16 kprobe_opcode_t;
struct arch_specific_insn {
/* copy of original instruction */
kprobe_opcode_t *insn;
- unsigned int is_ftrace_insn : 1;
};
struct prev_kprobe {
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index a4d38092530a..85e944f04c70 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -33,6 +33,8 @@
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
#include <asm/setup.h>
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/uapi/asm/pkey.h b/arch/s390/include/uapi/asm/pkey.h
index e22f0720bbb8..d27d7d329263 100644
--- a/arch/s390/include/uapi/asm/pkey.h
+++ b/arch/s390/include/uapi/asm/pkey.h
@@ -25,10 +25,11 @@
#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys have always 136 bytes */
+#define MINEP11AESKEYBLOBSIZE 256 /* min EP11 AES key blob size */
+#define MAXEP11AESKEYBLOBSIZE 320 /* max EP11 AES key blob size */
-/* Minimum and maximum size of a key blob */
+/* Minimum size of a key blob */
#define MINKEYBLOBSIZE SECKEYBLOBSIZE
-#define MAXKEYBLOBSIZE MAXAESCIPHERKEYSIZE
/* defines for the type field within the pkey_protkey struct */
#define PKEY_KEYTYPE_AES_128 1
@@ -39,6 +40,7 @@
enum pkey_key_type {
PKEY_TYPE_CCA_DATA = (__u32) 1,
PKEY_TYPE_CCA_CIPHER = (__u32) 2,
+ PKEY_TYPE_EP11 = (__u32) 3,
};
/* the newer ioctls use a pkey_key_size enum for key size information */
@@ -200,7 +202,7 @@ struct pkey_kblob2pkey {
/*
* Generate secure key, version 2.
- * Generate either a CCA AES secure key or a CCA AES cipher key.
+ * Generate CCA AES secure key, CCA AES cipher key or EP11 AES secure key.
* There needs to be a list of apqns given with at least one entry in there.
* All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
* is not supported. The implementation walks through the list of apqns and
@@ -210,10 +212,13 @@ struct pkey_kblob2pkey {
* (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
* generate a list of apqns based on the key type to generate.
* The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflag argument for generating an EP11 AES key should either be 0
+ * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
*/
struct pkey_genseck2 {
struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/
@@ -229,8 +234,8 @@ struct pkey_genseck2 {
/*
* Generate secure key from clear key value, version 2.
- * Construct a CCA AES secure key or CCA AES cipher key from a given clear key
- * value.
+ * Construct an CCA AES secure key, CCA AES cipher key or EP11 AES secure
+ * key from a given clear key value.
* There needs to be a list of apqns given with at least one entry in there.
* All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
* is not supported. The implementation walks through the list of apqns and
@@ -240,10 +245,13 @@ struct pkey_genseck2 {
* (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
* generate a list of apqns based on the key type to generate.
* The keygenflags argument is passed to the low level generation functions
- * individual for the key type and has a key type specific meaning. Currently
- * only CCA AES cipher keys react to this parameter: Use one or more of the
- * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
- * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * individual for the key type and has a key type specific meaning. When
+ * generating CCA cipher keys you can use one or more of the PKEY_KEYGEN_*
+ * flags to widen the export possibilities. By default a cipher key is
+ * only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
+ * The keygenflag argument for generating an EP11 AES key should either be 0
+ * to use the defaults which are XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT and
+ * XCP_BLOB_PROTKEY_EXTRACTABLE or a valid combination of XCP_BLOB_* flags.
*/
struct pkey_clr2seck2 {
struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
@@ -266,14 +274,19 @@ struct pkey_clr2seck2 {
* with one apqn able to handle this key.
* The function also checks for the master key verification patterns
* of the key matching to the current or alternate mkvp of the apqn.
- * Currently CCA AES secure keys and CCA AES cipher keys are supported.
- * The flags field is updated with some additional info about the apqn mkvp
+ * For CCA AES secure keys and CCA AES cipher keys this means to check
+ * the key's mkvp against the current or old mkvp of the apqns. The flags
+ * field is updated with some additional info about the apqn mkvp
* match: If the current mkvp matches to the key's mkvp then the
* PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches to
* the key's mkvp the PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the
* alternate mkvp is the old master key verification pattern.
* CCA AES secure keys are also checked to have the CPACF export allowed
* bit enabled (XPRTCPAC) in the kmf1 field.
+ * EP11 keys are also supported and the wkvp of the key is checked against
+ * the current wkvp of the apqns. There is no alternate for this type of
+ * key and so on a match the flag PKEY_FLAGS_MATCH_CUR_MKVP always is set.
+ * EP11 keys are also checked to have XCP_BLOB_PROTKEY_EXTRACTABLE set.
* The ioctl returns 0 as long as the given or found apqn matches to
* matches with the current or alternate mkvp to the key's mkvp. If the given
* apqn does not match or there is no such apqn found, -1 with errno
@@ -313,16 +326,20 @@ struct pkey_kblob2pkey2 {
/*
* Build a list of APQNs based on a key blob given.
* Is able to find out which type of secure key is given (CCA AES secure
- * key or CCA AES cipher key) and tries to find all matching crypto cards
- * based on the MKVP and maybe other criterias (like CCA AES cipher keys
- * need a CEX5C or higher). The list of APQNs is further filtered by the key's
- * mkvp which needs to match to either the current mkvp or the alternate mkvp
- * (which is the old mkvp on CCA adapters) of the apqns. The flags argument may
- * be used to limit the matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is
- * given, only the current mkvp of each apqn is compared. Likewise with the
- * PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to
- * return apqns where either the current or the alternate mkvp
+ * key, CCA AES cipher key or EP11 AES key) and tries to find all matching
+ * crypto cards based on the MKVP and maybe other criterias (like CCA AES
+ * cipher keys need a CEX5C or higher, EP11 keys with BLOB_PKEY_EXTRACTABLE
+ * need a CEX7 and EP11 api version 4). The list of APQNs is further filtered
+ * by the key's mkvp which needs to match to either the current mkvp (CCA and
+ * EP11) or the alternate mkvp (old mkvp, CCA adapters only) of the apqns. The
+ * flags argument may be used to limit the matching apqns. If the
+ * PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current mkvp of each apqn is
+ * compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it
+ * is assumed to return apqns where either the current or the alternate mkvp
* matches. At least one of the matching flags needs to be given.
+ * The flags argument for EP11 keys has no further action and is currently
+ * ignored (but needs to be given as PKEY_FLAGS_MATCH_CUR_MKVP) as there is only
+ * the wkvp from the key to match against the apqn's wkvp.
* The list of matching apqns is stored into the space given by the apqns
* argument and the number of stored entries goes into apqn_entries. If the list
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
@@ -356,6 +373,10 @@ struct pkey_apqns4key {
* If both are given, it is assumed to return apqns where either the
* current or the alternate mkvp matches. If no match flag is given
* (flags is 0) the mkvp values are ignored for the match process.
+ * For EP11 keys there is only the current wkvp. So if the apqns should also
+ * match to a given wkvp, then the PKEY_FLAGS_MATCH_CUR_MKVP flag should be
+ * set. The wkvp value is 32 bytes but only the leftmost 16 bytes are compared
+ * against the leftmost 16 byte of the wkvp of the apqn.
* The list of matching apqns is stored into the space given by the apqns
* argument and the number of stored entries goes into apqn_entries. If the list
* is empty (apqn_entries is 0) the apqn_entries field is updated to the number
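
The comment blocks above extend the version-2 pkey ioctls to EP11. A hedged userspace sketch of generating an EP11 AES-256 secure key through PKEY_GENSECK2 follows; only the first field of struct pkey_genseck2 is visible in the hunk above, so the remaining field names and the ioctl macro reflect my reading of the full <asm/pkey.h> header and should be treated as assumptions to verify:

/*
 * Hedged sketch, not part of the patch: generate an EP11 AES-256 secure
 * key with the version-2 ioctl described above. The keygenflags value 0
 * selects the documented defaults (XCP_BLOB_ENCRYPT, XCP_BLOB_DECRYPT,
 * XCP_BLOB_PROTKEY_EXTRACTABLE). Field names beyond .apqns and the
 * PKEY_GENSECK2 macro are assumptions taken from the full uapi header.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/pkey.h>

int main(void)
{
	/* exact APQN required: 0xFFFF as ANY card or domain is not supported */
	struct pkey_apqn apqn = { .card = 0x0a, .domain = 0x002b };
	unsigned char blob[MAXEP11AESKEYBLOBSIZE];
	struct pkey_genseck2 gs2;
	int fd, rc;

	memset(&gs2, 0, sizeof(gs2));
	gs2.apqns = &apqn;
	gs2.apqn_entries = 1;
	gs2.type = PKEY_TYPE_EP11;
	gs2.size = PKEY_SIZE_AES_256;
	gs2.keygenflags = 0;
	gs2.key = blob;
	gs2.keylen = sizeof(blob);

	fd = open("/dev/pkey", O_RDWR);
	if (fd < 0) {
		perror("/dev/pkey");
		return 1;
	}
	rc = ioctl(fd, PKEY_GENSECK2, &gs2);
	if (rc)
		perror("PKEY_GENSECK2");
	else
		printf("EP11 AES key blob of %u bytes generated\n", gs2.keylen);
	close(fd);
	return rc ? 1 : 0;
}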
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index f9e5e1f0821d..5a2177e96e88 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -161,17 +161,17 @@ struct ica_xcRB {
* @payload_len: Payload length
*/
struct ep11_cprb {
- __u16 cprb_len;
- unsigned char cprb_ver_id;
- unsigned char pad_000[2];
- unsigned char flags;
- unsigned char func_id[2];
- __u32 source_id;
- __u32 target_id;
- __u32 ret_code;
- __u32 reserved1;
- __u32 reserved2;
- __u32 payload_len;
+ __u16 cprb_len;
+ __u8 cprb_ver_id;
+ __u8 pad_000[2];
+ __u8 flags;
+ __u8 func_id[2];
+ __u32 source_id;
+ __u32 target_id;
+ __u32 ret_code;
+ __u32 reserved1;
+ __u32 reserved2;
+ __u32 payload_len;
} __attribute__((packed));
/**
@@ -197,13 +197,13 @@ struct ep11_target_dev {
*/
struct ep11_urb {
__u16 targets_num;
- __u64 targets;
+ __u8 __user *targets;
__u64 weight;
__u64 req_no;
__u64 req_len;
- __u64 req;
+ __u8 __user *req;
__u64 resp_len;
- __u64 resp;
+ __u8 __user *resp;
} __attribute__((packed));
/**
@@ -237,7 +237,9 @@ struct zcrypt_device_matrix_ext {
struct zcrypt_device_status_ext device[MAX_ZDEV_ENTRIES_EXT];
};
-#define AUTOSELECT 0xFFFFFFFF
+#define AUTOSELECT 0xFFFFFFFF
+#define AUTOSEL_AP ((__u16) 0xFFFF)
+#define AUTOSEL_DOM ((__u16) 0xFFFF)
#define ZCRYPT_IOCTL_MAGIC 'z'
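
Together with the autoselect handling added to zcrypt_api.c further below, the new AUTOSEL_AP/AUTOSEL_DOM constants let an EP11 CPRB be addressed to "any adapter, any domain". A hedged sketch of filling the (now pointer-based) ep11_urb target list with a wildcard entry; the request and response buffers must already contain a well-formed EP11 CPRB (normally built by an EP11 host library), and the ZSENDEP11CPRB ioctl name lives in the same header outside this hunk, so verify it against <asm/zcrypt.h>:

/*
 * Hedged sketch, not part of the patch: send an EP11 request with a
 * wildcard target using the new AUTOSEL_AP/AUTOSEL_DOM constants. Only
 * the target wildcarding is illustrated; req/resp must hold a valid
 * EP11 CPRB. ZSENDEP11CPRB is assumed from the same uapi header.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/zcrypt.h>

int send_ep11_autoselect(__u8 *req, __u64 req_len, __u8 *resp, __u64 resp_len)
{
	struct ep11_target_dev target = {
		.ap_id  = AUTOSEL_AP,	/* any adapter */
		.dom_id = AUTOSEL_DOM,	/* any domain  */
	};
	struct ep11_urb urb;
	int fd, rc;

	memset(&urb, 0, sizeof(urb));
	urb.targets_num = 1;
	urb.targets = (__u8 *)&target;
	urb.req = req;
	urb.req_len = req_len;
	urb.resp = resp;
	urb.resp_len = resp_len;

	fd = open("/dev/z90crypt", O_RDWR);
	if (fd < 0)
		return -1;
	rc = ioctl(fd, ZSENDEP11CPRB, &urb);
	close(fd);
	return rc;
}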
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 1bb85f60c0dd..4cd9b1ada834 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -72,15 +72,6 @@ static inline void ftrace_generate_orig_insn(struct ftrace_insn *insn)
#endif
}
-static inline int is_kprobe_on_ftrace(struct ftrace_insn *insn)
-{
-#ifdef CONFIG_KPROBES
- if (insn->opc == BREAKPOINT_INSTRUCTION)
- return 1;
-#endif
- return 0;
-}
-
static inline void ftrace_generate_kprobe_nop_insn(struct ftrace_insn *insn)
{
#ifdef CONFIG_KPROBES
@@ -114,16 +105,6 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
/* Initial code replacement */
ftrace_generate_orig_insn(&orig);
ftrace_generate_nop_insn(&new);
- } else if (is_kprobe_on_ftrace(&old)) {
- /*
- * If we find a breakpoint instruction, a kprobe has been
- * placed at the beginning of the function. We write the
- * constant KPROBE_ON_FTRACE_NOP into the remaining four
- * bytes of the original instruction so that the kprobes
- * handler can execute a nop, if it reaches this breakpoint.
- */
- ftrace_generate_kprobe_call_insn(&orig);
- ftrace_generate_kprobe_nop_insn(&new);
} else {
/* Replace ftrace call with a nop. */
ftrace_generate_call_insn(&orig, rec->ip);
@@ -142,21 +123,10 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (probe_kernel_read(&old, (void *) rec->ip, sizeof(old)))
return -EFAULT;
- if (is_kprobe_on_ftrace(&old)) {
- /*
- * If we find a breakpoint instruction, a kprobe has been
- * placed at the beginning of the function. We write the
- * constant KPROBE_ON_FTRACE_CALL into the remaining four
- * bytes of the original instruction so that the kprobes
- * handler can execute a brasl if it reaches this breakpoint.
- */
- ftrace_generate_kprobe_nop_insn(&orig);
- ftrace_generate_kprobe_call_insn(&new);
- } else {
- /* Replace nop with an ftrace call. */
- ftrace_generate_nop_insn(&orig);
- ftrace_generate_call_insn(&new, rec->ip);
- }
+ /* Replace nop with an ftrace call. */
+ ftrace_generate_nop_insn(&orig);
+ ftrace_generate_call_insn(&new, rec->ip);
+
/* Verify that the to be replaced code matches what we expect. */
if (memcmp(&orig, &old, sizeof(old)))
return -EINVAL;
@@ -241,3 +211,45 @@ int ftrace_disable_ftrace_graph_caller(void)
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb;
+ struct kprobe *p = get_kprobe((kprobe_opcode_t *)ip);
+
+ if (unlikely(!p) || kprobe_disabled(p))
+ return;
+
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ return;
+ }
+
+ __this_cpu_write(current_kprobe, p);
+
+ kcb = get_kprobe_ctlblk();
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ instruction_pointer_set(regs, ip);
+
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+
+ instruction_pointer_set(regs, ip + MCOUNT_INSN_SIZE);
+
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ }
+ __this_cpu_write(current_kprobe, NULL);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ return 0;
+}
+#endif
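
With kprobe_ftrace_handler() and arch_prepare_kprobe_ftrace() above, a kprobe placed on an ftrace location is dispatched through the ftrace handler instead of the breakpoint/single-step machinery that the kprobes.c hunk below removes. A minimal, generic kprobes module sketch (nothing s390-specific, not part of the patch) that now takes this route:

/*
 * Minimal sketch, not part of the patch: a kprobe on a function entry.
 * With CONFIG_KPROBES_ON_FTRACE now selected on s390, a probe like this
 * is handled by kprobe_ftrace_handler() above. Generic kprobes API only.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int kp_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("%s hit at %lx\n", p->symbol_name, instruction_pointer(regs));
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_sys_open",	/* any ftrace-able function entry */
	.pre_handler = kp_pre,
};

static int __init kp_init(void)
{
	return register_kprobe(&kp);
}

static void __exit kp_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(kp_init);
module_exit(kp_exit);
MODULE_LICENSE("GPL");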
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 6f1388391620..548d0ea9808d 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -56,21 +56,10 @@ struct kprobe_insn_cache kprobe_s390_insn_slots = {
static void copy_instruction(struct kprobe *p)
{
- unsigned long ip = (unsigned long) p->addr;
s64 disp, new_disp;
u64 addr, new_addr;
- if (ftrace_location(ip) == ip) {
- /*
- * If kprobes patches the instruction that is morphed by
- * ftrace make sure that kprobes always sees the branch
- * "jg .+24" that skips the mcount block or the "brcl 0,0"
- * in case of hotpatch.
- */
- ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
- p->ainsn.is_ftrace_insn = 1;
- } else
- memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
+ memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
p->opcode = p->ainsn.insn[0];
if (!probe_is_insn_relative_long(p->ainsn.insn))
return;
@@ -136,11 +125,6 @@ int arch_prepare_kprobe(struct kprobe *p)
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
-int arch_check_ftrace_location(struct kprobe *p)
-{
- return 0;
-}
-
struct swap_insn_args {
struct kprobe *p;
unsigned int arm_kprobe : 1;
@@ -149,28 +133,11 @@ struct swap_insn_args {
static int swap_instruction(void *data)
{
struct swap_insn_args *args = data;
- struct ftrace_insn new_insn, *insn;
struct kprobe *p = args->p;
- size_t len;
-
- new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
- len = sizeof(new_insn.opc);
- if (!p->ainsn.is_ftrace_insn)
- goto skip_ftrace;
- len = sizeof(new_insn);
- insn = (struct ftrace_insn *) p->addr;
- if (args->arm_kprobe) {
- if (is_ftrace_nop(insn))
- new_insn.disp = KPROBE_ON_FTRACE_NOP;
- else
- new_insn.disp = KPROBE_ON_FTRACE_CALL;
- } else {
- ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
- if (insn->disp == KPROBE_ON_FTRACE_NOP)
- ftrace_generate_nop_insn(&new_insn);
- }
-skip_ftrace:
- s390_kernel_write(p->addr, &new_insn, len);
+ u16 opc;
+
+ opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
+ s390_kernel_write(p->addr, &opc, sizeof(opc));
return 0;
}
NOKPROBE_SYMBOL(swap_instruction);
@@ -464,24 +431,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
unsigned long ip = regs->psw.addr;
int fixup = probe_get_fixup_type(p->ainsn.insn);
- /* Check if the kprobes location is an enabled ftrace caller */
- if (p->ainsn.is_ftrace_insn) {
- struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
- struct ftrace_insn call_insn;
-
- ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
- /*
- * A kprobe on an enabled ftrace call site actually single
- * stepped an unconditional branch (ftrace nop equivalent).
- * Now we need to fixup things and pretend that a brasl r0,...
- * was executed instead.
- */
- if (insn->disp == KPROBE_ON_FTRACE_CALL) {
- ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
- regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
- }
- }
-
if (fixup & FIXUP_PSW_NORMAL)
ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index f942341429b1..7458dcfd6464 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -42,6 +42,9 @@ ENTRY(ftrace_caller)
.globl ftrace_regs_caller
.set ftrace_regs_caller,ftrace_caller
stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
+ lghi %r14,0 # save condition code
+ ipm %r14 # don't put any instructions
+ sllg %r14,%r14,16 # clobbering CC before this point
lgr %r1,%r15
#if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
aghi %r0,MCOUNT_RETURN_FIXUP
@@ -54,6 +57,9 @@ ENTRY(ftrace_caller)
# allocate pt_regs and stack frame for ftrace_trace_function
aghi %r15,-STACK_FRAME_SIZE
stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
+ stg %r14,(STACK_PTREGS_PSW)(%r15)
+ lg %r14,(__SF_GPRS+8*8)(%r1) # restore original return address
+ stosm (STACK_PTREGS_PSW)(%r15),0
aghi %r1,-TRACED_FUNC_FRAME_SIZE
stg %r1,__SF_BACKCHAIN(%r15)
stg %r0,(STACK_PTREGS_PSW+8)(%r15)
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index b0246c705a19..5674710a4841 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -2,7 +2,7 @@
/*
* IBM System z Huge TLB Page Support for Kernel.
*
- * Copyright IBM Corp. 2007,2016
+ * Copyright IBM Corp. 2007,2020
* Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
*/
@@ -11,6 +11,9 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/security.h>
/*
* If the bit selected by single-bit bitmask "a" is set within "x", move
@@ -267,3 +270,98 @@ static __init int setup_hugepagesz(char *opt)
return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
+ unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+ struct vm_unmapped_area_info info;
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = current->mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
+}
+
+static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
+ unsigned long addr0, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+ struct vm_unmapped_area_info info;
+ unsigned long addr;
+
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.length = len;
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = current->mm->mmap_base;
+ info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ if (addr & ~PAGE_MASK) {
+ VM_BUG_ON(addr != -ENOMEM);
+ info.flags = 0;
+ info.low_limit = TASK_UNMAPPED_BASE;
+ info.high_limit = TASK_SIZE;
+ addr = vm_unmapped_area(&info);
+ }
+
+ return addr;
+}
+
+unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ int rc;
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (len > TASK_SIZE - mmap_min_addr)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ if (prepare_hugepage_range(file, addr, len))
+ return -EINVAL;
+ goto check_asce_limit;
+ }
+
+ if (addr) {
+ addr = ALIGN(addr, huge_page_size(h));
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ goto check_asce_limit;
+ }
+
+ if (mm->get_unmapped_area == arch_get_unmapped_area)
+ addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
+ pgoff, flags);
+ else
+ addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
+ pgoff, flags);
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+check_asce_limit:
+ if (addr + len > current->mm->context.asce_limit &&
+ addr + len <= TASK_SIZE) {
+ rc = crst_table_upgrade(mm, addr + len);
+ if (rc)
+ return (unsigned long) rc;
+ }
+ return addr;
+}
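
The new hugetlb_get_unmapped_area() implements HAVE_ARCH_HUGETLB_UNMAPPED_AREA for s390 and, via the check_asce_limit path, performs the dynamic page table (ASCE) upgrade this series fixes. A trivial userspace sketch that exercises it, assuming huge pages have been reserved (e.g. vm.nr_hugepages > 0); it is not part of the patch:

/*
 * Trivial sketch, not part of the patch: a MAP_HUGETLB mapping whose
 * placement now goes through hugetlb_get_unmapped_area() above,
 * including the crst_table_upgrade() call when the mapping would
 * exceed the current ASCE limit.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2UL * 1024 * 1024;	/* multiple of the 1 MiB s390 huge page size */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, len);		/* fault the huge pages in */
	printf("huge pages mapped at %p\n", p);
	munmap(p, len);
	return 0;
}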
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 52aa95c8af4b..22d2db690cd3 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -7,7 +7,8 @@ ap-objs := ap_bus.o ap_card.o ap_queue.o
obj-$(subst m,y,$(CONFIG_ZCRYPT)) += ap.o
# zcrypt_api.o and zcrypt_msgtype*.o depend on ap.o
zcrypt-objs := zcrypt_api.o zcrypt_card.o zcrypt_queue.o
-zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o zcrypt_ccamisc.o
+zcrypt-objs += zcrypt_msgtype6.o zcrypt_msgtype50.o
+zcrypt-objs += zcrypt_ccamisc.o zcrypt_ep11misc.o
obj-$(CONFIG_ZCRYPT) += zcrypt.o
# adapter drivers depend on ap.o and zcrypt.o
obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index d78d77686d7b..71dae64ba994 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -25,6 +25,7 @@
#include "zcrypt_api.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("IBM Corporation");
@@ -71,6 +72,17 @@ struct protaeskeytoken {
u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
} __packed;
+/* inside view of a clear key token (type 0x00 version 0x02) */
+struct clearaeskeytoken {
+ u8 type; /* 0x00 for PAES specific key tokens */
+ u8 res0[3];
+ u8 version; /* 0x02 for clear AES key token */
+ u8 res1[3];
+ u32 keytype; /* key type, one of the PKEY_KEYTYPE values */
+ u32 len; /* bytes actually stored in clearkey[] */
+ u8 clearkey[0]; /* clear key value */
+} __packed;
+
/*
* Create a protected key from a clear key value.
*/
@@ -173,6 +185,72 @@ static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
}
/*
+ * Construct EP11 key with given clear key value.
+ */
+static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
+ u8 *keybuf, size_t *keybuflen)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+
+ /* build a list of apqns suitable for ep11 keys with cpacf support */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ goto out;
+
+ /* go through the list of apqns and try to bild an ep11 key */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_clr2keyblob(card, dom, clrkeylen * 8,
+ 0, clrkey, keybuf, keybuflen);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
+ * Find card and transform EP11 secure key into protected key.
+ */
+static int pkey_ep11key2pkey(const u8 *key, struct pkey_protkey *pkey)
+{
+ int i, rc;
+ u16 card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ /* go through the list of apqns and try to derive an pkey */
+ for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len, &pkey->type);
+ if (rc == 0)
+ break;
+ }
+
+out:
+ kfree(apqns);
+ if (rc)
+ DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+/*
* Verify key and give back some info about the key.
*/
static int pkey_verifykey(const struct pkey_seckey *seckey,
@@ -305,26 +383,90 @@ static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
struct pkey_protkey *protkey)
{
+ int rc = -EINVAL;
+ u8 *tmpbuf = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- struct protaeskeytoken *t;
switch (hdr->version) {
- case TOKVER_PROTECTED_KEY:
- if (keylen != sizeof(struct protaeskeytoken))
- return -EINVAL;
+ case TOKVER_PROTECTED_KEY: {
+ struct protaeskeytoken *t;
+ if (keylen != sizeof(struct protaeskeytoken))
+ goto out;
t = (struct protaeskeytoken *)key;
protkey->len = t->len;
protkey->type = t->keytype;
memcpy(protkey->protkey, t->protkey,
sizeof(protkey->protkey));
-
- return pkey_verifyprotkey(protkey);
+ rc = pkey_verifyprotkey(protkey);
+ break;
+ }
+ case TOKVER_CLEAR_KEY: {
+ struct clearaeskeytoken *t;
+ struct pkey_clrkey ckey;
+ union u_tmpbuf {
+ u8 skey[SECKEYBLOBSIZE];
+ u8 ep11key[MAXEP11AESKEYBLOBSIZE];
+ };
+ size_t tmpbuflen = sizeof(union u_tmpbuf);
+
+ if (keylen < sizeof(struct clearaeskeytoken))
+ goto out;
+ t = (struct clearaeskeytoken *)key;
+ if (keylen != sizeof(*t) + t->len)
+ goto out;
+ if ((t->keytype == PKEY_KEYTYPE_AES_128 && t->len == 16)
+ || (t->keytype == PKEY_KEYTYPE_AES_192 && t->len == 24)
+ || (t->keytype == PKEY_KEYTYPE_AES_256 && t->len == 32))
+ memcpy(ckey.clrkey, t->clearkey, t->len);
+ else
+ goto out;
+ /* alloc temp key buffer space */
+ tmpbuf = kmalloc(tmpbuflen, GFP_ATOMIC);
+ if (!tmpbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ /* try direct way with the PCKMO instruction */
+ rc = pkey_clr2protkey(t->keytype, &ckey, protkey);
+ if (rc == 0)
+ break;
+ /* PCKMO failed, so try the CCA secure key way */
+ rc = cca_clr2seckey(0xFFFF, 0xFFFF, t->keytype,
+ ckey.clrkey, tmpbuf);
+ if (rc == 0)
+ rc = pkey_skey2pkey(tmpbuf, protkey);
+ if (rc == 0)
+ break;
+ /* if the CCA way also failed, let's try via EP11 */
+ rc = pkey_clr2ep11key(ckey.clrkey, t->len,
+ tmpbuf, &tmpbuflen);
+ if (rc == 0)
+ rc = pkey_ep11key2pkey(tmpbuf, protkey);
+ /* now we should really have an protected key */
+ DEBUG_ERR("%s unable to build protected key from clear",
+ __func__);
+ break;
+ }
+ case TOKVER_EP11_AES: {
+ if (keylen < MINEP11AESKEYBLOBSIZE)
+ goto out;
+ /* check ep11 key for exportable as protected key */
+ rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ rc = pkey_ep11key2pkey(key, protkey);
+ break;
+ }
default:
DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
__func__, hdr->version);
- return -EINVAL;
+ rc = -EINVAL;
}
+
+out:
+ kfree(tmpbuf);
+ return rc;
}
/*
@@ -403,6 +545,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -419,7 +565,10 @@ static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_CCA_DATA) {
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_genaeskey(card, dom, ksize, kflags,
+ keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_genseckey(card, dom, ksize, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
} else /* TOKVER_CCA_VLSC */
@@ -450,6 +599,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (*keybufsize < SECKEYBLOBSIZE)
return -EINVAL;
break;
+ case PKEY_TYPE_EP11:
+ if (*keybufsize < MINEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -466,7 +619,10 @@ static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (ktype == PKEY_TYPE_CCA_DATA) {
+ if (ktype == PKEY_TYPE_EP11) {
+ rc = ep11_clr2keyblob(card, dom, ksize, kflags,
+ clrkey, keybuf, keybufsize);
+ } else if (ktype == PKEY_TYPE_CCA_DATA) {
rc = cca_clr2seckey(card, dom, ksize,
clrkey, keybuf);
*keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
@@ -489,11 +645,11 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header) ||
- hdr->type != TOKTYPE_CCA_INTERNAL)
+ if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- if (hdr->version == TOKVER_CCA_AES) {
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
@@ -521,7 +677,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
- } else if (hdr->version == TOKVER_CCA_VLSC) {
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
@@ -556,6 +713,29 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
*cardnr = ((struct pkey_apqn *)_apqns)->card;
*domain = ((struct pkey_apqn *)_apqns)->domain;
+ } else if (hdr->type == TOKTYPE_NON_CCA
+ && hdr->version == TOKVER_EP11_AES) {
+ struct ep11keyblob *kb = (struct ep11keyblob *)key;
+
+ rc = ep11_check_aeskeyblob(debug_info, 3, key, 0, 1);
+ if (rc)
+ goto out;
+ if (ktype)
+ *ktype = PKEY_TYPE_EP11;
+ if (ksize)
+ *ksize = kb->head.keybitlen;
+
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
+ ZCRYPT_CEX7, EP11_API_V, kb->wkvp);
+ if (rc)
+ goto out;
+
+ if (flags)
+ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
+
+ *cardnr = ((struct pkey_apqn *)_apqns)->card;
+ *domain = ((struct pkey_apqn *)_apqns)->domain;
+
} else
rc = -EINVAL;
@@ -578,30 +758,32 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (keylen < sizeof(struct keytoken_header))
return -EINVAL;
- switch (hdr->type) {
- case TOKTYPE_NON_CCA:
- return pkey_nonccatok2pkey(key, keylen, pkey);
- case TOKTYPE_CCA_INTERNAL:
- switch (hdr->version) {
- case TOKVER_CCA_AES:
+ if (hdr->type == TOKTYPE_CCA_INTERNAL) {
+ if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
return -EINVAL;
- break;
- case TOKVER_CCA_VLSC:
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
return -EINVAL;
- break;
- default:
+ } else {
DEBUG_ERR("%s unknown CCA internal token version %d\n",
__func__, hdr->version);
return -EINVAL;
}
- break;
- default:
+ } else if (hdr->type == TOKTYPE_NON_CCA) {
+ if (hdr->version == TOKVER_EP11_AES) {
+ if (keylen < sizeof(struct ep11keyblob))
+ return -EINVAL;
+ if (ep11_check_aeskeyblob(debug_info, 3, key, 0, 1))
+ return -EINVAL;
+ } else {
+ return pkey_nonccatok2pkey(key, keylen, pkey);
+ }
+ } else {
DEBUG_ERR("%s unknown/unsupported blob type %d\n",
__func__, hdr->type);
return -EINVAL;
@@ -611,12 +793,21 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
card = apqns[i].card;
dom = apqns[i].domain;
- if (hdr->version == TOKVER_CCA_AES)
+ if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_AES)
rc = cca_sec2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
- else /* TOKVER_CCA_VLSC */
+ else if (hdr->type == TOKTYPE_CCA_INTERNAL
+ && hdr->version == TOKVER_CCA_VLSC)
rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
&pkey->len, &pkey->type);
+ else { /* EP11 AES secure key blob */
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ rc = ep11_key2protkey(card, dom, key, kb->head.len,
+ pkey->protkey, &pkey->len,
+ &pkey->type);
+ }
if (rc == 0)
break;
}
@@ -631,12 +822,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
u32 _nr_apqns, *_apqns = NULL;
struct keytoken_header *hdr = (struct keytoken_header *)key;
- if (keylen < sizeof(struct keytoken_header) ||
- hdr->type != TOKTYPE_CCA_INTERNAL ||
- flags == 0)
+ if (keylen < sizeof(struct keytoken_header) || flags == 0)
return -EINVAL;
- if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
+ if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES) {
+ int minhwtype = 0, api = 0;
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+ if (flags != PKEY_FLAGS_MATCH_CUR_MKVP)
+ return -EINVAL;
+ if (kb->attr & EP11_BLOB_PKEY_EXTRACTABLE) {
+ minhwtype = ZCRYPT_CEX7;
+ api = EP11_API_V;
+ }
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ minhwtype, api, kb->wkvp);
+ if (rc)
+ goto out;
+ } else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
int minhwtype = ZCRYPT_CEX3C;
u64 cur_mkvp = 0, old_mkvp = 0;
@@ -647,7 +850,7 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
cur_mkvp = t->mkvp;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp;
- } else {
+ } else if (hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
minhwtype = ZCRYPT_CEX6;
@@ -655,19 +858,24 @@ static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
cur_mkvp = t->mkvp0;
if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
old_mkvp = t->mkvp0;
+ } else {
+ /* unknown cca internal token type */
+ return -EINVAL;
}
rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
minhwtype, cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
+ *nr_apqns = _nr_apqns;
out:
kfree(_apqns);
@@ -695,14 +903,26 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype,
minhwtype, cur_mkvp, old_mkvp, 1);
if (rc)
goto out;
- if (apqns) {
- if (*nr_apqns < _nr_apqns)
- rc = -ENOSPC;
- else
- memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
- }
- *nr_apqns = _nr_apqns;
+ } else if (ktype == PKEY_TYPE_EP11) {
+ u8 *wkvp = NULL;
+
+ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
+ wkvp = cur_mkvp;
+ rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, wkvp);
+ if (rc)
+ goto out;
+
+ } else
+ return -EINVAL;
+
+ if (apqns) {
+ if (*nr_apqns < _nr_apqns)
+ rc = -ENOSPC;
+ else
+ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
}
+ *nr_apqns = _nr_apqns;
out:
kfree(_apqns);
@@ -1357,8 +1577,9 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
bool is_xts, char *buf, loff_t off,
size_t count)
{
- size_t keysize;
- int rc;
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = CCACIPHERTOKENSIZE;
if (off != 0 || count < CCACIPHERTOKENSIZE)
return -EINVAL;
@@ -1366,22 +1587,31 @@ static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
if (count < 2 * CCACIPHERTOKENSIZE)
return -EINVAL;
- keysize = CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
+ /* build a list of apqns able to generate an cipher key */
+ rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX6, 0, 0, 0);
if (rc)
return rc;
- memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
- if (is_xts) {
- keysize = CCACIPHERTOKENSIZE;
- rc = cca_gencipherkey(-1, -1, keybits, 0,
- buf + CCACIPHERTOKENSIZE, &keysize);
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ /* simple try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
if (rc)
return rc;
- memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
- CCACIPHERTOKENSIZE - keysize);
- return 2 * CCACIPHERTOKENSIZE;
+ if (is_xts) {
+ keysize = CCACIPHERTOKENSIZE;
+ buf += CCACIPHERTOKENSIZE;
+ rc = cca_gencipherkey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * CCACIPHERTOKENSIZE;
}
return CCACIPHERTOKENSIZE;
@@ -1457,10 +1687,134 @@ static struct attribute_group ccacipher_attr_group = {
.bin_attrs = ccacipher_attrs,
};
+/*
+ * Sysfs attribute read function for all ep11 aes key binary attributes.
+ * The implementation can not deal with partial reads, because a new random
+ * secure key blob is generated with each read. In case of partial reads
+ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
+ * This function and the sysfs attributes using it provide EP11 key blobs
+ * padded to the upper limit of MAXEP11AESKEYBLOBSIZE which is currently
+ * 320 bytes.
+ */
+static ssize_t pkey_ep11_aes_attr_read(enum pkey_key_size keybits,
+ bool is_xts, char *buf, loff_t off,
+ size_t count)
+{
+ int i, rc, card, dom;
+ u32 nr_apqns, *apqns = NULL;
+ size_t keysize = MAXEP11AESKEYBLOBSIZE;
+
+ if (off != 0 || count < MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+ if (is_xts)
+ if (count < 2 * MAXEP11AESKEYBLOBSIZE)
+ return -EINVAL;
+
+ /* build a list of apqns able to generate an cipher key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7, EP11_API_V, NULL);
+ if (rc)
+ return rc;
+
+ memset(buf, 0, is_xts ? 2 * keysize : keysize);
+
+ /* simple try all apqns from the list */
+ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
+ card = apqns[i] >> 16;
+ dom = apqns[i] & 0xFFFF;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ break;
+ }
+ if (rc)
+ return rc;
+
+ if (is_xts) {
+ keysize = MAXEP11AESKEYBLOBSIZE;
+ buf += MAXEP11AESKEYBLOBSIZE;
+ rc = ep11_genaeskey(card, dom, keybits, 0, buf, &keysize);
+ if (rc == 0)
+ return 2 * MAXEP11AESKEYBLOBSIZE;
+ }
+
+ return MAXEP11AESKEYBLOBSIZE;
+}
+
+static ssize_t ep11_aes_128_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_192_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_128_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
+ off, count);
+}
+
+static ssize_t ep11_aes_256_xts_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off,
+ size_t count)
+{
+ return pkey_ep11_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
+ off, count);
+}
+
+static BIN_ATTR_RO(ep11_aes_128, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_192, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256, MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_128_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+static BIN_ATTR_RO(ep11_aes_256_xts, 2 * MAXEP11AESKEYBLOBSIZE);
+
+static struct bin_attribute *ep11_attrs[] = {
+ &bin_attr_ep11_aes_128,
+ &bin_attr_ep11_aes_192,
+ &bin_attr_ep11_aes_256,
+ &bin_attr_ep11_aes_128_xts,
+ &bin_attr_ep11_aes_256_xts,
+ NULL
+};
+
+static struct attribute_group ep11_attr_group = {
+ .name = "ep11",
+ .bin_attrs = ep11_attrs,
+};
+
static const struct attribute_group *pkey_attr_groups[] = {
&protkey_attr_group,
&ccadata_attr_group,
&ccacipher_attr_group,
+ &ep11_attr_group,
NULL,
};
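
As background for the new ep11 sysfs attributes registered above, here is a minimal userspace sketch of reading one of them; the sysfs path and the 320 byte MAXEP11AESKEYBLOBSIZE value are assumptions for illustration, not part of this patch. Each read returns a freshly generated, zero-padded EP11 secure key blob, and partial reads are rejected with -EINVAL.

/* hypothetical userspace example, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLOBSIZE 320	/* assumed value of MAXEP11AESKEYBLOBSIZE */

int main(void)
{
	unsigned char blob[BLOBSIZE];
	/* assumed sysfs location of the pkey misc device attributes */
	int fd = open("/sys/devices/virtual/misc/pkey/ep11/ep11_aes_256",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	/* one full-size read; shorter reads are answered with -EINVAL */
	if (read(fd, blob, sizeof(blob)) != (ssize_t) sizeof(blob)) {
		close(fd);
		return 1;
	}
	close(fd);
	printf("got a %d byte EP11 AES-256 secure key blob\n", BLOBSIZE);
	return 0;
}
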
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 9157e728a362..a42257d6c79e 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -36,6 +36,7 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
/*
* Module description.
@@ -849,7 +850,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* check if device is online and eligible */
if (!zq->online ||
!zq->ops->send_cprb ||
- (tdom != (unsigned short) AUTOSELECT &&
+ (tdom != AUTOSEL_DOM &&
tdom != AP_QID_QUEUE(zq->queue->qid)))
continue;
/* check if device node has admission for this queue */
@@ -874,7 +875,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
/* in case of auto select, provide the correct domain */
qid = pref_zq->queue->qid;
- if (*domain == (unsigned short) AUTOSELECT)
+ if (*domain == AUTOSEL_DOM)
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
@@ -901,7 +902,7 @@ static bool is_desired_ep11_card(unsigned int dev_id,
struct ep11_target_dev *targets)
{
while (target_num-- > 0) {
- if (dev_id == targets->ap_id)
+ if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
return true;
targets++;
}
@@ -912,16 +913,19 @@ static bool is_desired_ep11_queue(unsigned int dev_qid,
unsigned short target_num,
struct ep11_target_dev *targets)
{
+ int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
+
while (target_num-- > 0) {
- if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
+ if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
+ (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
return true;
targets++;
}
return false;
}
-static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
- struct ep11_urb *xcrb)
+static long _zcrypt_send_ep11_cprb(struct ap_perms *perms,
+ struct ep11_urb *xcrb)
{
struct zcrypt_card *zc, *pref_zc;
struct zcrypt_queue *zq, *pref_zq;
@@ -1026,6 +1030,12 @@ out:
return rc;
}
+long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
+{
+ return _zcrypt_send_ep11_cprb(&ap_perms, xcrb);
+}
+EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
+
static long zcrypt_rng(char *buffer)
{
struct zcrypt_card *zc, *pref_zc;
@@ -1366,12 +1376,12 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
return -EFAULT;
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
/* on failure: retry once again after a requested rescan */
if ((rc == -ENODEV) && (zcrypt_process_rescan()))
do {
- rc = zcrypt_send_ep11_cprb(perms, &xcrb);
+ rc = _zcrypt_send_ep11_cprb(perms, &xcrb);
} while (rc == -EAGAIN);
if (rc)
ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
@@ -1885,6 +1895,7 @@ void __exit zcrypt_api_exit(void)
zcrypt_msgtype6_exit();
zcrypt_msgtype50_exit();
zcrypt_ccamisc_exit();
+ zcrypt_ep11misc_exit();
zcrypt_debug_exit();
}
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index d464618cd84f..599e68bf53f7 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -140,6 +140,7 @@ struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int);
int zcrypt_api_init(void);
void zcrypt_api_exit(void);
long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+long zcrypt_send_ep11_cprb(struct ep11_urb *urb);
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
int zcrypt_device_status_ext(int card, int queue,
struct zcrypt_device_status_ext *devstatus);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h
index 77b6cc7b8f82..3a9876d5ab0e 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.h
+++ b/drivers/s390/crypto/zcrypt_ccamisc.h
@@ -19,6 +19,7 @@
/* For TOKTYPE_NON_CCA: */
#define TOKVER_PROTECTED_KEY 0x01 /* Protected key token */
+#define TOKVER_CLEAR_KEY 0x02 /* Clear key token */
/* For TOKTYPE_CCA_INTERNAL: */
#define TOKVER_CCA_AES 0x04 /* CCA AES key token */
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index 6fabc906114c..9a9d02e19774 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -19,6 +19,7 @@
#include "zcrypt_error.h"
#include "zcrypt_cex4.h"
#include "zcrypt_ccamisc.h"
+#include "zcrypt_ep11misc.h"
#define CEX4A_MIN_MOD_SIZE 1 /* 8 bits */
#define CEX4A_MAX_MOD_SIZE_2K 256 /* 2048 bits */
@@ -71,11 +72,11 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = {
MODULE_DEVICE_TABLE(ap, zcrypt_cex4_queue_ids);
/*
- * CCA card addditional device attributes
+ * CCA card additional device attributes
*/
-static ssize_t serialnr_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cca_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct cca_info ci;
struct ap_card *ac = to_ap_card(dev);
@@ -88,23 +89,25 @@ static ssize_t serialnr_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n", ci.serial);
}
-static DEVICE_ATTR_RO(serialnr);
+
+static struct device_attribute dev_attr_cca_serialnr =
+ __ATTR(serialnr, 0444, cca_serialnr_show, NULL);
static struct attribute *cca_card_attrs[] = {
- &dev_attr_serialnr.attr,
+ &dev_attr_cca_serialnr.attr,
NULL,
};
-static const struct attribute_group cca_card_attr_group = {
+static const struct attribute_group cca_card_attr_grp = {
.attrs = cca_card_attrs,
};
-/*
- * CCA queue addditional device attributes
- */
-static ssize_t mkvps_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ /*
+ * CCA queue additional device attributes
+ */
+static ssize_t cca_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
int n = 0;
struct cca_info ci;
@@ -138,17 +141,233 @@ static ssize_t mkvps_show(struct device *dev,
return n;
}
-static DEVICE_ATTR_RO(mkvps);
+
+static struct device_attribute dev_attr_cca_mkvps =
+ __ATTR(mkvps, 0444, cca_mkvps_show, NULL);
static struct attribute *cca_queue_attrs[] = {
- &dev_attr_mkvps.attr,
+ &dev_attr_cca_mkvps.attr,
NULL,
};
-static const struct attribute_group cca_queue_attr_group = {
+static const struct attribute_group cca_queue_attr_grp = {
.attrs = cca_queue_attrs,
};
+/*
+ * EP11 card additional device attributes
+ */
+static ssize_t ep11_api_ordinalnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.API_ord_nr > 0)
+ return snprintf(buf, PAGE_SIZE, "%u\n", ci.API_ord_nr);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_api_ordinalnr =
+ __ATTR(API_ordinalnr, 0444, ep11_api_ordinalnr_show, NULL);
+
+static ssize_t ep11_fw_version_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.FW_version > 0)
+ return snprintf(buf, PAGE_SIZE, "%d.%d\n",
+ (int)(ci.FW_version >> 8),
+ (int)(ci.FW_version & 0xFF));
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_fw_version =
+ __ATTR(FW_version, 0444, ep11_fw_version_show, NULL);
+
+static ssize_t ep11_serialnr_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ if (ci.serial[0])
+ return snprintf(buf, PAGE_SIZE, "%16.16s\n", ci.serial);
+ else
+ return snprintf(buf, PAGE_SIZE, "\n");
+}
+
+static struct device_attribute dev_attr_ep11_serialnr =
+ __ATTR(serialnr, 0444, ep11_serialnr_show, NULL);
+
+static const struct {
+ int mode_bit;
+ const char *mode_txt;
+} ep11_op_modes[] = {
+ { 0, "FIPS2009" },
+ { 1, "BSI2009" },
+ { 2, "FIPS2011" },
+ { 3, "BSI2011" },
+ { 6, "BSICC2017" },
+ { 0, NULL }
+};
+
+static ssize_t ep11_card_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_card_info ci;
+ struct ap_card *ac = to_ap_card(dev);
+ struct zcrypt_card *zc = ac->private;
+
+ memset(&ci, 0, sizeof(ci));
+
+ ep11_get_card_info(ac->id, &ci, zc->online);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (ci.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_card_op_modes =
+ __ATTR(op_modes, 0444, ep11_card_op_modes_show, NULL);
+
+static struct attribute *ep11_card_attrs[] = {
+ &dev_attr_ep11_api_ordinalnr.attr,
+ &dev_attr_ep11_fw_version.attr,
+ &dev_attr_ep11_serialnr.attr,
+ &dev_attr_ep11_card_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_card_attr_grp = {
+ .attrs = ep11_card_attrs,
+};
+
+/*
+ * EP11 queue additional device attributes
+ */
+
+static ssize_t ep11_mkvps_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+ static const char * const cwk_state[] = { "invalid", "valid" };
+ static const char * const nwk_state[] = { "empty", "uncommitted",
+ "committed" };
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ if (di.cur_wk_state == '0') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s -\n",
+ cwk_state[di.cur_wk_state - '0']);
+ } else if (di.cur_wk_state == '1') {
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: %s 0x",
+ cwk_state[di.cur_wk_state - '0']);
+ bin2hex(buf + n, di.cur_wkvp, sizeof(di.cur_wkvp));
+ n += 2 * sizeof(di.cur_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n = snprintf(buf, PAGE_SIZE, "WK CUR: - -\n");
+
+ if (di.new_wk_state == '0') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s -\n",
+ nwk_state[di.new_wk_state - '0']);
+ } else if (di.new_wk_state >= '1' && di.new_wk_state <= '2') {
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: %s 0x",
+ nwk_state[di.new_wk_state - '0']);
+ bin2hex(buf + n, di.new_wkvp, sizeof(di.new_wkvp));
+ n += 2 * sizeof(di.new_wkvp);
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+ } else
+ n += snprintf(buf + n, PAGE_SIZE - n, "WK NEW: - -\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_mkvps =
+ __ATTR(mkvps, 0444, ep11_mkvps_show, NULL);
+
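
To make the attribute format concrete: based on the snprintf format strings above, a read of the mkvps attribute yields two lines of the following shape (the wrapping key verification patterns are 32 bytes, abbreviated here), with "-" placeholders when the state is unknown, for example while the queue is offline:

WK CUR: valid 0x<64 hex digits>
WK NEW: empty -
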
+static ssize_t ep11_queue_op_modes_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int i, n = 0;
+ struct ep11_domain_info di;
+ struct zcrypt_queue *zq = to_ap_queue(dev)->private;
+
+ memset(&di, 0, sizeof(di));
+
+ if (zq->online)
+ ep11_get_domain_info(AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid),
+ &di);
+
+ for (i = 0; ep11_op_modes[i].mode_txt; i++) {
+ if (di.op_mode & (1 << ep11_op_modes[i].mode_bit)) {
+ if (n > 0)
+ buf[n++] = ' ';
+ n += snprintf(buf + n, PAGE_SIZE - n,
+ "%s", ep11_op_modes[i].mode_txt);
+ }
+ }
+ n += snprintf(buf + n, PAGE_SIZE - n, "\n");
+
+ return n;
+}
+
+static struct device_attribute dev_attr_ep11_queue_op_modes =
+ __ATTR(op_modes, 0444, ep11_queue_op_modes_show, NULL);
+
+static struct attribute *ep11_queue_attrs[] = {
+ &dev_attr_ep11_mkvps.attr,
+ &dev_attr_ep11_queue_op_modes.attr,
+ NULL,
+};
+
+static const struct attribute_group ep11_queue_attr_grp = {
+ .attrs = ep11_queue_attrs,
+};
+
/**
* Probe function for CEX4/CEX5/CEX6/CEX7 card device. It always
* accepts the AP device since the bus_match already checked
@@ -313,7 +532,12 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev)
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_card_attr_group);
+ &cca_card_attr_grp);
+ if (rc)
+ zcrypt_card_unregister(zc);
+ } else if (ap_test_bit(&ac->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_card_attr_grp);
if (rc)
zcrypt_card_unregister(zc);
}
@@ -332,7 +556,9 @@ static void zcrypt_cex4_card_remove(struct ap_device *ap_dev)
struct zcrypt_card *zc = ac->private;
if (ap_test_bit(&ac->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_group);
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_card_attr_grp);
+ else if (ap_test_bit(&ac->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_card_attr_grp);
if (zc)
zcrypt_card_unregister(zc);
}
@@ -394,7 +620,12 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO)) {
rc = sysfs_create_group(&ap_dev->device.kobj,
- &cca_queue_attr_group);
+ &cca_queue_attr_grp);
+ if (rc)
+ zcrypt_queue_unregister(zq);
+ } else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11)) {
+ rc = sysfs_create_group(&ap_dev->device.kobj,
+ &ep11_queue_attr_grp);
if (rc)
zcrypt_queue_unregister(zq);
}
@@ -413,7 +644,9 @@ static void zcrypt_cex4_queue_remove(struct ap_device *ap_dev)
struct zcrypt_queue *zq = aq->private;
if (ap_test_bit(&aq->card->functions, AP_FUNC_COPRO))
- sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_group);
+ sysfs_remove_group(&ap_dev->device.kobj, &cca_queue_attr_grp);
+ else if (ap_test_bit(&aq->card->functions, AP_FUNC_EP11))
+ sysfs_remove_group(&ap_dev->device.kobj, &ep11_queue_attr_grp);
if (zq)
zcrypt_queue_unregister(zq);
}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
new file mode 100644
index 000000000000..d4caf46ff9df
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#include "ap_bus.h"
+#include "zcrypt_api.h"
+#include "zcrypt_debug.h"
+#include "zcrypt_msgtype6.h"
+#include "zcrypt_ep11misc.h"
+#include "zcrypt_ccamisc.h"
+
+#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
+#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
+#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
+#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
+
+/* default iv used here */
+static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
+ 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
+
+/* ep11 card info cache */
+struct card_list_entry {
+ struct list_head list;
+ u16 cardnr;
+ struct ep11_card_info info;
+};
+static LIST_HEAD(card_list);
+static DEFINE_SPINLOCK(card_list_lock);
+
+static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci)
+{
+ int rc = -ENOENT;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(ci, &ptr->info, sizeof(*ci));
+ rc = 0;
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+
+ return rc;
+}
+
+static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci)
+{
+ int found = 0;
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ found = 1;
+ break;
+ }
+ }
+ if (!found) {
+ ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC);
+ if (!ptr) {
+ spin_unlock_bh(&card_list_lock);
+ return;
+ }
+ ptr->cardnr = cardnr;
+ memcpy(&ptr->info, ci, sizeof(*ci));
+ list_add(&ptr->list, &card_list);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void card_cache_scrub(u16 cardnr)
+{
+ struct card_list_entry *ptr;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry(ptr, &card_list, list) {
+ if (ptr->cardnr == cardnr) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ break;
+ }
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+static void __exit card_cache_free(void)
+{
+ struct card_list_entry *ptr, *pnext;
+
+ spin_lock_bh(&card_list_lock);
+ list_for_each_entry_safe(ptr, pnext, &card_list, list) {
+ list_del(&ptr->list);
+ kfree(ptr);
+ }
+ spin_unlock_bh(&card_list_lock);
+}
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport)
+{
+ struct ep11keyblob *kb = (struct ep11keyblob *) key;
+
+#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
+
+ if (kb->head.type != TOKTYPE_NON_CCA) {
+ if (dbg)
+ DBF("%s key check failed, type 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.type, TOKTYPE_NON_CCA);
+ return -EINVAL;
+ }
+ if (kb->head.version != TOKVER_EP11_AES) {
+ if (dbg)
+ DBF("%s key check failed, version 0x%02x != 0x%02x\n",
+ __func__, (int) kb->head.version, TOKVER_EP11_AES);
+ return -EINVAL;
+ }
+ if (kb->version != EP11_STRUCT_MAGIC) {
+ if (dbg)
+ DBF("%s key check failed, magic 0x%04x != 0x%04x\n",
+ __func__, (int) kb->version, EP11_STRUCT_MAGIC);
+ return -EINVAL;
+ }
+ switch (kb->head.keybitlen) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ if (dbg)
+ DBF("%s key check failed, keybitlen %d invalid\n",
+ __func__, (int) kb->head.keybitlen);
+ return -EINVAL;
+ }
+ if (keybitsize > 0 && keybitsize != (int) kb->head.keybitlen) {
+ DBF("%s key check failed, keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+ if (checkcpacfexport && !(kb->attr & EP11_BLOB_PKEY_EXTRACTABLE)) {
+ if (dbg)
+ DBF("%s key check failed, PKEY_EXTRACTABLE is 0\n",
+ __func__);
+ return -EINVAL;
+ }
+#undef DBF
+
+ return 0;
+}
+EXPORT_SYMBOL(ep11_check_aeskeyblob);
+
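
A minimal caller sketch for the check above, assuming keybuf holds a candidate blob about to be converted for CPACF use (no debug trace, any AES key bit size accepted):

/* sketch: accept only blobs usable for CPACF, any AES key size */
static bool example_blob_ok(const u8 *keybuf)
{
	return ep11_check_aeskeyblob(NULL, 0, keybuf, 0, 1) == 0;
}
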
+/*
+ * Helper function which calls zcrypt_send_ep11_cprb with
+ * memory management segment adjusted to kernel space
+ * so that the copy_from_user calls done during the
+ * processing in fact copy from kernel space.
+ */
+static inline int _zcrypt_send_ep11_cprb(struct ep11_urb *urb)
+{
+ int rc;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ rc = zcrypt_send_ep11_cprb(urb);
+ set_fs(old_fs);
+
+ return rc;
+}
+
+/*
+ * Allocate and prepare ep11 cprb plus additional payload.
+ */
+static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
+{
+ size_t len = sizeof(struct ep11_cprb) + payload_len;
+ struct ep11_cprb *cprb;
+
+ cprb = kmalloc(len, GFP_KERNEL);
+ if (!cprb)
+ return NULL;
+
+ memset(cprb, 0, len);
+ cprb->cprb_len = sizeof(struct ep11_cprb);
+ cprb->cprb_ver_id = 0x04;
+ memcpy(cprb->func_id, "T4", 2);
+ cprb->ret_code = 0xFFFFFFFF;
+ cprb->payload_len = payload_len;
+
+ return cprb;
+}
+
+/*
+ * Some helper functions related to ASN1 encoding.
+ * Limited to length info <= 2 bytes.
+ */
+
+#define ASN1TAGLEN(x) (2 + (x) + ((x) > 127 ? 1 : 0) + ((x) > 255 ? 1 : 0))
+
+static int asn1tag_write(u8 *ptr, u8 tag, const u8 *pvalue, u16 valuelen)
+{
+ ptr[0] = tag;
+ if (valuelen > 255) {
+ ptr[1] = 0x82;
+ *((u16 *)(ptr + 2)) = valuelen;
+ memcpy(ptr + 4, pvalue, valuelen);
+ return 4 + valuelen;
+ }
+ if (valuelen > 127) {
+ ptr[1] = 0x81;
+ ptr[2] = (u8) valuelen;
+ memcpy(ptr + 3, pvalue, valuelen);
+ return 3 + valuelen;
+ }
+ ptr[1] = (u8) valuelen;
+ memcpy(ptr + 2, pvalue, valuelen);
+ return 2 + valuelen;
+}
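
For illustration, the three length encodings this helper emits (example values only; the two byte form is stored in the native s390 big-endian order by the *(u16 *) assignment above):

/*
 *   asn1tag_write(p, 0x04, val, 16)  -> 04 10 <16 value bytes>
 *   asn1tag_write(p, 0x04, val, 200) -> 04 81 c8 <200 value bytes>
 *   asn1tag_write(p, 0x04, val, 320) -> 04 82 01 40 <320 value bytes>
 * which is exactly the overhead ASN1TAGLEN(x) accounts for.
 */
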
+
+/* EP11 payload > 127 bytes starts with this struct */
+struct pl_head {
+ u8 tag;
+ u8 lenfmt;
+ u16 len;
+ u8 func_tag;
+ u8 func_len;
+ u32 func;
+ u8 dom_tag;
+ u8 dom_len;
+ u32 dom;
+} __packed;
+
+/* prep ep11 payload head helper function */
+static inline void prep_head(struct pl_head *h,
+ size_t pl_size, int api, int func)
+{
+ h->tag = 0x30;
+ h->lenfmt = 0x82;
+ h->len = pl_size - 4;
+ h->func_tag = 0x04;
+ h->func_len = sizeof(u32);
+ h->func = (api << 16) + func;
+ h->dom_tag = 0x04;
+ h->dom_len = sizeof(u32);
+}
+
+/* prep urb helper function */
+static inline void prep_urb(struct ep11_urb *u,
+ struct ep11_target_dev *t, int nt,
+ struct ep11_cprb *req, size_t req_len,
+ struct ep11_cprb *rep, size_t rep_len)
+{
+ u->targets = (u8 __user *) t;
+ u->targets_num = nt;
+ u->req = (u8 __user *) req;
+ u->req_len = req_len;
+ u->resp = (u8 __user *) rep;
+ u->resp_len = rep_len;
+}
+
+/* Check ep11 reply payload, return 0 or suggested errno value. */
+static int check_reply_pl(const u8 *pl, const char *func)
+{
+ int len;
+ u32 ret;
+
+ /* start tag */
+ if (*pl++ != 0x30) {
+ DEBUG_ERR("%s reply start tag mismatch\n", func);
+ return -EIO;
+ }
+
+ /* payload length format */
+ if (*pl < 127) {
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x81) {
+ pl++;
+ len = *pl;
+ pl++;
+ } else if (*pl == 0x82) {
+ pl++;
+ len = *((u16 *)pl);
+ pl += 2;
+ } else {
+ DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
+ return -EIO;
+ }
+
+ /* len should cover at least 3 fields with 32 bit value each */
+ if (len < 3 * 6) {
+ DEBUG_ERR("%s reply length %d too small\n", func, len);
+ return -EIO;
+ }
+
+ /* function tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s function tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* dom tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 6;
+
+ /* return value tag, length and value */
+ if (pl[0] != 0x04 || pl[1] != 0x04) {
+ DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ return -EIO;
+ }
+ pl += 2;
+ ret = *((u32 *)pl);
+ if (ret != 0) {
+ DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ return -EIO;
+ }
+
+ return 0;
+}
+
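
For reference, the reply payload layout that check_reply_pl walks, sketched from the checks above:

/*
 *   0x30 <len>              sequence tag plus 1-3 length bytes
 *   0x04 0x04 <u32>         function id
 *   0x04 0x04 <u32>         domain
 *   0x04 0x04 <u32>         return code, must be 0
 *   <data tag> <lenfmt> ... payload data, validated by each caller
 */
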
+
+/*
+ * Helper function which does an ep11 query with given query type.
+ */
+static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
+ size_t buflen, u8 *buf)
+{
+ struct ep11_info_req_pl {
+ struct pl_head head;
+ u8 query_type_tag;
+ u8 query_type_len;
+ u32 query_type;
+ u8 query_subtype_tag;
+ u8 query_subtype_len;
+ u32 query_subtype;
+ } __packed * req_pl;
+ struct ep11_info_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ int api = 1, rc = -ENOMEM;
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct ep11_info_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct ep11_info_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 38); /* get xcp info */
+ req_pl->query_type_tag = 0x04;
+ req_pl->query_type_len = sizeof(u32);
+ req_pl->query_type = query_type;
+ req_pl->query_subtype_tag = 0x04;
+ req_pl->query_subtype_len = sizeof(u32);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen);
+ if (!rep)
+ goto out;
+ rep_pl = (struct ep11_info_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = cardnr;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl) + buflen);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) cardnr, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > buflen) {
+ DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(buf, ((u8 *) rep_pl) + sizeof(*rep_pl), rep_pl->data_len);
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify)
+{
+ int rc;
+ struct ep11_module_query_info {
+ u32 API_ord_nr;
+ u32 firmware_id;
+ u8 FW_major_vers;
+ u8 FW_minor_vers;
+ u8 CSP_major_vers;
+ u8 CSP_minor_vers;
+ u8 fwid[32];
+ u8 xcp_config_hash[32];
+ u8 CSP_config_hash[32];
+ u8 serial[16];
+ u8 module_date_time[16];
+ u64 op_mode;
+ u32 PKCS11_flags;
+ u32 ext_flags;
+ u32 domains;
+ u32 sym_state_bytes;
+ u32 digest_state_bytes;
+ u32 pin_blob_bytes;
+ u32 SPKI_bytes;
+ u32 priv_key_blob_bytes;
+ u32 sym_blob_bytes;
+ u32 max_payload_bytes;
+ u32 CP_profile_bytes;
+ u32 max_CP_index;
+ } __packed * pmqi = NULL;
+
+ rc = card_cache_fetch(card, info);
+ if (rc || verify) {
+ pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL);
+ if (!pmqi)
+ return -ENOMEM;
+ rc = ep11_query_info(card, AUTOSEL_DOM,
+ 0x01 /* module info query */,
+ sizeof(*pmqi), (u8 *) pmqi);
+ if (rc) {
+ if (rc == -ENODEV)
+ card_cache_scrub(card);
+ goto out;
+ }
+ memset(info, 0, sizeof(*info));
+ info->API_ord_nr = pmqi->API_ord_nr;
+ info->FW_version =
+ (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers;
+ memcpy(info->serial, pmqi->serial, sizeof(info->serial));
+ info->op_mode = pmqi->op_mode;
+ card_cache_update(card, info);
+ }
+
+out:
+ kfree(pmqi);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_card_info);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info)
+{
+ int rc;
+ struct ep11_domain_query_info {
+ u32 dom_index;
+ u8 cur_WK_VP[32];
+ u8 new_WK_VP[32];
+ u32 dom_flags;
+ u64 op_mode;
+ } __packed * p_dom_info;
+
+ p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL);
+ if (!p_dom_info)
+ return -ENOMEM;
+
+ rc = ep11_query_info(card, domain, 0x03 /* domain info query */,
+ sizeof(*p_dom_info), (u8 *) p_dom_info);
+ if (rc)
+ goto out;
+
+ memset(info, 0, sizeof(*info));
+ info->cur_wk_state = '0';
+ info->new_wk_state = '0';
+ if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) {
+ if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) {
+ info->cur_wk_state = '1';
+ memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32);
+ }
+ if (p_dom_info->dom_flags & 0x04 /* new wk present */
+ || p_dom_info->dom_flags & 0x08 /* new wk committed */) {
+ info->new_wk_state =
+ p_dom_info->dom_flags & 0x08 ? '2' : '1';
+ memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32);
+ }
+ }
+ info->op_mode = p_dom_info->op_mode;
+
+out:
+ kfree(p_dom_info);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_get_domain_info);
+
+/*
+ * Default EP11 AES key generate attributes, used when no keygenflags given:
+ * XCP_BLOB_ENCRYPT | XCP_BLOB_DECRYPT | XCP_BLOB_PROTKEY_EXTRACTABLE
+ */
+#define KEY_ATTR_DEFAULTS 0x00200c00
+
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct keygen_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 keybytes_tag;
+ u8 keybytes_len;
+ u32 keybytes;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_val_len_type;
+ u32 attr_val_len_value;
+ u8 pin_tag;
+ u8 pin_len;
+ } __packed * req_pl;
+ struct keygen_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ int api, rc = -ENOMEM;
+
+ switch (keybitsize) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* request cprb and payload */
+ req = alloc_cprb(sizeof(struct keygen_req_pl));
+ if (!req)
+ goto out;
+ req_pl = (struct keygen_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, sizeof(*req_pl), api, 21); /* GenerateKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ req_pl->keybytes_tag = 0x04;
+ req_pl->keybytes_len = sizeof(u32);
+ req_pl->keybytes = keybitsize / 8;
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32);
+ req_pl->mech = 0x00001080; /* CKM_AES_KEY_GEN */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 5 * sizeof(u32);
+ req_pl->attr_header = 0x10010000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_val_len_type = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ req_pl->pin_tag = 0x04;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct keygen_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct keygen_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + sizeof(*req_pl),
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_genaeskey);
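
A minimal in-kernel caller sketch for ep11_genaeskey, mirroring the pattern the pkey sysfs code earlier in this patch uses; error handling is trimmed and the caller is assumed to pass a buffer of at least MAXEP11AESKEYBLOBSIZE bytes with *bloblen set to its size.

/* sketch: generate one EP11 AES-256 secure key on any eligible APQN */
static int example_gen_ep11_key(u8 *blob, size_t *bloblen)
{
	u32 nr_apqns, *apqns = NULL;
	int i, rc;

	rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			    ZCRYPT_CEX7, EP11_API_V, NULL);
	if (rc)
		return rc;
	for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
		rc = ep11_genaeskey(apqns[i] >> 16, apqns[i] & 0xFFFF,
				    256, 0, blob, bloblen);
		if (rc == 0)
			break;
	}
	kfree(apqns);
	return rc;
}
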
+
+static int ep11_cryptsingle(u16 card, u16 domain,
+ u16 mode, u32 mech, const u8 *iv,
+ const u8 *key, size_t keysize,
+ const u8 *inbuf, size_t inbufsize,
+ u8 *outbuf, size_t *outbufsize)
+{
+ struct crypt_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by key tag + key blob
+ * followed by plaintext tag + plaintext
+ */
+ } __packed * req_pl;
+ struct crypt_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ /* data follows */
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ size_t req_pl_size, rep_pl_size;
+ int n, api = 1, rc = -ENOMEM;
+ u8 *p;
+
+ /* the simple asn1 coding used has length limits */
+ if (keysize > 0xFFFF || inbufsize > 0xFFFF)
+ return -EINVAL;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct crypt_req_pl *) (((u8 *) req) + sizeof(*req));
+ prep_head(&req_pl->head, req_pl_size, api, (mode ? 20 : 19));
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key and input data */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ p += asn1tag_write(p, 0x04, inbuf, inbufsize);
+
+ /* reply cprb and payload, assume out data size <= in data size + 32 */
+ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32);
+ rep = alloc_cprb(rep_pl_size);
+ if (!rep)
+ goto out;
+ rep_pl = (struct crypt_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + rep_pl_size);
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ p = ((u8 *) rep_pl) + sizeof(*rep_pl);
+ if (rep_pl->data_lenfmt <= 127)
+ n = rep_pl->data_lenfmt;
+ else if (rep_pl->data_lenfmt == 0x81)
+ n = *p++;
+ else if (rep_pl->data_lenfmt == 0x82) {
+ n = *((u16 *) p);
+ p += 2;
+ } else {
+ DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
+ rc = -EIO;
+ goto out;
+ }
+ if (n > *outbufsize) {
+ DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ memcpy(outbuf, p, n);
+ *outbufsize = n;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_unwrapkey(u16 card, u16 domain,
+ const u8 *kek, size_t keksize,
+ const u8 *enckey, size_t enckeysize,
+ u32 mech, const u8 *iv,
+ u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize)
+{
+ struct uw_req_pl {
+ struct pl_head head;
+ u8 attr_tag;
+ u8 attr_len;
+ u32 attr_header;
+ u32 attr_bool_mask;
+ u32 attr_bool_bits;
+ u32 attr_key_type;
+ u32 attr_key_type_value;
+ u32 attr_val_len;
+ u32 attr_val_len_value;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * maybe followed by iv data
+ * followed by kek tag + kek blob
+ * followed by empty mac tag
+ * followed by empty pin tag
+ * followed by encrypted key tag + bytes
+ */
+ } __packed * req_pl;
+ struct uw_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct uw_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keksize) + 4 + ASN1TAGLEN(enckeysize);
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ req_pl = (struct uw_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!keygenflags || keygenflags & 0x00200000) ? 4 : 1;
+ prep_head(&req_pl->head, req_pl_size, api, 34); /* UnwrapKey */
+ req_pl->attr_tag = 0x04;
+ req_pl->attr_len = 7 * sizeof(u32);
+ req_pl->attr_header = 0x10020000;
+ req_pl->attr_bool_mask = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_bool_bits = keygenflags ? keygenflags : KEY_ATTR_DEFAULTS;
+ req_pl->attr_key_type = 0x00000100; /* CKA_KEY_TYPE */
+ req_pl->attr_key_type_value = 0x0000001f; /* CKK_AES */
+ req_pl->attr_val_len = 0x00000161; /* CKA_VALUE_LEN */
+ req_pl->attr_val_len_value = keybitsize / 8;
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x00001085); /* CKM_AES_CBC_PAD */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* kek */
+ p += asn1tag_write(p, 0x04, kek, keksize);
+ /* empty mac key tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty pin tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* encrypted key value tag and bytes */
+ p += asn1tag_write(p, 0x04, enckey, enckeysize);
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct uw_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct uw_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *keybufsize) {
+ DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy key blob and set header values */
+ memcpy(keybuf, rep_pl->data, rep_pl->data_len);
+ *keybufsize = rep_pl->data_len;
+ kb = (struct ep11keyblob *) keybuf;
+ kb->head.type = TOKTYPE_NON_CCA;
+ kb->head.len = rep_pl->data_len;
+ kb->head.version = TOKVER_EP11_AES;
+ kb->head.keybitlen = keybitsize;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+static int ep11_wrapkey(u16 card, u16 domain,
+ const u8 *key, size_t keysize,
+ u32 mech, const u8 *iv,
+ u8 *databuf, size_t *datasize)
+{
+ struct wk_req_pl {
+ struct pl_head head;
+ u8 var_tag;
+ u8 var_len;
+ u32 var;
+ u8 mech_tag;
+ u8 mech_len;
+ u32 mech;
+ /*
+ * followed by iv data
+ * followed by key tag + key blob
+ * followed by dummy kek param
+ * followed by dummy mac param
+ */
+ } __packed * req_pl;
+ struct wk_rep_pl {
+ struct pl_head head;
+ u8 rc_tag;
+ u8 rc_len;
+ u32 rc;
+ u8 data_tag;
+ u8 data_lenfmt;
+ u16 data_len;
+ u8 data[512];
+ } __packed * rep_pl;
+ struct ep11_cprb *req = NULL, *rep = NULL;
+ struct ep11_target_dev target;
+ struct ep11_urb *urb = NULL;
+ struct ep11keyblob *kb;
+ size_t req_pl_size;
+ int api, rc = -ENOMEM;
+ u8 *p;
+
+ /* request cprb and payload */
+ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 16 : 0)
+ + ASN1TAGLEN(keysize) + 4;
+ req = alloc_cprb(req_pl_size);
+ if (!req)
+ goto out;
+ if (!mech || mech == 0x80060001)
+ req->flags |= 0x20; /* CPACF_WRAP needs special bit */
+ req_pl = (struct wk_req_pl *) (((u8 *) req) + sizeof(*req));
+ api = (!mech || mech == 0x80060001) ? 4 : 1; /* CKM_IBM_CPACF_WRAP */
+ prep_head(&req_pl->head, req_pl_size, api, 33); /* WrapKey */
+ req_pl->var_tag = 0x04;
+ req_pl->var_len = sizeof(u32);
+ /* mech is mech + mech params (iv here) */
+ req_pl->mech_tag = 0x04;
+ req_pl->mech_len = sizeof(u32) + (iv ? 16 : 0);
+ req_pl->mech = (mech ? mech : 0x80060001); /* CKM_IBM_CPACF_WRAP */
+ p = ((u8 *) req_pl) + sizeof(*req_pl);
+ if (iv) {
+ memcpy(p, iv, 16);
+ p += 16;
+ }
+ /* key blob */
+ p += asn1tag_write(p, 0x04, key, keysize);
+ /* maybe the key argument needs the head data cleaned out */
+ kb = (struct ep11keyblob *)(p - keysize);
+ if (kb->head.version == TOKVER_EP11_AES)
+ memset(&kb->head, 0, sizeof(kb->head));
+ /* empty kek tag */
+ *p++ = 0x04;
+ *p++ = 0;
+ /* empty mac tag */
+ *p++ = 0x04;
+ *p++ = 0;
+
+ /* reply cprb and payload */
+ rep = alloc_cprb(sizeof(struct wk_rep_pl));
+ if (!rep)
+ goto out;
+ rep_pl = (struct wk_rep_pl *) (((u8 *) rep) + sizeof(*rep));
+
+ /* urb and target */
+ urb = kmalloc(sizeof(struct ep11_urb), GFP_KERNEL);
+ if (!urb)
+ goto out;
+ target.ap_id = card;
+ target.dom_id = domain;
+ prep_urb(urb, &target, 1,
+ req, sizeof(*req) + req_pl_size,
+ rep, sizeof(*rep) + sizeof(*rep_pl));
+
+ rc = _zcrypt_send_ep11_cprb(urb);
+ if (rc) {
+ DEBUG_ERR(
+ "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int) card, (int) domain, rc);
+ goto out;
+ }
+
+ rc = check_reply_pl((u8 *)rep_pl, __func__);
+ if (rc)
+ goto out;
+ if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
+ DEBUG_ERR("%s unknown reply data format\n", __func__);
+ rc = -EIO;
+ goto out;
+ }
+ if (rep_pl->data_len > *datasize) {
+ DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
+ rc = -ENOSPC;
+ goto out;
+ }
+
+ /* copy the data from the cprb to the data buffer */
+ memcpy(databuf, rep_pl->data, rep_pl->data_len);
+ *datasize = rep_pl->data_len;
+
+out:
+ kfree(req);
+ kfree(rep);
+ kfree(urb);
+ return rc;
+}
+
+int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
+{
+ int rc;
+ struct ep11keyblob *kb;
+ u8 encbuf[64], *kek = NULL;
+ size_t clrkeylen, keklen, encbuflen = sizeof(encbuf);
+
+ if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256)
+ clrkeylen = keybitsize / 8;
+ else {
+ DEBUG_ERR(
+ "%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
+ return -EINVAL;
+ }
+
+ /* allocate memory for the temp kek */
+ keklen = MAXEP11AESKEYBLOBSIZE;
+ kek = kmalloc(keklen, GFP_ATOMIC);
+ if (!kek) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /* Step 1: generate AES 256 bit random kek key */
+ rc = ep11_genaeskey(card, domain, 256,
+ 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
+ kek, &keklen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s generate kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ kb = (struct ep11keyblob *) kek;
+ memset(&kb->head, 0, sizeof(kb->head));
+
+ /* Step 2: encrypt clear key value with the kek key */
+ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
+ clrkey, clrkeylen, encbuf, &encbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+ /* Step 3: import the encrypted key value as a new key */
+ rc = ep11_unwrapkey(card, domain, kek, keklen,
+ encbuf, encbuflen, 0, def_iv,
+ keybitsize, 0, keybuf, keybufsize);
+ if (rc) {
+ DEBUG_ERR(
+ "%s importing key value as new key failed,, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+
+out:
+ kfree(kek);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_clr2keyblob);
+
+int ep11_key2protkey(u16 card, u16 dom, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+{
+ int rc = -EIO;
+ u8 *wkbuf = NULL;
+ size_t wkbuflen = 256;
+ struct wk_info {
+ u16 version;
+ u8 res1[16];
+ u32 pkeytype;
+ u32 pkeybitsize;
+ u64 pkeysize;
+ u8 res2[8];
+ u8 pkey[0];
+ } __packed * wki;
+
+ /* alloc temp working buffer */
+ wkbuf = kmalloc(wkbuflen, GFP_ATOMIC);
+ if (!wkbuf)
+ return -ENOMEM;
+
+ /* ep11 secure key -> protected key + info */
+ rc = ep11_wrapkey(card, dom, key, keylen,
+ 0, def_iv, wkbuf, &wkbuflen);
+ if (rc) {
+ DEBUG_ERR(
+ "%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
+ goto out;
+ }
+ wki = (struct wk_info *) wkbuf;
+
+ /* check struct version and pkey type */
+ if (wki->version != 1 || wki->pkeytype != 1) {
+ DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int) wki->version, (int) wki->pkeytype);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy the translated protected key */
+ switch (wki->pkeysize) {
+ case 16+32:
+ /* AES 128 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+ if (protkeytype)
+ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported pkeysize %d\n",
+ __func__, (int) wki->pkeysize);
+ rc = -EIO;
+ goto out;
+ }
+ memcpy(protkey, wki->pkey, wki->pkeysize);
+ if (protkeylen)
+ *protkeylen = (u32) wki->pkeysize;
+ rc = 0;
+
+out:
+ kfree(wkbuf);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_key2protkey);
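
And a matching usage sketch, assuming the blob was produced by ep11_genaeskey or ep11_clr2keyblob; the 64 byte buffer covers the largest case handled above (32 byte AES-256 key plus 32 byte verification pattern).

/* sketch: derive a CPACF protected key from an EP11 secure key blob */
static int example_blob_to_protkey(u16 card, u16 dom,
				   const u8 *blob, size_t bloblen)
{
	u8 protkey[64];
	u32 protkeylen = 0, protkeytype = 0;
	int rc;

	rc = ep11_key2protkey(card, dom, blob, bloblen,
			      protkey, &protkeylen, &protkeytype);
	if (rc == 0)
		pr_debug("protected key: %u bytes, type %u\n",
			 protkeylen, protkeytype);
	return rc;
}
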
+
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp)
+{
+ struct zcrypt_device_status_ext *device_status;
+ u32 *_apqns = NULL, _nr_apqns = 0;
+ int i, card, dom, rc = -ENOMEM;
+ struct ep11_domain_info edi;
+ struct ep11_card_info eci;
+
+ /* fetch status of all crypto cards */
+ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
+ sizeof(struct zcrypt_device_status_ext),
+ GFP_KERNEL);
+ if (!device_status)
+ return -ENOMEM;
+ zcrypt_device_status_mask_ext(device_status);
+
+ /* allocate 1k space for up to 256 apqns */
+ _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ if (!_apqns) {
+ kfree(device_status);
+ return -ENOMEM;
+ }
+
+ /* walk through all the crypto apqns */
+ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
+ card = AP_QID_CARD(device_status[i].qid);
+ dom = AP_QID_QUEUE(device_status[i].qid);
+ /* check online state */
+ if (!device_status[i].online)
+ continue;
+ /* check for ep11 functions */
+ if (!(device_status[i].functions & 0x01))
+ continue;
+ /* check cardnr */
+ if (cardnr != 0xFFFF && card != cardnr)
+ continue;
+ /* check domain */
+ if (domain != 0xFFFF && dom != domain)
+ continue;
+ /* check min hardware type */
+ if (minhwtype && device_status[i].hwtype < minhwtype)
+ continue;
+ /* check min api version if given */
+ if (minapi > 0) {
+ if (ep11_get_card_info(card, &eci, 0))
+ continue;
+ if (minapi > eci.API_ord_nr)
+ continue;
+ }
+ /* check wkvp if given */
+ if (wkvp) {
+ if (ep11_get_domain_info(card, dom, &edi))
+ continue;
+ if (edi.cur_wk_state != '1')
+ continue;
+ if (memcmp(wkvp, edi.cur_wkvp, 16))
+ continue;
+ }
+ /* apqn passed all filtering criteria, add it to the array */
+ if (_nr_apqns < 256)
+ _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16) dom);
+ }
+
+ /* nothing found ? */
+ if (!_nr_apqns) {
+ kfree(_apqns);
+ rc = -ENODEV;
+ } else {
+ /* no re-allocation, simply return the _apqns array */
+ *apqns = _apqns;
+ *nr_apqns = _nr_apqns;
+ rc = 0;
+ }
+
+ kfree(device_status);
+ return rc;
+}
+EXPORT_SYMBOL(ep11_findcard2);
+
+void __exit zcrypt_ep11misc_exit(void)
+{
+ card_cache_free();
+}
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h
new file mode 100644
index 000000000000..e3ed5ed1de86
--- /dev/null
+++ b/drivers/s390/crypto/zcrypt_ep11misc.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright IBM Corp. 2019
+ * Author(s): Harald Freudenberger <freude@linux.ibm.com>
+ *
+ * Collection of EP11 misc functions used by zcrypt and pkey
+ */
+
+#ifndef _ZCRYPT_EP11MISC_H_
+#define _ZCRYPT_EP11MISC_H_
+
+#include <asm/zcrypt.h>
+#include <asm/pkey.h>
+
+#define TOKVER_EP11_AES 0x03 /* EP11 AES key blob */
+
+#define EP11_API_V 4 /* highest known and supported EP11 API version */
+
+#define EP11_STRUCT_MAGIC 0x1234
+#define EP11_BLOB_PKEY_EXTRACTABLE 0x200000
+
+/* inside view of an EP11 secure key blob */
+struct ep11keyblob {
+ union {
+ u8 session[32];
+ struct {
+ u8 type; /* 0x00 (TOKTYPE_NON_CCA) */
+ u8 res0; /* unused */
+ u16 len; /* total length in bytes of this blob */
+ u8 version; /* 0x03 (TOKVER_EP11_AES) */
+ u8 res1; /* unused */
+ u16 keybitlen; /* clear key bit len, 0 for unknown */
+ } head;
+ };
+ u8 wkvp[16]; /* wrapping key verification pattern */
+ u64 attr; /* boolean key attributes */
+ u64 mode; /* mode bits */
+ u16 version; /* 0x1234, EP11_STRUCT_MAGIC */
+ u8 iv[14];
+ u8 encrypted_key_data[144];
+ u8 mac[32];
+} __packed;
+
+/*
+ * Simple check if the key blob is a valid EP11 secure AES key.
+ * If keybitsize is given, the bitsize of the key is also checked.
+ * If checkcpacfexport is enabled, the key is also checked for the
+ * attributes needed to export this key for CPACF use.
+ * Returns 0 on success or errno value on failure.
+ */
+int ep11_check_aeskeyblob(debug_info_t *dbg, int dbflvl,
+ const u8 *key, int keybitsize,
+ int checkcpacfexport);
+
+/* EP11 card info struct */
+struct ep11_card_info {
+ u32 API_ord_nr; /* API ordinal number */
+ u16 FW_version; /* Firmware major and minor version */
+ char serial[16]; /* serial number string (16 ascii, no 0x00 !) */
+ u64 op_mode; /* card operational mode(s) */
+};
+
+/* EP11 domain info struct */
+struct ep11_domain_info {
+ char cur_wk_state; /* '0' invalid, '1' valid */
+ char new_wk_state; /* '0' empty, '1' uncommitted, '2' committed */
+ u8 cur_wkvp[32]; /* current wrapping key verification pattern */
+ u8 new_wkvp[32]; /* new wrapping key verification pattern */
+ u64 op_mode; /* domain operational mode(s) */
+};
+
+/*
+ * Provide information about an EP11 card.
+ */
+int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify);
+
+/*
+ * Provide information about a domain within an EP11 card.
+ */
+int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info);
+
+/*
+ * Generate (random) EP11 AES secure key.
+ */
+int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
+ u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Generate EP11 AES secure key with given clear key value.
+ */
+int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
+ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
+
+/*
+ * Derive protected key from EP11 AES secure key blob.
+ */
+int ep11_key2protkey(u16 cardnr, u16 domain, const u8 *key, size_t keylen,
+ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+/*
+ * Build a list of ep11 apqns meeting the following constraints:
+ * - apqn is online and is in fact an EP11 apqn
+ * - if cardnr is not FFFF only apqns with this cardnr
+ * - if domain is not FFFF only apqns with this domainnr
+ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
+ * - if minapi > 0 only apqns with API_ord_nr >= minapi
+ * - if wkvp != NULL only apqns where the wkvp (EP11_WKVPLEN bytes) matches
+ * the first EP11_WKVPLEN bytes of the wkvp of the current wrapping
+ * key for this domain. When a wkvp is given there will always be a re-fetch
+ * of the domain info for the potential apqn - so this triggers a request
+ * and reply to each eligible apqn.
+ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
+ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
+ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
+ * may be cast to struct pkey_apqn. The return value is either 0 for success
+ * or a negative errno value. If no apqn meeting the criteria is found,
+ * -ENODEV is returned.
+ */
+int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
+ int minhwtype, int minapi, const u8 *wkvp);
+
+void zcrypt_ep11misc_exit(void);
+
+#endif /* _ZCRYPT_EP11MISC_H_ */