author | Heiko Carstens <hca@linux.ibm.com> | 2025-05-17 10:55:20 +0200
committer | Heiko Carstens <hca@linux.ibm.com> | 2025-05-17 10:55:20 +0200
commit | d536e1941e77c403b54a16fe4c0a920022948d27 (patch)
tree | 392c0e1863a24f4d1507e46d920f2cda371b3997
parent | 42a7d5a9bd0c86d060d808b9b542448d548c303e (diff)
parent | d35ef47a43f8a01252e89b1fd4a52c91017b5d59 (diff)
download | linux-for-next.tar.gz
Merge branch 'features' into for-next
* features:
s390/ptrace: Always inline regs_get_kernel_stack_nth() and regs_get_register()
s390/thread_info: Cleanup header includes
s390/extmem: Add workaround for DCSS unload diag
s390/crypto: Rework protected key AES for true asynch support
s390/cpacf: Rework cpacf_pcc() to return condition code
s390/mm: Fix potential use-after-free in __crst_table_upgrade()
s390/mm: Add mmap_assert_write_locked() check to crst_table_upgrade()
s390/string: Remove strcpy() implementation
s390/con3270: Use strscpy() instead of strcpy()
s390/boot: Use strscpy() instead of strcpy()
s390: Simple strcpy() to strscpy() conversions
s390/pkey/crypto: Introduce xflags param for pkey in-kernel API
s390/pkey: Provide and pass xflags within pkey and zcrypt layers
s390/uv: Remove uv_get_secret_metadata function
s390/pkey: Use preallocated memory for retrieve of UV secret metadata
s390/uv: Rename find_secret() to uv_find_secret() and publish
s390/pkey: Rework EP11 pkey handler to use stack for small memory allocs
s390/pkey: Rework CCA pkey handler to use stack for small memory allocs
s390/zcrypt: Rework ep11 misc functions to use cprb mempool
s390/zcrypt: Allocate ep11_domain_query_info on the stack instead of kmalloc
s390/zcrypt: Propagate xflags argument with cca_get_info()
s390/zcrypt: Rework cca misc functions kmallocs to use the cprb mempool
s390/zcrypt: Rework ep11 findcard() implementation and callers
s390/zcrypt: Rework cca findcard() implementation and callers
s390/zcrypt: Remove CCA and EP11 card and domain info caches
s390/zcrypt: Remove unused functions from cca misc
s390/zcrypt: Introduce pre-allocated device status array for ep11 misc
s390/zcrypt: Introduce pre-allocated device status array for cca misc
s390/zcrypt: Rework zcrypt function zcrypt_device_status_mask_ext
s390/zcrypt: Introduce cprb mempool for ep11 misc functions
s390/zcrypt: Introduce cprb mempool for cca misc functions
s390/ap/zcrypt: New xflag parameter
s390/zcrypt: Avoid alloc and copy of ep11 targets if kernelspace cprb
s390/ap: Introduce ap message buffer pool
s390/ap/zcrypt: Rework AP message buffer allocation
s390/ap: Move response_type struct into ap_msg struct
s390/cpumf: Adjust number of leading zeroes for z15 attributes
s390: Remove optional third argument of strscpy() if possible
s390/ipl: Rename and change strncpy_skip_quote()
s390/string: Remove optimized strncpy()
watchdog: diag288_wdt: Implement module autoload
s390/boot: Replace strncpy() with strscpy()
s390/boot: Add sized_strscpy() to enable strscpy() usage
s390/mm: Select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
s390/mm: Reimplement lazy ASCE handling
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
60 files changed, 2528 insertions, 1806 deletions
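
A large part of this series converts strcpy()/strncpy() users to strscpy(), and the boot code gains its own sized_strscpy() (see arch/s390/boot/string.c in the diff below). As a quick illustration of the contract that the converted call sites rely on (copy at most count-1 bytes, always NUL-terminate, return the copied length or -E2BIG on truncation), here is a user-space sketch built around the same function body as in the diff; the test harness in main() is illustrative only, not kernel code:

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

/*
 * Same body as the sized_strscpy() added in arch/s390/boot/string.c:
 * copy at most count-1 bytes, always NUL-terminate, return the number
 * of bytes copied or -E2BIG when the source had to be truncated.
 */
static ssize_t sized_strscpy(char *dst, const char *src, size_t count)
{
	size_t len;

	if (count == 0)
		return -E2BIG;
	len = strnlen(src, count - 1);
	memcpy(dst, src, len);
	dst[len] = '\0';
	return src[len] ? -E2BIG : (ssize_t)len;
}

int main(void)
{
	char buf[8];

	/* fits: returns the copied length */
	printf("%zd '%s'\n", sized_strscpy(buf, "short", sizeof(buf)), buf);
	/* truncated to 7 bytes + NUL: returns -E2BIG (-7 on Linux) */
	printf("%zd '%s'\n", sized_strscpy(buf, "far too long", sizeof(buf)), buf);
	return 0;
}
```

Note that the two-argument strscpy(dst, src) form used throughout the diff infers the buffer size from the destination, which is why it requires dst to be a true array rather than a pointer.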
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 99fb986fca6e8d..0c16dc443e2f65 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -146,6 +146,7 @@ config S390 select ARCH_WANTS_NO_INSTR select ARCH_WANT_DEFAULT_BPF_JIT select ARCH_WANT_IPC_PARSE_VERSION + select ARCH_WANT_IRQS_OFF_ACTIVATE_MM select ARCH_WANT_KERNEL_PMD_MKWRITE select ARCH_WANT_LD_ORPHAN_WARN select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP diff --git a/arch/s390/boot/ipl_parm.c b/arch/s390/boot/ipl_parm.c index d04e9b89d14a66..f584d7da29cb20 100644 --- a/arch/s390/boot/ipl_parm.c +++ b/arch/s390/boot/ipl_parm.c @@ -179,7 +179,7 @@ void setup_boot_command_line(void) if (has_ebcdic_char(parmarea.command_line)) EBCASC(parmarea.command_line, COMMAND_LINE_SIZE); /* copy arch command line */ - strcpy(early_command_line, strim(parmarea.command_line)); + strscpy(early_command_line, strim(parmarea.command_line)); /* append IPL PARM data to the boot command line */ if (!is_prot_virt_guest() && ipl_block_valid) @@ -253,7 +253,8 @@ void parse_boot_command_line(void) int rc; __kaslr_enabled = IS_ENABLED(CONFIG_RANDOMIZE_BASE); - args = strcpy(command_line_buf, early_command_line); + strscpy(command_line_buf, early_command_line); + args = command_line_buf; while (*args) { args = next_arg(args, ¶m, &val); @@ -309,7 +310,7 @@ void parse_boot_command_line(void) if (!strcmp(param, "bootdebug")) { bootdebug = true; if (val) - strncpy(bootdebug_filter, val, sizeof(bootdebug_filter) - 1); + strscpy(bootdebug_filter, val); } if (!strcmp(param, "quiet")) boot_console_loglevel = CONSOLE_LOGLEVEL_QUIET; diff --git a/arch/s390/boot/printk.c b/arch/s390/boot/printk.c index 8cf6331bc06083..4bb6bc95704e2a 100644 --- a/arch/s390/boot/printk.c +++ b/arch/s390/boot/printk.c @@ -29,7 +29,8 @@ static void boot_rb_add(const char *str, size_t len) /* store strings separated by '\0' */ if (len + 1 > avail) boot_rb_off = 0; - strcpy(boot_rb + boot_rb_off, str); + avail = sizeof(boot_rb) - boot_rb_off - 1; + strscpy(boot_rb + boot_rb_off, str, avail); boot_rb_off += len + 1; } @@ -158,10 +159,10 @@ static noinline char *strsym(char *buf, void *ip) p = findsym((unsigned long)ip, &off, &len); if (p) { - strncpy(buf, p, MAX_SYMLEN); + strscpy(buf, p, MAX_SYMLEN); /* reserve 15 bytes for offset/len in symbol+0x1234/0x1234 */ p = buf + strnlen(buf, MAX_SYMLEN - 15); - strcpy(p, "+0x"); + strscpy(p, "+0x", MAX_SYMLEN - (p - buf)); as_hex(p + 3, off, 0); strcat(p, "/0x"); as_hex(p + strlen(p), len, 0); diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c index 06316fb8e0fad8..da8337e63a3e23 100644 --- a/arch/s390/boot/startup.c +++ b/arch/s390/boot/startup.c @@ -6,6 +6,7 @@ #include <asm/boot_data.h> #include <asm/extmem.h> #include <asm/sections.h> +#include <asm/diag288.h> #include <asm/maccess.h> #include <asm/machine.h> #include <asm/sysinfo.h> @@ -71,6 +72,20 @@ static void detect_machine_type(void) set_machine_feature(MFEATURE_VM); } +static void detect_diag288(void) +{ + /* "BEGIN" in EBCDIC character set */ + static const char cmd[] = "\xc2\xc5\xc7\xc9\xd5"; + unsigned long action, len; + + action = machine_is_vm() ? (unsigned long)cmd : LPARWDT_RESTART; + len = machine_is_vm() ? 
sizeof(cmd) : 0; + if (__diag288(WDT_FUNC_INIT, MIN_INTERVAL, action, len)) + return; + __diag288(WDT_FUNC_CANCEL, 0, 0, 0); + set_machine_feature(MFEATURE_DIAG288); +} + static void detect_diag9c(void) { unsigned int cpu; @@ -519,6 +534,8 @@ void startup_kernel(void) detect_facilities(); detect_diag9c(); detect_machine_type(); + /* detect_diag288() needs machine type */ + detect_diag288(); cmma_init(); sanitize_prot_virt_host(); max_physmem_end = detect_max_physmem_end(); diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c index f6b9b1df48a82c..bd68161434a602 100644 --- a/arch/s390/boot/string.c +++ b/arch/s390/boot/string.c @@ -29,6 +29,18 @@ int strncmp(const char *cs, const char *ct, size_t count) return 0; } +ssize_t sized_strscpy(char *dst, const char *src, size_t count) +{ + size_t len; + + if (count == 0) + return -E2BIG; + len = strnlen(src, count - 1); + memcpy(dst, src, len); + dst[len] = '\0'; + return src[len] ? -E2BIG : len; +} + void *memset64(uint64_t *s, uint64_t v, size_t count) { uint64_t *xs = s; diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index 511093713a6fc8..47f5ce7904f873 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -5,7 +5,7 @@ * s390 implementation of the AES Cipher Algorithm with protected keys. * * s390 Version: - * Copyright IBM Corp. 2017, 2023 + * Copyright IBM Corp. 2017, 2025 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * Harald Freudenberger <freude@de.ibm.com> */ @@ -13,16 +13,18 @@ #define KMSG_COMPONENT "paes_s390" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <crypto/aes.h> -#include <crypto/algapi.h> -#include <linux/bug.h> -#include <linux/err.h> -#include <linux/module.h> +#include <linux/atomic.h> #include <linux/cpufeature.h> +#include <linux/delay.h> +#include <linux/err.h> #include <linux/init.h> +#include <linux/miscdevice.h> +#include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> -#include <linux/delay.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/engine.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <asm/cpacf.h> @@ -44,23 +46,61 @@ static DEFINE_MUTEX(ctrblk_lock); static cpacf_mask_t km_functions, kmc_functions, kmctr_functions; +static struct crypto_engine *paes_crypto_engine; +#define MAX_QLEN 10 + +/* + * protected key specific stuff + */ + struct paes_protkey { u32 type; u32 len; u8 protkey[PXTS_256_PROTKEY_SIZE]; }; -struct key_blob { - /* - * Small keys will be stored in the keybuf. Larger keys are - * stored in extra allocated memory. In both cases does - * key point to the memory where the key is stored. - * The code distinguishes by checking keylen against - * sizeof(keybuf). See the two following helper functions. 
- */ - u8 *key; - u8 keybuf[128]; +#define PK_STATE_NO_KEY 0 +#define PK_STATE_CONVERT_IN_PROGRESS 1 +#define PK_STATE_VALID 2 + +struct s390_paes_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[PAES_MAX_KEYSIZE]; + unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk holds the protected key */ + struct paes_protkey pk; +}; + +struct s390_pxts_ctx { + /* source key material used to derive a protected key from */ + u8 keybuf[2 * PAES_MAX_KEYSIZE]; unsigned int keylen; + + /* cpacf function code to use with this protected key type */ + long fc; + + /* nr of requests enqueued via crypto engine which use this tfm ctx */ + atomic_t via_engine_ctr; + + /* spinlock to atomic read/update all the following fields */ + spinlock_t pk_lock; + + /* see PK_STATE* defines above, < 0 holds convert failure rc */ + int pk_state; + /* if state is valid, pk[] hold(s) the protected key(s) */ + struct paes_protkey pk[2]; }; /* @@ -89,214 +129,367 @@ static inline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest) return sizeof(*token) + cklen; } -static inline int _key_to_kb(struct key_blob *kb, - const u8 *key, - unsigned int keylen) +/* + * paes_ctx_setkey() - Set key value into context, maybe construct + * a clear key token digestible by pkey from a clear key value. + */ +static inline int paes_ctx_setkey(struct s390_paes_ctx *ctx, + const u8 *key, unsigned int keylen) { + if (keylen > sizeof(ctx->keybuf)) + return -EINVAL; + switch (keylen) { case 16: case 24: case 32: /* clear key value, prepare pkey clear key token in keybuf */ - memset(kb->keybuf, 0, sizeof(kb->keybuf)); - kb->keylen = make_clrkey_token(key, keylen, kb->keybuf); - kb->key = kb->keybuf; + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); + ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf); break; default: /* other key material, let pkey handle this */ - if (keylen <= sizeof(kb->keybuf)) - kb->key = kb->keybuf; - else { - kb->key = kmalloc(keylen, GFP_KERNEL); - if (!kb->key) - return -ENOMEM; - } - memcpy(kb->key, key, keylen); - kb->keylen = keylen; + memcpy(ctx->keybuf, key, keylen); + ctx->keylen = keylen; break; } return 0; } -static inline int _xts_key_to_kb(struct key_blob *kb, - const u8 *key, - unsigned int keylen) +/* + * pxts_ctx_setkey() - Set key value into context, maybe construct + * a clear key token digestible by pkey from a clear key value. 
+ */ +static inline int pxts_ctx_setkey(struct s390_pxts_ctx *ctx, + const u8 *key, unsigned int keylen) { size_t cklen = keylen / 2; - memset(kb->keybuf, 0, sizeof(kb->keybuf)); + if (keylen > sizeof(ctx->keybuf)) + return -EINVAL; switch (keylen) { case 32: case 64: /* clear key value, prepare pkey clear key tokens in keybuf */ - kb->key = kb->keybuf; - kb->keylen = make_clrkey_token(key, cklen, kb->key); - kb->keylen += make_clrkey_token(key + cklen, cklen, - kb->key + kb->keylen); + memset(ctx->keybuf, 0, sizeof(ctx->keybuf)); + ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf); + ctx->keylen += make_clrkey_token(key + cklen, cklen, + ctx->keybuf + ctx->keylen); break; default: /* other key material, let pkey handle this */ - if (keylen <= sizeof(kb->keybuf)) { - kb->key = kb->keybuf; - } else { - kb->key = kmalloc(keylen, GFP_KERNEL); - if (!kb->key) - return -ENOMEM; - } - memcpy(kb->key, key, keylen); - kb->keylen = keylen; + memcpy(ctx->keybuf, key, keylen); + ctx->keylen = keylen; break; } return 0; } -static inline void _free_kb_keybuf(struct key_blob *kb) +/* + * Convert the raw key material into a protected key via PKEY api. + * This function may sleep - don't call in non-sleeping context. + */ +static inline int convert_key(const u8 *key, unsigned int keylen, + struct paes_protkey *pk) { - if (kb->key && kb->key != kb->keybuf - && kb->keylen > sizeof(kb->keybuf)) { - kfree_sensitive(kb->key); - kb->key = NULL; - } - memzero_explicit(kb->keybuf, sizeof(kb->keybuf)); -} - -struct s390_paes_ctx { - struct key_blob kb; - struct paes_protkey pk; - spinlock_t pk_lock; - unsigned long fc; -}; - -struct s390_pxts_ctx { - struct key_blob kb; - struct paes_protkey pk[2]; - spinlock_t pk_lock; - unsigned long fc; -}; + int rc, i; -static inline int __paes_keyblob2pkey(const u8 *key, unsigned int keylen, - struct paes_protkey *pk) -{ - int i, rc = -EIO; + pk->len = sizeof(pk->protkey); /* try three times in case of busy card */ - for (i = 0; rc && i < 3; i++) { - if (rc == -EBUSY && in_task()) { - if (msleep_interruptible(1000)) - return -EINTR; + for (rc = -EIO, i = 0; rc && i < 3; i++) { + if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) { + rc = -EINTR; + goto out; } - rc = pkey_key2protkey(key, keylen, pk->protkey, &pk->len, - &pk->type); + rc = pkey_key2protkey(key, keylen, + pk->protkey, &pk->len, &pk->type, + PKEY_XFLAG_NOMEMALLOC); } +out: + pr_debug("rc=%d\n", rc); return rc; } -static inline int __paes_convert_key(struct s390_paes_ctx *ctx) +/* + * (Re-)Convert the raw key material from the ctx into a protected key + * via convert_key() function. Update the pk_state, pk_type, pk_len + * and the protected key in the tfm context. + * Please note this function may be invoked concurrently with the very + * same tfm context. The pk_lock spinlock in the context ensures an + * atomic update of the pk and the pk state but does not guarantee any + * order of update. So a fresh converted valid protected key may get + * updated with an 'old' expired key value. As the cpacf instructions + * detect this, refuse to operate with an invalid key and the calling + * code triggers a (re-)conversion this does no harm. This may lead to + * unnecessary additional conversion but never to invalid data on en- + * or decrypt operations. 
+ */ +static int paes_convert_key(struct s390_paes_ctx *ctx) { struct paes_protkey pk; int rc; - pk.len = sizeof(pk.protkey); - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk); - if (rc) - return rc; + spin_lock_bh(&ctx->pk_lock); + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&ctx->pk_lock); + + rc = convert_key(ctx->keybuf, ctx->keylen, &pk); + /* update context */ spin_lock_bh(&ctx->pk_lock); - memcpy(&ctx->pk, &pk, sizeof(pk)); + if (rc) { + ctx->pk_state = rc; + } else { + ctx->pk_state = PK_STATE_VALID; + ctx->pk = pk; + } spin_unlock_bh(&ctx->pk_lock); - return 0; + memzero_explicit(&pk, sizeof(pk)); + pr_debug("rc=%d\n", rc); + return rc; } -static int ecb_paes_init(struct crypto_skcipher *tfm) +/* + * (Re-)Convert the raw xts key material from the ctx into a + * protected key via convert_key() function. Update the pk_state, + * pk_type, pk_len and the protected key in the tfm context. + * See also comments on function paes_convert_key. + */ +static int pxts_convert_key(struct s390_pxts_ctx *ctx) { - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct paes_protkey pk0, pk1; + size_t split_keylen; + int rc; - ctx->kb.key = NULL; - spin_lock_init(&ctx->pk_lock); + spin_lock_bh(&ctx->pk_lock); + ctx->pk_state = PK_STATE_CONVERT_IN_PROGRESS; + spin_unlock_bh(&ctx->pk_lock); - return 0; -} + rc = convert_key(ctx->keybuf, ctx->keylen, &pk0); + if (rc) + goto out; -static void ecb_paes_exit(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + switch (pk0.type) { + case PKEY_KEYTYPE_AES_128: + case PKEY_KEYTYPE_AES_256: + /* second keytoken required */ + if (ctx->keylen % 2) { + rc = -EINVAL; + goto out; + } + split_keylen = ctx->keylen / 2; + rc = convert_key(ctx->keybuf + split_keylen, + split_keylen, &pk1); + if (rc) + goto out; + if (pk0.type != pk1.type) { + rc = -EINVAL; + goto out; + } + break; + case PKEY_KEYTYPE_AES_XTS_128: + case PKEY_KEYTYPE_AES_XTS_256: + /* single key */ + pk1.type = 0; + break; + default: + /* unsupported protected keytype */ + rc = -EINVAL; + goto out; + } + +out: + /* update context */ + spin_lock_bh(&ctx->pk_lock); + if (rc) { + ctx->pk_state = rc; + } else { + ctx->pk_state = PK_STATE_VALID; + ctx->pk[0] = pk0; + ctx->pk[1] = pk1; + } + spin_unlock_bh(&ctx->pk_lock); - _free_kb_keybuf(&ctx->kb); + memzero_explicit(&pk0, sizeof(pk0)); + memzero_explicit(&pk1, sizeof(pk1)); + pr_debug("rc=%d\n", rc); + return rc; } -static inline int __ecb_paes_set_key(struct s390_paes_ctx *ctx) +/* + * PAES ECB implementation + */ + +struct ecb_param { + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; + +struct s390_pecb_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct ecb_param param; +}; + +static int ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { - unsigned long fc; + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + long fc; int rc; - rc = __paes_convert_key(ctx); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KM_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KM_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? 
CPACF_KM_PAES_256 : 0; + /* convert key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; - /* Check if the function code is available */ + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk.type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KM_PAES_128; + break; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KM_PAES_192; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KM_PAES_256; + break; + default: + fc = 0; + break; + } ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; - return ctx->fc ? 0 : -EINVAL; + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int ecb_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int ecb_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pecb_req_ctx *req_ctx, + bool maysleep) { - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - int rc; - - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); + struct ecb_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; + unsigned int nbytes, n, k; + int pk_state, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } if (rc) - return rc; + goto out; - return __ecb_paes_set_key(ctx); + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) != 0) { + /* only use complete blocks */ + n = nbytes & ~(AES_BLOCK_SIZE - 1); + k = cpacf_km(ctx->fc | req_ctx->modifier, param, + walk->dst.virt.addr, walk->src.virt.addr, n); + if (k) + rc = skcipher_walk_done(walk, nbytes - k); + if (k < n) { + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + } + +out: + pr_debug("rc=%d\n", rc); + return rc; } static int ecb_paes_crypt(struct skcipher_request *req, unsigned long modifier) { + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; - unsigned int nbytes, n, k; + struct skcipher_walk *walk = &req_ctx->walk; int rc; - rc = skcipher_walk_virt(&walk, req, false); + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. 
+ */ + + rc = skcipher_walk_virt(walk, req, false); if (rc) - return rc; + goto out; - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; - while ((nbytes = walk.nbytes) != 0) { - /* only use complete blocks */ - n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, ¶m, - walk.dst.virt.addr, walk.src.virt.addr, n); - if (k) - rc = skcipher_walk_done(&walk, nbytes - k); - if (k < n) { - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = ecb_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); return rc; } @@ -310,112 +503,256 @@ static int ecb_paes_decrypt(struct skcipher_request *req) return ecb_paes_crypt(req, CPACF_DECRYPT); } -static struct skcipher_alg ecb_paes_alg = { - .base.cra_name = "ecb(paes)", - .base.cra_driver_name = "ecb-paes-s390", - .base.cra_priority = 401, /* combo: aes + ecb + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.cra_list), - .init = ecb_paes_init, - .exit = ecb_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .setkey = ecb_paes_set_key, - .encrypt = ecb_paes_encrypt, - .decrypt = ecb_paes_decrypt, -}; - -static int cbc_paes_init(struct crypto_skcipher *tfm) +static int ecb_paes_init(struct crypto_skcipher *tfm) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->kb.key = NULL; + memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->pk_lock); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pecb_req_ctx)); + return 0; } -static void cbc_paes_exit(struct crypto_skcipher *tfm) +static void ecb_paes_exit(struct crypto_skcipher *tfm) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); + memzero_explicit(ctx, sizeof(*ctx)); } -static inline int __cbc_paes_set_key(struct s390_paes_ctx *ctx) +static int ecb_paes_do_one_request(struct crypto_engine *engine, void *areq) { - unsigned long fc; + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pecb_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; int rc; - rc = __paes_convert_key(ctx); - if (rc) - return rc; + /* walk has already been prepared */ + + rc = ecb_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. 
+ * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. + */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMC_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? CPACF_KMC_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? CPACF_KMC_PAES_256 : 0; + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; +} - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0; +static struct skcipher_engine_alg ecb_paes_alg = { + .base = { + .base.cra_name = "ecb(paes)", + .base.cra_driver_name = "ecb-paes-s390", + .base.cra_priority = 401, /* combo: aes + ecb + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ecb_paes_alg.base.base.cra_list), + .init = ecb_paes_init, + .exit = ecb_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .setkey = ecb_paes_setkey, + .encrypt = ecb_paes_encrypt, + .decrypt = ecb_paes_decrypt, + }, + .op = { + .do_one_request = ecb_paes_do_one_request, + }, +}; - return ctx->fc ? 0 : -EINVAL; -} +/* + * PAES CBC implementation + */ + +struct cbc_param { + u8 iv[AES_BLOCK_SIZE]; + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; + +struct s390_pcbc_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct cbc_param param; +}; -static int cbc_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + long fc; int rc; - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; - return __cbc_paes_set_key(ctx); + /* convert raw key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; + + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk.type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KMC_PAES_128; + break; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KMC_PAES_192; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KMC_PAES_256; + break; + default: + fc = 0; + break; + } + ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0; + + rc = fc ? 
0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) +static int cbc_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pcbc_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - struct { - u8 iv[AES_BLOCK_SIZE]; - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; + struct cbc_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int nbytes, n, k; - int rc; - - rc = skcipher_walk_virt(&walk, req, false); + int pk_state, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } if (rc) - return rc; + goto out; - memcpy(param.iv, walk.iv, AES_BLOCK_SIZE); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); + memcpy(param->iv, walk->iv, AES_BLOCK_SIZE); - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_kmc(ctx->fc | modifier, ¶m, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_kmc(ctx->fc | req_ctx->modifier, param, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) { - memcpy(walk.iv, param.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes - k); + memcpy(walk->iv, param->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, nbytes - k); } if (k < n) { - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); spin_unlock_bh(&ctx->pk_lock); } } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int cbc_paes_crypt(struct skcipher_request *req, unsigned long modifier) +{ + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; + + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. 
+ */ + + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; + + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = cbc_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); return rc; } @@ -429,496 +766,882 @@ static int cbc_paes_decrypt(struct skcipher_request *req) return cbc_paes_crypt(req, CPACF_DECRYPT); } -static struct skcipher_alg cbc_paes_alg = { - .base.cra_name = "cbc(paes)", - .base.cra_driver_name = "cbc-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.cra_list), - .init = cbc_paes_init, - .exit = cbc_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = cbc_paes_set_key, - .encrypt = cbc_paes_encrypt, - .decrypt = cbc_paes_decrypt, -}; - -static int xts_paes_init(struct crypto_skcipher *tfm) +static int cbc_paes_init(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - ctx->kb.key = NULL; + memset(ctx, 0, sizeof(*ctx)); spin_lock_init(&ctx->pk_lock); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pcbc_req_ctx)); + return 0; } -static void xts_paes_exit(struct crypto_skcipher *tfm) +static void cbc_paes_exit(struct crypto_skcipher *tfm) { - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); + memzero_explicit(ctx, sizeof(*ctx)); } -static inline int __xts_paes_convert_key(struct s390_pxts_ctx *ctx) +static int cbc_paes_do_one_request(struct crypto_engine *engine, void *areq) { - struct paes_protkey pk0, pk1; - size_t split_keylen; + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pcbc_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; int rc; - pk0.len = sizeof(pk0.protkey); - pk1.len = sizeof(pk1.protkey); - - rc = __paes_keyblob2pkey(ctx->kb.key, ctx->kb.keylen, &pk0); - if (rc) - return rc; + /* walk has already been prepared */ + + rc = cbc_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. 
+ */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } - switch (pk0.type) { - case PKEY_KEYTYPE_AES_128: - case PKEY_KEYTYPE_AES_256: - /* second keytoken required */ - if (ctx->kb.keylen % 2) - return -EINVAL; - split_keylen = ctx->kb.keylen / 2; + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; +} - rc = __paes_keyblob2pkey(ctx->kb.key + split_keylen, - split_keylen, &pk1); - if (rc) - return rc; +static struct skcipher_engine_alg cbc_paes_alg = { + .base = { + .base.cra_name = "cbc(paes)", + .base.cra_driver_name = "cbc-paes-s390", + .base.cra_priority = 402, /* cbc-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(cbc_paes_alg.base.base.cra_list), + .init = cbc_paes_init, + .exit = cbc_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = cbc_paes_setkey, + .encrypt = cbc_paes_encrypt, + .decrypt = cbc_paes_decrypt, + }, + .op = { + .do_one_request = cbc_paes_do_one_request, + }, +}; - if (pk0.type != pk1.type) - return -EINVAL; - break; - case PKEY_KEYTYPE_AES_XTS_128: - case PKEY_KEYTYPE_AES_XTS_256: - /* single key */ - pk1.type = 0; - break; - default: - /* unsupported protected keytype */ - return -EINVAL; - } +/* + * PAES CTR implementation + */ - spin_lock_bh(&ctx->pk_lock); - ctx->pk[0] = pk0; - ctx->pk[1] = pk1; - spin_unlock_bh(&ctx->pk_lock); +struct ctr_param { + u8 key[PAES_256_PROTKEY_SIZE]; +} __packed; - return 0; -} +struct s390_pctr_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + struct ctr_param param; +}; -static inline int __xts_paes_set_key(struct s390_pxts_ctx *ctx) +static int ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) { - unsigned long fc; + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + long fc; int rc; - rc = __xts_paes_convert_key(ctx); + /* set raw key into context */ + rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) - return rc; + goto out; + + /* convert raw key into protected key */ + rc = paes_convert_key(ctx); + if (rc) + goto out; /* Pick the correct function code based on the protected key type */ - switch (ctx->pk[0].type) { + switch (ctx->pk.type) { case PKEY_KEYTYPE_AES_128: - fc = CPACF_KM_PXTS_128; - break; - case PKEY_KEYTYPE_AES_256: - fc = CPACF_KM_PXTS_256; + fc = CPACF_KMCTR_PAES_128; break; - case PKEY_KEYTYPE_AES_XTS_128: - fc = CPACF_KM_PXTS_128_FULL; + case PKEY_KEYTYPE_AES_192: + fc = CPACF_KMCTR_PAES_192; break; - case PKEY_KEYTYPE_AES_XTS_256: - fc = CPACF_KM_PXTS_256_FULL; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KMCTR_PAES_256; break; default: fc = 0; break; } + ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static inline unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) +{ + unsigned int i, n; + + /* only use complete blocks, max. PAGE_SIZE */ + memcpy(ctrptr, iv, AES_BLOCK_SIZE); + n = (nbytes > PAGE_SIZE) ? 
PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); + for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { + memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); + crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); + ctrptr += AES_BLOCK_SIZE; + } + return n; +} + +static int ctr_paes_do_crypt(struct s390_paes_ctx *ctx, + struct s390_pctr_req_ctx *req_ctx, + bool maysleep) +{ + struct ctr_param *param = &req_ctx->param; + struct skcipher_walk *walk = &req_ctx->walk; + u8 buf[AES_BLOCK_SIZE], *ctrptr; + unsigned int nbytes, n, k; + int pk_state, locked, rc = 0; + + if (!req_ctx->param_init_done) { + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + req_ctx->param_init_done = true; + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + } + if (rc) + goto out; + + locked = mutex_trylock(&ctrblk_lock); + + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) { + n = AES_BLOCK_SIZE; + if (nbytes >= 2 * AES_BLOCK_SIZE && locked) + n = __ctrblk_init(ctrblk, walk->iv, nbytes); + ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv; + k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr, + walk->src.virt.addr, n, ctrptr); + if (k) { + if (ctrptr == ctrblk) + memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE, + AES_BLOCK_SIZE); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, nbytes - k); + } + if (k < n) { + if (!maysleep) { + if (locked) + mutex_unlock(&ctrblk_lock); + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) { + if (locked) + mutex_unlock(&ctrblk_lock); + goto out; + } + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + } + if (locked) + mutex_unlock(&ctrblk_lock); + + /* final block may be < AES_BLOCK_SIZE, copy only nbytes */ + if (nbytes) { + memset(buf, 0, AES_BLOCK_SIZE); + memcpy(buf, walk->src.virt.addr, nbytes); + while (1) { + if (cpacf_kmctr(ctx->fc, param, buf, + buf, AES_BLOCK_SIZE, + walk->iv) == AES_BLOCK_SIZE) + break; + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = paes_convert_key(ctx); + if (rc) + goto out; + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key, ctx->pk.protkey, sizeof(param->key)); + spin_unlock_bh(&ctx->pk_lock); + } + memcpy(walk->dst.virt.addr, buf, nbytes); + crypto_inc(walk->iv, AES_BLOCK_SIZE); + rc = skcipher_walk_done(walk, 0); + } + +out: + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ctr_paes_crypt(struct skcipher_request *req) +{ + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; + + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. 
+ */ + + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; + + req_ctx->param_init_done = false; + + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = ctr_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } + + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); + +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); + return rc; +} + +static int ctr_paes_init(struct crypto_skcipher *tfm) +{ + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->pk_lock); + + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pctr_req_ctx)); + + return 0; +} + +static void ctr_paes_exit(struct crypto_skcipher *tfm) +{ + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + + memzero_explicit(ctx, sizeof(*ctx)); +} + +static int ctr_paes_do_one_request(struct crypto_engine *engine, void *areq) +{ + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pctr_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; - return ctx->fc ? 0 : -EINVAL; + /* walk has already been prepared */ + + rc = ctr_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in process. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediately re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. 
+ */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); + } + + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); + return rc; } -static int xts_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int in_keylen) +static struct skcipher_engine_alg ctr_paes_alg = { + .base = { + .base.cra_name = "ctr(paes)", + .base.cra_driver_name = "ctr-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = 1, + .base.cra_ctxsize = sizeof(struct s390_paes_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.base.cra_list), + .init = ctr_paes_init, + .exit = ctr_paes_exit, + .min_keysize = PAES_MIN_KEYSIZE, + .max_keysize = PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = ctr_paes_setkey, + .encrypt = ctr_paes_crypt, + .decrypt = ctr_paes_crypt, + .chunksize = AES_BLOCK_SIZE, + }, + .op = { + .do_one_request = ctr_paes_do_one_request, + }, +}; + +/* + * PAES XTS implementation + */ + +struct xts_full_km_param { + u8 key[64]; + u8 tweak[16]; + u8 nap[16]; + u8 wkvp[32]; +} __packed; + +struct xts_km_param { + u8 key[PAES_256_PROTKEY_SIZE]; + u8 init[16]; +} __packed; + +struct xts_pcc_param { + u8 key[PAES_256_PROTKEY_SIZE]; + u8 tweak[16]; + u8 block[16]; + u8 bit[16]; + u8 xts[16]; +} __packed; + +struct s390_pxts_req_ctx { + unsigned long modifier; + struct skcipher_walk walk; + bool param_init_done; + union { + struct xts_full_km_param full_km_param; + struct xts_km_param km_param; + } param; +}; + +static int xts_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int in_keylen) { struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); u8 ckey[2 * AES_MAX_KEY_SIZE]; unsigned int ckey_len; + long fc; int rc; if ((in_keylen == 32 || in_keylen == 64) && xts_verify_key(tfm, in_key, in_keylen)) return -EINVAL; - _free_kb_keybuf(&ctx->kb); - rc = _xts_key_to_kb(&ctx->kb, in_key, in_keylen); + /* set raw key into context */ + rc = pxts_ctx_setkey(ctx, in_key, in_keylen); if (rc) - return rc; + goto out; - rc = __xts_paes_set_key(ctx); + /* convert raw key(s) into protected key(s) */ + rc = pxts_convert_key(ctx); if (rc) - return rc; + goto out; /* - * It is not possible on a single protected key (e.g. full AES-XTS) to - * check, if k1 and k2 are the same. - */ - if (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128 || - ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_256) - return 0; - /* * xts_verify_key verifies the key length is not odd and makes * sure that the two keys are not the same. This can be done - * on the two protected keys as well + * on the two protected keys as well - but not for full xts keys. */ - ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? - AES_KEYSIZE_128 : AES_KEYSIZE_256; - memcpy(ckey, ctx->pk[0].protkey, ckey_len); - memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); - return xts_verify_key(tfm, ckey, 2*ckey_len); + if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 || + ctx->pk[0].type == PKEY_KEYTYPE_AES_256) { + ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
+ AES_KEYSIZE_128 : AES_KEYSIZE_256; + memcpy(ckey, ctx->pk[0].protkey, ckey_len); + memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len); + rc = xts_verify_key(tfm, ckey, 2 * ckey_len); + memzero_explicit(ckey, sizeof(ckey)); + if (rc) + goto out; + } + + /* Pick the correct function code based on the protected key type */ + switch (ctx->pk[0].type) { + case PKEY_KEYTYPE_AES_128: + fc = CPACF_KM_PXTS_128; + break; + case PKEY_KEYTYPE_AES_256: + fc = CPACF_KM_PXTS_256; + break; + case PKEY_KEYTYPE_AES_XTS_128: + fc = CPACF_KM_PXTS_128_FULL; + break; + case PKEY_KEYTYPE_AES_XTS_256: + fc = CPACF_KM_PXTS_256_FULL; + break; + default: + fc = 0; + break; + } + ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0; + + rc = fc ? 0 : -EINVAL; + +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int paes_xts_crypt_full(struct skcipher_request *req, - unsigned long modifier) +static int xts_paes_do_crypt_fullkey(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct xts_full_km_param *param = &req_ctx->param.full_km_param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int keylen, offset, nbytes, n, k; - struct { - u8 key[64]; - u8 tweak[16]; - u8 nap[16]; - u8 wkvp[32]; - } fxts_param = { - .nap = {0}, - }; - struct skcipher_walk walk; - int rc; + int rc = 0; - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; + /* + * The calling function xts_paes_do_crypt() ensures the + * protected key state is always PK_STATE_VALID when this + * function is invoked. + */ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_XTS_128) ? 32 : 0; - spin_lock_bh(&ctx->pk_lock); - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, keylen); - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, - sizeof(fxts_param.wkvp)); - spin_unlock_bh(&ctx->pk_lock); - memcpy(fxts_param.tweak, walk.iv, sizeof(fxts_param.tweak)); - fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */ + if (!req_ctx->param_init_done) { + memset(param, 0, sizeof(*param)); + spin_lock_bh(&ctx->pk_lock); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); + spin_unlock_bh(&ctx->pk_lock); + memcpy(param->tweak, walk->iv, sizeof(param->tweak)); + param->nap[0] = 0x01; /* initial alpha power (1, little-endian) */ + req_ctx->param_init_done = true; + } - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. 
+ */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, fxts_param.key + offset, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) - rc = skcipher_walk_done(&walk, nbytes - k); + rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { - if (__xts_paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = pxts_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(fxts_param.key + offset, ctx->pk[0].protkey, - keylen); - memcpy(fxts_param.wkvp, ctx->pk[0].protkey + keylen, - sizeof(fxts_param.wkvp)); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp)); spin_unlock_bh(&ctx->pk_lock); } } +out: + pr_debug("rc=%d\n", rc); return rc; } -static int paes_xts_crypt(struct skcipher_request *req, unsigned long modifier) +static inline int __xts_2keys_prep_param(struct s390_pxts_ctx *ctx, + struct xts_km_param *param, + struct skcipher_walk *walk, + unsigned int keylen, + unsigned int offset, bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct xts_pcc_param pcc_param; + unsigned long cc = 1; + int rc = 0; + + while (cc) { + memset(&pcc_param, 0, sizeof(pcc_param)); + memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak)); + spin_lock_bh(&ctx->pk_lock); + memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); + spin_unlock_bh(&ctx->pk_lock); + cc = cpacf_pcc(ctx->fc, pcc_param.key + offset); + if (cc) { + if (!maysleep) { + rc = -EKEYEXPIRED; + break; + } + rc = pxts_convert_key(ctx); + if (rc) + break; + continue; + } + memcpy(param->init, pcc_param.xts, 16); + } + + memzero_explicit(pcc_param.key, sizeof(pcc_param.key)); + return rc; +} + +static int xts_paes_do_crypt_2keys(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) +{ + struct xts_km_param *param = &req_ctx->param.km_param; + struct skcipher_walk *walk = &req_ctx->walk; unsigned int keylen, offset, nbytes, n, k; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - u8 tweak[16]; - u8 block[16]; - u8 bit[16]; - u8 xts[16]; - } pcc_param; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - u8 init[16]; - } xts_param; - struct skcipher_walk walk; - int rc; + int rc = 0; - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; + /* + * The calling function xts_paes_do_crypt() ensures the + * protected key state is always PK_STATE_VALID when this + * function is invoked. + */ keylen = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 48 : 64; offset = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ? 
16 : 0; - memset(&pcc_param, 0, sizeof(pcc_param)); - memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak)); - spin_lock_bh(&ctx->pk_lock); - memcpy(pcc_param.key + offset, ctx->pk[1].protkey, keylen); - memcpy(xts_param.key + offset, ctx->pk[0].protkey, keylen); - spin_unlock_bh(&ctx->pk_lock); - cpacf_pcc(ctx->fc, pcc_param.key + offset); - memcpy(xts_param.init, pcc_param.xts, 16); + if (!req_ctx->param_init_done) { + rc = __xts_2keys_prep_param(ctx, param, walk, + keylen, offset, maysleep); + if (rc) + goto out; + req_ctx->param_init_done = true; + } - while ((nbytes = walk.nbytes) != 0) { + /* + * Note that in case of partial processing or failure the walk + * is NOT unmapped here. So a follow up task may reuse the walk + * or in case of unrecoverable failure needs to unmap it. + */ + while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */ n = nbytes & ~(AES_BLOCK_SIZE - 1); - k = cpacf_km(ctx->fc | modifier, xts_param.key + offset, - walk.dst.virt.addr, walk.src.virt.addr, n); + k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset, + walk->dst.virt.addr, walk->src.virt.addr, n); if (k) - rc = skcipher_walk_done(&walk, nbytes - k); + rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { - if (__xts_paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); + if (!maysleep) { + rc = -EKEYEXPIRED; + goto out; + } + rc = pxts_convert_key(ctx); + if (rc) + goto out; spin_lock_bh(&ctx->pk_lock); - memcpy(xts_param.key + offset, - ctx->pk[0].protkey, keylen); + memcpy(param->key + offset, ctx->pk[0].protkey, keylen); spin_unlock_bh(&ctx->pk_lock); } } +out: + pr_debug("rc=%d\n", rc); return rc; } -static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) +static int xts_paes_do_crypt(struct s390_pxts_ctx *ctx, + struct s390_pxts_req_ctx *req_ctx, + bool maysleep) { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + int pk_state, rc = 0; + + /* fetch and check protected key state */ + spin_lock_bh(&ctx->pk_lock); + pk_state = ctx->pk_state; + switch (pk_state) { + case PK_STATE_NO_KEY: + rc = -ENOKEY; + break; + case PK_STATE_CONVERT_IN_PROGRESS: + rc = -EKEYEXPIRED; + break; + case PK_STATE_VALID: + break; + default: + rc = pk_state < 0 ? pk_state : -EIO; + break; + } + spin_unlock_bh(&ctx->pk_lock); + if (rc) + goto out; + /* Call the 'real' crypt function based on the xts prot key type. 
*/ switch (ctx->fc) { case CPACF_KM_PXTS_128: case CPACF_KM_PXTS_256: - return paes_xts_crypt(req, modifier); + rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep); + break; case CPACF_KM_PXTS_128_FULL: case CPACF_KM_PXTS_256_FULL: - return paes_xts_crypt_full(req, modifier); + rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep); + break; default: - return -EINVAL; + rc = -EINVAL; } -} -static int xts_paes_encrypt(struct skcipher_request *req) -{ - return xts_paes_crypt(req, 0); +out: + pr_debug("rc=%d\n", rc); + return rc; } -static int xts_paes_decrypt(struct skcipher_request *req) +static inline int xts_paes_crypt(struct skcipher_request *req, unsigned long modifier) { - return xts_paes_crypt(req, CPACF_DECRYPT); -} + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; -static struct skcipher_alg xts_paes_alg = { - .base.cra_name = "xts(paes)", - .base.cra_driver_name = "xts-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.cra_list), - .init = xts_paes_init, - .exit = xts_paes_exit, - .min_keysize = 2 * PAES_MIN_KEYSIZE, - .max_keysize = 2 * PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = xts_paes_set_key, - .encrypt = xts_paes_encrypt, - .decrypt = xts_paes_decrypt, -}; + /* + * Attempt synchronous encryption first. If it fails, schedule the request + * asynchronously via the crypto engine. To preserve execution order, + * once a request is queued to the engine, further requests using the same + * tfm will also be routed through the engine. + */ -static int ctr_paes_init(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + rc = skcipher_walk_virt(walk, req, false); + if (rc) + goto out; - ctx->kb.key = NULL; - spin_lock_init(&ctx->pk_lock); + req_ctx->modifier = modifier; + req_ctx->param_init_done = false; - return 0; -} + /* Try synchronous operation if no active engine usage */ + if (!atomic_read(&ctx->via_engine_ctr)) { + rc = xts_paes_do_crypt(ctx, req_ctx, false); + if (rc == 0) + goto out; + } -static void ctr_paes_exit(struct crypto_skcipher *tfm) -{ - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); + /* + * If sync operation failed or key expired or there are already + * requests enqueued via engine, fallback to async. Mark tfm as + * using engine to serialize requests. + */ + if (rc == 0 || rc == -EKEYEXPIRED) { + atomic_inc(&ctx->via_engine_ctr); + rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); + if (rc != -EINPROGRESS) + atomic_dec(&ctx->via_engine_ctr); + } + + if (rc != -EINPROGRESS) + skcipher_walk_done(walk, rc); - _free_kb_keybuf(&ctx->kb); +out: + if (rc != -EINPROGRESS) + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("rc=%d\n", rc); + return rc; } -static inline int __ctr_paes_set_key(struct s390_paes_ctx *ctx) +static int xts_paes_encrypt(struct skcipher_request *req) { - unsigned long fc; - int rc; - - rc = __paes_convert_key(ctx); - if (rc) - return rc; - - /* Pick the correct function code based on the protected key type */ - fc = (ctx->pk.type == PKEY_KEYTYPE_AES_128) ? CPACF_KMCTR_PAES_128 : - (ctx->pk.type == PKEY_KEYTYPE_AES_192) ? 
CPACF_KMCTR_PAES_192 : - (ctx->pk.type == PKEY_KEYTYPE_AES_256) ? - CPACF_KMCTR_PAES_256 : 0; - - /* Check if the function code is available */ - ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0; + return xts_paes_crypt(req, 0); +} - return ctx->fc ? 0 : -EINVAL; +static int xts_paes_decrypt(struct skcipher_request *req) +{ + return xts_paes_crypt(req, CPACF_DECRYPT); } -static int ctr_paes_set_key(struct crypto_skcipher *tfm, const u8 *in_key, - unsigned int key_len) +static int xts_paes_init(struct crypto_skcipher *tfm) { - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - int rc; + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); - _free_kb_keybuf(&ctx->kb); - rc = _key_to_kb(&ctx->kb, in_key, key_len); - if (rc) - return rc; + memset(ctx, 0, sizeof(*ctx)); + spin_lock_init(&ctx->pk_lock); - return __ctr_paes_set_key(ctx); + crypto_skcipher_set_reqsize(tfm, sizeof(struct s390_pxts_req_ctx)); + + return 0; } -static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes) +static void xts_paes_exit(struct crypto_skcipher *tfm) { - unsigned int i, n; + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); - /* only use complete blocks, max. PAGE_SIZE */ - memcpy(ctrptr, iv, AES_BLOCK_SIZE); - n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1); - for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) { - memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE); - crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE); - ctrptr += AES_BLOCK_SIZE; - } - return n; + memzero_explicit(ctx, sizeof(*ctx)); } -static int ctr_paes_crypt(struct skcipher_request *req) +static int xts_paes_do_one_request(struct crypto_engine *engine, void *areq) { + struct skcipher_request *req = skcipher_request_cast(areq); + struct s390_pxts_req_ctx *req_ctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); - u8 buf[AES_BLOCK_SIZE], *ctrptr; - struct { - u8 key[PAES_256_PROTKEY_SIZE]; - } param; - struct skcipher_walk walk; - unsigned int nbytes, n, k; - int rc, locked; - - rc = skcipher_walk_virt(&walk, req, false); - if (rc) - return rc; - - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - - locked = mutex_trylock(&ctrblk_lock); + struct s390_pxts_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk *walk = &req_ctx->walk; + int rc; - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { - n = AES_BLOCK_SIZE; - if (nbytes >= 2*AES_BLOCK_SIZE && locked) - n = __ctrblk_init(ctrblk, walk.iv, nbytes); - ctrptr = (n > AES_BLOCK_SIZE) ? 
ctrblk : walk.iv; - k = cpacf_kmctr(ctx->fc, &param, walk.dst.virt.addr, - walk.src.virt.addr, n, ctrptr); - if (k) { - if (ctrptr == ctrblk) - memcpy(walk.iv, ctrptr + k - AES_BLOCK_SIZE, - AES_BLOCK_SIZE); - crypto_inc(walk.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes - k); - } - if (k < n) { - if (__paes_convert_key(ctx)) { - if (locked) - mutex_unlock(&ctrblk_lock); - return skcipher_walk_done(&walk, -EIO); - } - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } - } - if (locked) - mutex_unlock(&ctrblk_lock); - /* - * final block may be < AES_BLOCK_SIZE, copy only nbytes - */ - if (nbytes) { - memset(buf, 0, AES_BLOCK_SIZE); - memcpy(buf, walk.src.virt.addr, nbytes); - while (1) { - if (cpacf_kmctr(ctx->fc, &param, buf, - buf, AES_BLOCK_SIZE, - walk.iv) == AES_BLOCK_SIZE) - break; - if (__paes_convert_key(ctx)) - return skcipher_walk_done(&walk, -EIO); - spin_lock_bh(&ctx->pk_lock); - memcpy(param.key, ctx->pk.protkey, PAES_256_PROTKEY_SIZE); - spin_unlock_bh(&ctx->pk_lock); - } - memcpy(walk.dst.virt.addr, buf, nbytes); - crypto_inc(walk.iv, AES_BLOCK_SIZE); - rc = skcipher_walk_done(&walk, nbytes); + /* walk has already been prepared */ + + rc = xts_paes_do_crypt(ctx, req_ctx, true); + if (rc == -EKEYEXPIRED) { + /* + * Protected key expired, conversion is in progress. + * Trigger a re-schedule of this request by returning + * -ENOSPC ("hardware queue is full") to the crypto engine. + * To avoid immediate re-invocation of this callback, + * tell the scheduler to voluntarily give up the CPU here. + */ + cond_resched(); + pr_debug("rescheduling request\n"); + return -ENOSPC; + } else if (rc) { + skcipher_walk_done(walk, rc); } + memzero_explicit(&req_ctx->param, sizeof(req_ctx->param)); + pr_debug("request complete with rc=%d\n", rc); + local_bh_disable(); + atomic_dec(&ctx->via_engine_ctr); + crypto_finalize_skcipher_request(engine, req, rc); + local_bh_enable(); return rc; } -static struct skcipher_alg ctr_paes_alg = { - .base.cra_name = "ctr(paes)", - .base.cra_driver_name = "ctr-paes-s390", - .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct s390_paes_ctx), - .base.cra_module = THIS_MODULE, - .base.cra_list = LIST_HEAD_INIT(ctr_paes_alg.base.cra_list), - .init = ctr_paes_init, - .exit = ctr_paes_exit, - .min_keysize = PAES_MIN_KEYSIZE, - .max_keysize = PAES_MAX_KEYSIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = ctr_paes_set_key, - .encrypt = ctr_paes_crypt, - .decrypt = ctr_paes_crypt, - .chunksize = AES_BLOCK_SIZE, +static struct skcipher_engine_alg xts_paes_alg = { + .base = { + .base.cra_name = "xts(paes)", + .base.cra_driver_name = "xts-paes-s390", + .base.cra_priority = 402, /* ecb-paes-s390 + 1 */ + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct s390_pxts_ctx), + .base.cra_module = THIS_MODULE, + .base.cra_list = LIST_HEAD_INIT(xts_paes_alg.base.base.cra_list), + .init = xts_paes_init, + .exit = xts_paes_exit, + .min_keysize = 2 * PAES_MIN_KEYSIZE, + .max_keysize = 2 * PAES_MAX_KEYSIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = xts_paes_setkey, + .encrypt = xts_paes_encrypt, + .decrypt = xts_paes_decrypt, + }, + .op = { + .do_one_request = xts_paes_do_one_request, + }, }; -static inline void __crypto_unregister_skcipher(struct skcipher_alg *alg) +/* + * alg register, unregister, module init, exit + */ + +static struct miscdevice paes_dev = { + .name = "paes", + .minor = MISC_DYNAMIC_MINOR, +}; + +static
inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg) { - if (!list_empty(&alg->base.cra_list)) - crypto_unregister_skcipher(alg); + if (!list_empty(&alg->base.base.cra_list)) + crypto_engine_unregister_skcipher(alg); } static void paes_s390_fini(void) { + if (paes_crypto_engine) { + crypto_engine_stop(paes_crypto_engine); + crypto_engine_exit(paes_crypto_engine); + } __crypto_unregister_skcipher(&ctr_paes_alg); __crypto_unregister_skcipher(&xts_paes_alg); __crypto_unregister_skcipher(&cbc_paes_alg); __crypto_unregister_skcipher(&ecb_paes_alg); if (ctrblk) - free_page((unsigned long) ctrblk); + free_page((unsigned long)ctrblk); + misc_deregister(&paes_dev); } static int __init paes_s390_init(void) { int rc; + /* register a simple paes pseudo misc device */ + rc = misc_register(&paes_dev); + if (rc) + return rc; + + /* with this pseudo device alloc and start a crypto engine */ + paes_crypto_engine = + crypto_engine_alloc_init_and_set(paes_dev.this_device, + true, NULL, false, MAX_QLEN); + if (!paes_crypto_engine) { + rc = -ENOMEM; + goto out_err; + } + rc = crypto_engine_start(paes_crypto_engine); + if (rc) { + crypto_engine_exit(paes_crypto_engine); + paes_crypto_engine = NULL; + goto out_err; + } + /* Query available functions for KM, KMC and KMCTR */ cpacf_query(CPACF_KM, &km_functions); cpacf_query(CPACF_KMC, &kmc_functions); @@ -927,40 +1650,45 @@ static int __init paes_s390_init(void) if (cpacf_test_func(&km_functions, CPACF_KM_PAES_128) || cpacf_test_func(&km_functions, CPACF_KM_PAES_192) || cpacf_test_func(&km_functions, CPACF_KM_PAES_256)) { - rc = crypto_register_skcipher(&ecb_paes_alg); + rc = crypto_engine_register_skcipher(&ecb_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", ecb_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) { - rc = crypto_register_skcipher(&cbc_paes_alg); + rc = crypto_engine_register_skcipher(&cbc_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", cbc_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&km_functions, CPACF_KM_PXTS_128) || cpacf_test_func(&km_functions, CPACF_KM_PXTS_256)) { - rc = crypto_register_skcipher(&xts_paes_alg); + rc = crypto_engine_register_skcipher(&xts_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", xts_paes_alg.base.base.cra_driver_name); } if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_128) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_192) || cpacf_test_func(&kmctr_functions, CPACF_KMCTR_PAES_256)) { - ctrblk = (u8 *) __get_free_page(GFP_KERNEL); + ctrblk = (u8 *)__get_free_page(GFP_KERNEL); if (!ctrblk) { rc = -ENOMEM; goto out_err; } - rc = crypto_register_skcipher(&ctr_paes_alg); + rc = crypto_engine_register_skcipher(&ctr_paes_alg); if (rc) goto out_err; + pr_debug("%s registered\n", ctr_paes_alg.base.base.cra_driver_name); } return 0; + out_err: paes_s390_fini(); return rc; diff --git a/arch/s390/include/asm/asce.h b/arch/s390/include/asm/asce.h new file mode 100644 index 00000000000000..f6dfaaba735a20 --- /dev/null +++ b/arch/s390/include/asm/asce.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_ASCE_H +#define _ASM_S390_ASCE_H + +#include <linux/thread_info.h> +#include <linux/irqflags.h> +#include <asm/lowcore.h> +#include <asm/ctlreg.h> + +static inline bool enable_sacf_uaccess(void) +{ + unsigned long flags; + + if
(test_thread_flag(TIF_ASCE_PRIMARY)) + return true; + local_irq_save(flags); + local_ctl_load(1, &get_lowcore()->kernel_asce); + set_thread_flag(TIF_ASCE_PRIMARY); + local_irq_restore(flags); + return false; +} + +static inline void disable_sacf_uaccess(bool previous) +{ + unsigned long flags; + + if (previous) + return; + local_irq_save(flags); + local_ctl_load(1, &get_lowcore()->user_asce); + clear_thread_flag(TIF_ASCE_PRIMARY); + local_irq_restore(flags); +} + +#endif /* _ASM_S390_ASCE_H */ diff --git a/arch/s390/include/asm/cpacf.h b/arch/s390/include/asm/cpacf.h index 59ab1192e2d5b4..54cb97603ec050 100644 --- a/arch/s390/include/asm/cpacf.h +++ b/arch/s390/include/asm/cpacf.h @@ -649,18 +649,30 @@ static inline void cpacf_trng(u8 *ucbuf, unsigned long ucbuf_len, * instruction * @func: the function code passed to PCC; see CPACF_KM_xxx defines * @param: address of parameter block; see POP for details on each func + * + * Returns the condition code, this is + * 0 - cc code 0 (normal completion) + * 1 - cc code 1 (protected key wkvp mismatch or src operand out of range) + * 2 - cc code 2 (something invalid, scalar multiply infinity, ...) + * Condition code 3 (partial completion) is handled within the asm code + * and never returned. */ -static inline void cpacf_pcc(unsigned long func, void *param) +static inline int cpacf_pcc(unsigned long func, void *param) { + int cc; + asm volatile( " lgr 0,%[fc]\n" " lgr 1,%[pba]\n" "0: .insn rre,%[opc] << 16,0,0\n" /* PCC opcode */ " brc 1,0b\n" /* handle partial completion */ - : + CC_IPM(cc) + : CC_OUT(cc, cc) : [fc] "d" (func), [pba] "d" ((unsigned long)param), [opc] "i" (CPACF_PCC) - : "cc", "memory", "0", "1"); + : CC_CLOBBER_LIST("memory", "0", "1")); + + return CC_TRANSFORM(cc); } /** diff --git a/arch/s390/include/asm/cpufeature.h b/arch/s390/include/asm/cpufeature.h index e08169bd63a55d..6c6a99660e78b8 100644 --- a/arch/s390/include/asm/cpufeature.h +++ b/arch/s390/include/asm/cpufeature.h @@ -15,6 +15,7 @@ enum { S390_CPU_FEATURE_MSA, S390_CPU_FEATURE_VXRS, S390_CPU_FEATURE_UV, + S390_CPU_FEATURE_D288, MAX_CPU_FEATURES }; diff --git a/arch/s390/include/asm/diag288.h b/arch/s390/include/asm/diag288.h new file mode 100644 index 00000000000000..5e1b43cea9d64c --- /dev/null +++ b/arch/s390/include/asm/diag288.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_S390_DIAG288_H +#define _ASM_S390_DIAG288_H + +#include <asm/asm-extable.h> +#include <asm/types.h> + +#define MIN_INTERVAL 15 /* Minimal time supported by diag288 */ +#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ + +#define WDT_DEFAULT_TIMEOUT 30 + +/* Function codes - init, change, cancel */ +#define WDT_FUNC_INIT 0 +#define WDT_FUNC_CHANGE 1 +#define WDT_FUNC_CANCEL 2 +#define WDT_FUNC_CONCEAL 0x80000000 + +/* Action codes for LPAR watchdog */ +#define LPARWDT_RESTART 0 + +static inline int __diag288(unsigned int func, unsigned int timeout, + unsigned long action, unsigned int len) +{ + union register_pair r1 = { .even = func, .odd = timeout, }; + union register_pair r3 = { .even = action, .odd = len, }; + int rc = -EINVAL; + + asm volatile( + " diag %[r1],%[r3],0x288\n" + "0: lhi %[rc],0\n" + "1:" + EX_TABLE(0b, 1b) + : [rc] "+d" (rc) + : [r1] "d" (r1.pair), [r3] "d" (r3.pair) + : "cc", "memory"); + return rc; +} + +#endif /* _ASM_S390_DIAG288_H */ diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h index f5781794356bae..942f21c396973d 100644 --- a/arch/s390/include/asm/futex.h +++ 
b/arch/s390/include/asm/futex.h @@ -13,9 +13,11 @@ static uaccess_kmsan_or_inline int \ __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ { \ + bool sacf_flag; \ int rc, new; \ \ instrument_copy_from_user_before(old, uaddr, sizeof(*old)); \ + sacf_flag = enable_sacf_uaccess(); \ asm_inline volatile( \ " sacf 256\n" \ "0: l %[old],%[uaddr]\n" \ @@ -32,6 +34,7 @@ __futex_atomic_##name(int oparg, int *old, u32 __user *uaddr) \ [new] "=&d" (new), [uaddr] "+Q" (*uaddr) \ : [oparg] "d" (oparg) \ : "cc"); \ + disable_sacf_uaccess(sacf_flag); \ if (!rc) \ instrument_copy_from_user_after(old, uaddr, sizeof(*old), 0); \ return rc; \ @@ -75,9 +78,11 @@ int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) static uaccess_kmsan_or_inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval) { + bool sacf_flag; int rc; instrument_copy_from_user_before(uval, uaddr, sizeof(*uval)); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " sacf 256\n" "0: cs %[old],%[new],%[uaddr]\n" @@ -88,6 +93,7 @@ int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 : [rc] "=d" (rc), [old] "+d" (oldval), [uaddr] "+Q" (*uaddr) : [new] "d" (newval) : "cc", "memory"); + disable_sacf_uaccess(sacf_flag); *uval = oldval; instrument_copy_from_user_after(uval, uaddr, sizeof(*uval), 0); return rc; diff --git a/arch/s390/include/asm/machine.h b/arch/s390/include/asm/machine.h index 54478caa52378b..8abe5afdbfc45e 100644 --- a/arch/s390/include/asm/machine.h +++ b/arch/s390/include/asm/machine.h @@ -18,6 +18,7 @@ #define MFEATURE_VM 7 #define MFEATURE_KVM 8 #define MFEATURE_LPAR 9 +#define MFEATURE_DIAG288 10 #ifndef __ASSEMBLY__ diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 88f84beebb9e7d..d9b8501bc93d07 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h @@ -13,6 +13,7 @@ #include <linux/mm_types.h> #include <asm/tlbflush.h> #include <asm/ctlreg.h> +#include <asm/asce.h> #include <asm-generic/mm_hooks.h> #define init_new_context init_new_context @@ -77,7 +78,8 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct * else get_lowcore()->user_asce.val = next->context.asce; cpumask_set_cpu(cpu, &next->context.cpu_attach_mask); - /* Clear previous user-ASCE from CR7 */ + /* Clear previous user-ASCE from CR1 and CR7 */ + local_ctl_load(1, &s390_invalid_asce); local_ctl_load(7, &s390_invalid_asce); if (prev != next) cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask); @@ -99,6 +101,7 @@ static inline void finish_arch_post_lock_switch(void) { struct task_struct *tsk = current; struct mm_struct *mm = tsk->mm; + unsigned long flags; if (mm) { preempt_disable(); @@ -108,15 +111,25 @@ static inline void finish_arch_post_lock_switch(void) __tlb_flush_mm_lazy(mm); preempt_enable(); } + local_irq_save(flags); + if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); local_ctl_load(7, &get_lowcore()->user_asce); + local_irq_restore(flags); } #define activate_mm activate_mm static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next) { - switch_mm(prev, next, current); + switch_mm_irqs_off(prev, next, current); cpumask_set_cpu(smp_processor_id(), mm_cpumask(next)); + if (test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &get_lowcore()->kernel_asce); + else + local_ctl_load(1, &get_lowcore()->user_asce); 
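	/* CR7, the secondary-space ASCE, always holds the user ASCE */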
local_ctl_load(7, &get_lowcore()->user_asce); } diff --git a/arch/s390/include/asm/pkey.h b/arch/s390/include/asm/pkey.h index 5dca1a46a9f656..b7b59faf16f497 100644 --- a/arch/s390/include/asm/pkey.h +++ b/arch/s390/include/asm/pkey.h @@ -20,9 +20,22 @@ * @param key pointer to a buffer containing the key blob * @param keylen size of the key blob in bytes * @param protkey pointer to buffer receiving the protected key + * @param xflags additional execution flags (see PKEY_XFLAG_* definitions below) + * As of now the only supported flag is PKEY_XFLAG_NOMEMALLOC. * @return 0 on success, negative errno value on failure */ int pkey_key2protkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); + +/* + * If this flag is given in the xflags parameter, the pkey implementation + * is not allowed to allocate memory but instead should fall back to using + * preallocated memory or simply fail with -ENOMEM. + * This flag is for protected key derivation within a cipher or similar, + * which must not allocate memory that would cause I/O operations - see + * also the CRYPTO_ALG_ALLOCATES_MEMORY flag in crypto.h. + */ +#define PKEY_XFLAG_NOMEMALLOC 0x0001 #endif /* _KAPI_PKEY_H */ diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h index c66f3fc6daaf3f..62c0ab4a4b9de8 100644 --- a/arch/s390/include/asm/ptrace.h +++ b/arch/s390/include/asm/ptrace.h @@ -9,6 +9,7 @@ #include <linux/bits.h> #include <uapi/asm/ptrace.h> +#include <asm/thread_info.h> #include <asm/tpi.h> #define PIF_SYSCALL 0 /* inside a system call */ @@ -126,7 +127,6 @@ struct pt_regs { struct tpi_info tpi_info; }; unsigned long flags; - unsigned long cr1; unsigned long last_break; }; @@ -229,8 +229,44 @@ static inline void instruction_pointer_set(struct pt_regs *regs, int regs_query_register_offset(const char *name); const char *regs_query_register_name(unsigned int offset); -unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset); -unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n); + +static __always_inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->gprs[15]; +} + +static __always_inline unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) +{ + if (offset >= NUM_GPRS) + return 0; + return regs->gprs[offset]; +} + +static __always_inline int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) +{ + unsigned long ksp = kernel_stack_pointer(regs); + + return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); } + +/** + * regs_get_kernel_stack_nth() - get Nth entry of the stack + * @regs: pt_regs which contains the kernel stack pointer. + * @n: stack entry number. + * + * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack which + * is specified by @regs. If the @n-th entry is NOT in the kernel stack, + * this returns 0.
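+ * + * Within this header it backs regs_get_kernel_argument(), which uses it + * to fetch function arguments that were spilled to the stack.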
+ */ +static __always_inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) +{ + unsigned long addr; + + addr = kernel_stack_pointer(regs) + n * sizeof(long); + if (!regs_within_kernel_stack(regs, addr)) + return 0; + return READ_ONCE_NOCHECK(addr); +} /** * regs_get_kernel_argument() - get Nth function argument in kernel @@ -251,11 +287,6 @@ static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, return regs_get_kernel_stack_nth(regs, argoffset + n); } -static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) -{ - return regs->gprs[15]; -} - static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) { regs->gprs[2] = rc; diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h index 2ab868cbae6c13..f8f68f4ef255d1 100644 --- a/arch/s390/include/asm/string.h +++ b/arch/s390/include/asm/string.h @@ -26,11 +26,9 @@ void *memmove(void *dest, const void *src, size_t n); #define __HAVE_ARCH_MEMSCAN /* inline & arch function */ #define __HAVE_ARCH_STRCAT /* inline & arch function */ #define __HAVE_ARCH_STRCMP /* arch function */ -#define __HAVE_ARCH_STRCPY /* inline & arch function */ #define __HAVE_ARCH_STRLCAT /* arch function */ #define __HAVE_ARCH_STRLEN /* inline & arch function */ #define __HAVE_ARCH_STRNCAT /* arch function */ -#define __HAVE_ARCH_STRNCPY /* arch function */ #define __HAVE_ARCH_STRNLEN /* inline & arch function */ #define __HAVE_ARCH_STRSTR /* arch function */ #define __HAVE_ARCH_MEMSET16 /* arch function */ @@ -42,7 +40,6 @@ int memcmp(const void *s1, const void *s2, size_t n); int strcmp(const char *s1, const char *s2); size_t strlcat(char *dest, const char *src, size_t n); char *strncat(char *dest, const char *src, size_t n); -char *strncpy(char *dest, const char *src, size_t n); char *strstr(const char *s1, const char *s2); #endif /* !defined(CONFIG_KASAN) && !defined(CONFIG_KMSAN) */ @@ -155,22 +152,6 @@ static inline char *strcat(char *dst, const char *src) } #endif -#ifdef __HAVE_ARCH_STRCPY -static inline char *strcpy(char *dst, const char *src) -{ - char *ret = dst; - - asm volatile( - " lghi 0,0\n" - "0: mvst %[dst],%[src]\n" - " jo 0b" - : [dst] "+&a" (dst), [src] "+&a" (src) - : - : "cc", "memory", "0"); - return ret; -} -#endif - #if defined(__HAVE_ARCH_STRLEN) || (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)) static inline size_t __no_sanitize_prefix_strfunc(strlen)(const char *s) { @@ -208,7 +189,6 @@ static inline size_t strnlen(const char * s, size_t n) void *memchr(const void * s, int c, size_t n); void *memscan(void *s, int c, size_t n); char *strcat(char *dst, const char *src); -char *strcpy(char *dst, const char *src); size_t strlen(const char *s); size_t strnlen(const char * s, size_t n); #endif /* !IN_ARCH_STRING_C */ diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 91f569cae1ce28..391eb04d26d81b 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h @@ -9,6 +9,7 @@ #define _ASM_THREAD_INFO_H #include <linux/bits.h> +#include <vdso/page.h> /* * General size of kernel stacks @@ -24,8 +25,6 @@ #define STACK_INIT_OFFSET (THREAD_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) #ifndef __ASSEMBLY__ -#include <asm/lowcore.h> -#include <asm/page.h> /* * low level task data that entry.S needs immediate access to @@ -64,6 +63,7 @@ void arch_setup_new_exec(void); #define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling needed */ #define TIF_UPROBE 4 /* breakpointed or 
single-stepping */ #define TIF_PATCH_PENDING 5 /* pending live patching update */ +#define TIF_ASCE_PRIMARY 6 /* primary asce is kernel asce */ #define TIF_NOTIFY_SIGNAL 7 /* signal notifications exist */ #define TIF_GUARDED_STORAGE 8 /* load guarded storage control block */ #define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */ @@ -85,6 +85,7 @@ void arch_setup_new_exec(void); #define _TIF_NEED_RESCHED_LAZY BIT(TIF_NEED_RESCHED_LAZY) #define _TIF_UPROBE BIT(TIF_UPROBE) #define _TIF_PATCH_PENDING BIT(TIF_PATCH_PENDING) +#define _TIF_ASCE_PRIMARY BIT(TIF_ASCE_PRIMARY) #define _TIF_NOTIFY_SIGNAL BIT(TIF_NOTIFY_SIGNAL) #define _TIF_GUARDED_STORAGE BIT(TIF_GUARDED_STORAGE) #define _TIF_ISOLATE_BP_GUEST BIT(TIF_ISOLATE_BP_GUEST) diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 8629d70ec38b5d..a43fc88c005052 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -19,6 +19,7 @@ #include <asm/extable.h> #include <asm/facility.h> #include <asm-generic/access_ok.h> +#include <asm/asce.h> #include <linux/instrumented.h> void debug_user_asce(int exit); @@ -478,6 +479,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, __uint128_t old, __uint128_t new, unsigned long key, int size) { + bool sacf_flag; int rc = 0; switch (size) { @@ -490,6 +492,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, _old = ((unsigned int)old & 0xff) << shift; _new = ((unsigned int)new & 0xff) << shift; mask = ~(0xff << shift); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -524,6 +527,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [default_key] "J" (PAGE_DEFAULT_KEY), [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned char *)uval = prev >> shift; if (!count) rc = -EAGAIN; @@ -538,6 +542,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, _old = ((unsigned int)old & 0xffff) << shift; _new = ((unsigned int)new & 0xffff) << shift; mask = ~(0xffff << shift); + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -572,6 +577,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [default_key] "J" (PAGE_DEFAULT_KEY), [max_loops] "J" (CMPXCHG_USER_KEY_MAX_LOOPS) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned short *)uval = prev >> shift; if (!count) rc = -EAGAIN; @@ -580,6 +586,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, case 4: { unsigned int prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -595,12 +602,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned int *)uval = prev; return rc; } case 8: { unsigned long prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 0(%[key])\n" " sacf 256\n" @@ -616,12 +625,14 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(unsigned long *)uval = prev; return rc; } case 16: { __uint128_t prev = old; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile( " spka 
0(%[key])\n" " sacf 256\n" @@ -637,6 +648,7 @@ static __always_inline int __cmpxchg_user_key(unsigned long address, void *uval, [key] "a" (key << 4), [default_key] "J" (PAGE_DEFAULT_KEY) : "memory", "cc"); + disable_sacf_uaccess(sacf_flag); *(__uint128_t *)uval = prev; return rc; } diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h index 46fb0ef6f98470..b008402ec9aa77 100644 --- a/arch/s390/include/asm/uv.h +++ b/arch/s390/include/asm/uv.h @@ -616,8 +616,9 @@ static inline int uv_remove_shared(unsigned long addr) return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS); } -int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list_item_hdr *secret); +int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret); int uv_retrieve_secret(u16 secret_idx, u8 *buf, size_t buf_size); extern int prot_virt_host; diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 841e05f7fa7e30..95ecad9c7d7d27 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -50,7 +50,6 @@ int main(void) OFFSET(__PT_ORIG_GPR2, pt_regs, orig_gpr2); OFFSET(__PT_INT_CODE, pt_regs, int_code); OFFSET(__PT_FLAGS, pt_regs, flags); - OFFSET(__PT_CR1, pt_regs, cr1); OFFSET(__PT_LAST_BREAK, pt_regs, last_break); DEFINE(__PT_SIZE, sizeof(struct pt_regs)); BLANK(); diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c index 03f3a1e524305a..c217a5e640943d 100644 --- a/arch/s390/kernel/cert_store.c +++ b/arch/s390/kernel/cert_store.c @@ -138,7 +138,7 @@ static void cert_store_key_describe(const struct key *key, struct seq_file *m) * First 64 bytes of the key description is key name in EBCDIC CP 500. * Convert it to ASCII for displaying in /proc/keys. 
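 * The two-argument strscpy() used below infers the destination size from the ascii[] array.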
*/ - strscpy(ascii, key->description, sizeof(ascii)); + strscpy(ascii, key->description); EBCASC_500(ascii, VC_NAME_LEN_BYTES); seq_puts(m, ascii); diff --git a/arch/s390/kernel/cpufeature.c b/arch/s390/kernel/cpufeature.c index 1b2ae42a0c156e..76210f001028fa 100644 --- a/arch/s390/kernel/cpufeature.c +++ b/arch/s390/kernel/cpufeature.c @@ -5,11 +5,13 @@ #include <linux/cpufeature.h> #include <linux/bug.h> +#include <asm/machine.h> #include <asm/elf.h> enum { TYPE_HWCAP, TYPE_FACILITY, + TYPE_MACHINE, }; struct s390_cpu_feature { @@ -21,6 +23,7 @@ static struct s390_cpu_feature s390_cpu_features[MAX_CPU_FEATURES] = { [S390_CPU_FEATURE_MSA] = {.type = TYPE_HWCAP, .num = HWCAP_NR_MSA}, [S390_CPU_FEATURE_VXRS] = {.type = TYPE_HWCAP, .num = HWCAP_NR_VXRS}, [S390_CPU_FEATURE_UV] = {.type = TYPE_FACILITY, .num = 158}, + [S390_CPU_FEATURE_D288] = {.type = TYPE_MACHINE, .num = MFEATURE_DIAG288}, }; /* @@ -38,6 +41,8 @@ int cpu_have_feature(unsigned int num) return !!(elf_hwcap & BIT(feature->num)); case TYPE_FACILITY: return test_facility(feature->num); + case TYPE_MACHINE: + return test_machine_feature(feature->num); default: WARN_ON_ONCE(1); return 0; diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index 4a981266b48337..adb164223f8c6d 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -354,7 +354,7 @@ static void *nt_prpsinfo(void *ptr) memset(&prpsinfo, 0, sizeof(prpsinfo)); prpsinfo.pr_sname = 'R'; - strcpy(prpsinfo.pr_fname, "vmlinux"); + strscpy(prpsinfo.pr_fname, "vmlinux"); return nt_init(ptr, PRPSINFO, prpsinfo); } diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c index ce038e9205f718..2a41be2f79251f 100644 --- a/arch/s390/kernel/debug.c +++ b/arch/s390/kernel/debug.c @@ -251,7 +251,7 @@ static debug_info_t *debug_info_alloc(const char *name, int pages_per_area, rc->level = level; rc->buf_size = buf_size; rc->entry_size = sizeof(debug_entry_t) + buf_size; - strscpy(rc->name, name, sizeof(rc->name)); + strscpy(rc->name, name); memset(rc->views, 0, DEBUG_MAX_VIEWS * sizeof(struct debug_view *)); memset(rc->debugfs_entries, 0, DEBUG_MAX_VIEWS * sizeof(struct dentry *)); refcount_set(&(rc->ref_count), 0); diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 9980c17ba22d95..0f00f4b06d51bb 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -116,7 +116,7 @@ _LPP_OFFSET = __LC_LPP .macro SIEEXIT sie_control,lowcore lg %r9,\sie_control # get control block pointer ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE - lctlg %c1,%c1,__LC_KERNEL_ASCE(\lowcore) # load primary asce + lctlg %c1,%c1,__LC_USER_ASCE(\lowcore) # load primary asce lg %r9,__LC_CURRENT(\lowcore) mvi __TI_sie(%r9),0 larl %r9,sie_exit # skip forward to sie_exit @@ -208,7 +208,7 @@ SYM_FUNC_START(__sie64a) lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE GET_LC %r14 - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r14) # load primary asce + lctlg %c1,%c1,__LC_USER_ASCE(%r14) # load primary asce lg %r14,__LC_CURRENT(%r14) mvi __TI_sie(%r14),0 SYM_INNER_LABEL(sie_exit, SYM_L_GLOBAL) @@ -240,7 +240,6 @@ SYM_CODE_START(system_call) lghi %r14,0 .Lsysc_per: STBEAR __LC_LAST_BREAK(%r13) - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) lg %r15,__LC_KERNEL_STACK(%r13) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) stmg %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15) @@ -261,7 +260,6 @@ SYM_CODE_START(system_call) lgr %r3,%r14 brasl %r14,__do_syscall STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) mvc 
__LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) BPON LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) @@ -278,7 +276,6 @@ SYM_CODE_START(ret_from_fork) brasl %r14,__ret_from_fork STACKLEAK_ERASE GET_LC %r13 - lctlg %c1,%c1,__LC_USER_ASCE(%r13) mvc __LC_RETURN_PSW(16,%r13),STACK_FRAME_OVERHEAD+__PT_PSW(%r15) BPON LBEAR STACK_FRAME_OVERHEAD+__PT_LAST_BREAK(%r15) @@ -299,10 +296,7 @@ SYM_CODE_START(pgm_check_handler) lmg %r8,%r9,__LC_PGM_OLD_PSW(%r13) xgr %r10,%r10 tmhh %r8,0x0001 # coming from user space? - jno .Lpgm_skip_asce - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) - j 3f # -> fault in user space -.Lpgm_skip_asce: + jo 3f # -> fault in user space #if IS_ENABLED(CONFIG_KVM) lg %r11,__LC_CURRENT(%r13) tm __TI_sie(%r11),0xff @@ -340,7 +334,6 @@ SYM_CODE_START(pgm_check_handler) tmhh %r8,0x0001 # returning to user space? jno .Lpgm_exit_kernel STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) BPON stpt __LC_EXIT_TIMER(%r13) .Lpgm_exit_kernel: @@ -384,8 +377,7 @@ SYM_CODE_START(\name) #endif 0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) j 2f -1: lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) - lg %r15,__LC_KERNEL_STACK(%r13) +1: lg %r15,__LC_KERNEL_STACK(%r13) 2: xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) @@ -408,7 +400,6 @@ SYM_CODE_START(\name) tmhh %r8,0x0001 # returning to user ? jno 2f STACKLEAK_ERASE - lctlg %c1,%c1,__LC_USER_ASCE(%r13) BPON stpt __LC_EXIT_TIMER(%r13) 2: LBEAR __PT_LAST_BREAK(%r11) @@ -476,8 +467,6 @@ SYM_CODE_START(mcck_int_handler) .Lmcck_user: lg %r15,__LC_MCCK_STACK(%r13) la %r11,STACK_FRAME_OVERHEAD(%r15) - stctg %c1,%c1,__PT_CR1(%r11) - lctlg %c1,%c1,__LC_KERNEL_ASCE(%r13) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lay %r14,__LC_GPREGS_SAVE_AREA(%r13) mvc __PT_R0(128,%r11),0(%r14) @@ -495,7 +484,6 @@ SYM_CODE_START(mcck_int_handler) xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check - lctlg %c1,%c1,__PT_CR1(%r11) lmg %r0,%r10,__PT_R0(%r11) mvc __LC_RETURN_MCCK_PSW(16,%r13),__PT_PSW(%r11) # move return PSW tm __LC_RETURN_MCCK_PSW+1(%r13),0x01 # returning to user ? 
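The entry.S hunks above are the core of the lazy ASCE rework: CR1 now keeps the user ASCE across kernel entry and exit, and only SACF-based uaccess paths temporarily promote CR1 to the kernel ASCE. A minimal sketch of that bracket pattern, as followed by the futex, cmpxchg and PCI MMIO hunks in this merge (the helper name here is made up for illustration):

static int example_sacf_uaccess(void)
{
	bool sacf_flag;

	/* load CR1 with the kernel ASCE unless a surrounding region already did */
	sacf_flag = enable_sacf_uaccess();
	/*
	 * ... "sacf 256" based inline assembly goes here; user memory is
	 * reached via the secondary-space ASCE in CR7 ...
	 */
	/* restore CR1 to the user ASCE, unless it was primary before */
	disable_sacf_uaccess(sacf_flag);
	return 0;
}

Because enable_sacf_uaccess() returns whether TIF_ASCE_PRIMARY was already set, nested regions stay balanced: only the outermost bracket actually rewrites CR1.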
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 3b9d9ccfad63ef..ff15f91affdea4 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -270,7 +270,7 @@ static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \ { \ if (len >= sizeof(_value)) \ return -E2BIG; \ - len = strscpy(_value, buf, sizeof(_value)); \ + len = strscpy(_value, buf); \ if ((ssize_t)len < 0) \ return len; \ strim(_value); \ @@ -2249,26 +2249,28 @@ static int __init s390_ipl_init(void) __initcall(s390_ipl_init); -static void __init strncpy_skip_quote(char *dst, char *src, int n) +static void __init strscpy_skip_quote(char *dst, char *src, int n) { int sx, dx; - dx = 0; - for (sx = 0; src[sx] != 0; sx++) { + if (!n) + return; + for (sx = 0, dx = 0; src[sx]; sx++) { if (src[sx] == '"') continue; - dst[dx++] = src[sx]; - if (dx >= n) + dst[dx] = src[sx]; + if (dx + 1 == n) break; + dx++; } + dst[dx] = '\0'; } static int __init vmcmd_on_reboot_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_reboot, str, VMCMD_MAX_SIZE); - vmcmd_on_reboot[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_reboot, str, sizeof(vmcmd_on_reboot)); on_reboot_trigger.action = &vmcmd_action; return 1; } @@ -2278,8 +2280,7 @@ static int __init vmcmd_on_panic_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_panic, str, VMCMD_MAX_SIZE); - vmcmd_on_panic[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_panic, str, sizeof(vmcmd_on_panic)); on_panic_trigger.action = &vmcmd_action; return 1; } @@ -2289,8 +2290,7 @@ static int __init vmcmd_on_halt_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_halt, str, VMCMD_MAX_SIZE); - vmcmd_on_halt[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_halt, str, sizeof(vmcmd_on_halt)); on_halt_trigger.action = &vmcmd_action; return 1; } @@ -2300,8 +2300,7 @@ static int __init vmcmd_on_poff_setup(char *str) { if (!machine_is_vm()) return 1; - strncpy_skip_quote(vmcmd_on_poff, str, VMCMD_MAX_SIZE); - vmcmd_on_poff[VMCMD_MAX_SIZE] = 0; + strscpy_skip_quote(vmcmd_on_poff, str, sizeof(vmcmd_on_poff)); on_poff_trigger.action = &vmcmd_action; return 1; } diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c index 690a293eb10d63..7ace1f9e4ccf67 100644 --- a/arch/s390/kernel/perf_cpum_cf_events.c +++ b/arch/s390/kernel/perf_cpum_cf_events.c @@ -290,8 +290,8 @@ CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_NO_SPECIAL, 0x00f4); CPUMF_EVENT_ATTR(cf_z15, TX_C_TABORT_SPECIAL, 0x00f5); CPUMF_EVENT_ATTR(cf_z15, DFLT_ACCESS, 0x00f7); CPUMF_EVENT_ATTR(cf_z15, DFLT_CYCLES, 0x00fc); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x00108); -CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x00109); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CC, 0x0108); +CPUMF_EVENT_ATTR(cf_z15, DFLT_CCFINISH, 0x0109); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); CPUMF_EVENT_ATTR(cf_z15, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); CPUMF_EVENT_ATTR(cf_z16, L1D_RO_EXCL_WRITES, 0x0080); diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c index 80b1f7a29f1164..11f70c1e2797cb 100644 --- a/arch/s390/kernel/processor.c +++ b/arch/s390/kernel/processor.c @@ -268,35 +268,35 @@ static int __init setup_elf_platform(void) add_device_randomness(&cpu_id, sizeof(cpu_id)); switch (cpu_id.machine) { default: /* Use "z10" as default. 
*/ - strcpy(elf_platform, "z10"); + strscpy(elf_platform, "z10"); break; case 0x2817: case 0x2818: - strcpy(elf_platform, "z196"); + strscpy(elf_platform, "z196"); break; case 0x2827: case 0x2828: - strcpy(elf_platform, "zEC12"); + strscpy(elf_platform, "zEC12"); break; case 0x2964: case 0x2965: - strcpy(elf_platform, "z13"); + strscpy(elf_platform, "z13"); break; case 0x3906: case 0x3907: - strcpy(elf_platform, "z14"); + strscpy(elf_platform, "z14"); break; case 0x8561: case 0x8562: - strcpy(elf_platform, "z15"); + strscpy(elf_platform, "z15"); break; case 0x3931: case 0x3932: - strcpy(elf_platform, "z16"); + strscpy(elf_platform, "z16"); break; case 0x9175: case 0x9176: - strcpy(elf_platform, "z17"); + strscpy(elf_platform, "z17"); break; } return 0; diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 34b8d9e745df05..e1240f6b29fad1 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c @@ -1524,13 +1524,6 @@ static const char *gpr_names[NUM_GPRS] = { "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", }; -unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset) -{ - if (offset >= NUM_GPRS) - return 0; - return regs->gprs[offset]; -} - int regs_query_register_offset(const char *name) { unsigned long offset; @@ -1550,29 +1543,3 @@ const char *regs_query_register_name(unsigned int offset) return NULL; return gpr_names[offset]; } - -static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) -{ - unsigned long ksp = kernel_stack_pointer(regs); - - return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1)); -} - -/** - * regs_get_kernel_stack_nth() - get Nth entry of the stack - * @regs:pt_regs which contains kernel stack pointer. - * @n:stack entry number. - * - * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which - * is specifined by @regs. If the @n th entry is NOT in the kernel stack, - * this returns 0. - */ -unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) -{ - unsigned long addr; - - addr = kernel_stack_pointer(regs) + n * sizeof(long); - if (!regs_within_kernel_stack(regs, addr)) - return 0; - return READ_ONCE_NOCHECK(addr); -} diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 63f41dfaba85d0..81f12bb77f6208 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -263,7 +263,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu) abs_lc = get_abs_lowcore(); memcpy(lc->cregs_save_area, abs_lc->cregs_save_area, sizeof(lc->cregs_save_area)); put_abs_lowcore(abs_lc); - lc->cregs_save_area[1] = lc->kernel_asce; + lc->cregs_save_area[1] = lc->user_asce; lc->cregs_save_area[7] = lc->user_asce; save_access_regs((unsigned int *) lc->access_regs_save_area); arch_spin_lock_setup(cpu); diff --git a/arch/s390/kernel/uv.c b/arch/s390/kernel/uv.c index 9a5d5be8acf41e..4ab0b6b4866e20 100644 --- a/arch/s390/kernel/uv.c +++ b/arch/s390/kernel/uv.c @@ -782,7 +782,12 @@ out_kobj: device_initcall(uv_sysfs_init); /* - * Find the secret with the secret_id in the provided list. + * Locate a secret in the list by its id. + * @secret_id: search pattern. + * @list: ephemeral buffer space + * @secret: output data, containing the secret's metadata. + * + * Search for a secret with the given secret_id in the Ultravisor secret store. * * Context: might sleep. */ @@ -803,12 +808,15 @@ static int find_secret_in_page(const u8 secret_id[UV_SECRET_ID_LEN], /* * Do the actual search for `uv_get_secret_metadata`. + * @secret_id: search pattern. 
+ * @list: ephemeral buffer space + * @secret: output data, containing the secret's metadata. * * Context: might sleep. */ -static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list *list, - struct uv_secret_list_item_hdr *secret) +int uv_find_secret(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list *list, + struct uv_secret_list_item_hdr *secret) { u16 start_idx = 0; u16 list_rc; @@ -830,36 +838,7 @@ static int find_secret(const u8 secret_id[UV_SECRET_ID_LEN], return -ENOENT; } - -/** - * uv_get_secret_metadata() - get secret metadata for a given secret id. - * @secret_id: search pattern. - * @secret: output data, containing the secret's metadata. - * - * Search for a secret with the given secret_id in the Ultravisor secret store. - * - * Context: might sleep. - * - * Return: - * * %0: - Found entry; secret->idx and secret->type are valid. - * * %ENOENT - No entry found. - * * %ENODEV: - Not supported: UV not available or command not available. - * * %EIO: - Other unexpected UV error. - */ -int uv_get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], - struct uv_secret_list_item_hdr *secret) -{ - struct uv_secret_list *buf; - int rc; - - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - rc = find_secret(secret_id, buf, secret); - kfree(buf); - return rc; -} -EXPORT_SYMBOL_GPL(uv_get_secret_metadata); +EXPORT_SYMBOL_GPL(uv_find_secret); /** * uv_retrieve_secret() - get the secret value for the secret index. diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c index 373fa1f019376d..099de76e8b1ab3 100644 --- a/arch/s390/lib/string.c +++ b/arch/s390/lib/string.c @@ -78,50 +78,6 @@ EXPORT_SYMBOL(strnlen); #endif /** - * strcpy - Copy a %NUL terminated string - * @dest: Where to copy the string to - * @src: Where to copy the string from - * - * returns a pointer to @dest - */ -#ifdef __HAVE_ARCH_STRCPY -char *strcpy(char *dest, const char *src) -{ - char *ret = dest; - - asm volatile( - " lghi 0,0\n" - "0: mvst %[dest],%[src]\n" - " jo 0b\n" - : [dest] "+&a" (dest), [src] "+&a" (src) - : - : "cc", "memory", "0"); - return ret; -} -EXPORT_SYMBOL(strcpy); -#endif - -/** - * strncpy - Copy a length-limited, %NUL-terminated string - * @dest: Where to copy the string to - * @src: Where to copy the string from - * @n: The maximum number of bytes to copy - * - * The result is not %NUL-terminated if the source exceeds - * @n bytes. - */ -#ifdef __HAVE_ARCH_STRNCPY -char *strncpy(char *dest, const char *src, size_t n) -{ - size_t len = __strnend(src, n) - src; - memset(dest + len, 0, n - len); - memcpy(dest, src, len); - return dest; -} -EXPORT_SYMBOL(strncpy); -#endif - -/** * strcat - Append one %NUL-terminated string to another * @dest: The string to be appended to * @src: The string to append to it @@ -181,9 +137,6 @@ EXPORT_SYMBOL(strlcat); * @n: The maximum numbers of bytes to copy * * returns a pointer to @dest - * - * Note that in contrast to strncpy, strncat ensures the result is - * terminated. 
*/ #ifdef __HAVE_ARCH_STRNCAT char *strncat(char *dest, const char *src, size_t n) diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index cec20db884795f..fa7d98fa1320b8 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -17,17 +17,18 @@ #ifdef CONFIG_DEBUG_ENTRY void debug_user_asce(int exit) { + struct lowcore *lc = get_lowcore(); struct ctlreg cr1, cr7; local_ctl_store(1, &cr1); local_ctl_store(7, &cr7); - if (cr1.val == get_lowcore()->kernel_asce.val && cr7.val == get_lowcore()->user_asce.val) + if (cr1.val == lc->user_asce.val && cr7.val == lc->user_asce.val) return; panic("incorrect ASCE on kernel %s\n" "cr1: %016lx cr7: %016lx\n" "kernel: %016lx user: %016lx\n", exit ? "exit" : "entry", cr1.val, cr7.val, - get_lowcore()->kernel_asce.val, get_lowcore()->user_asce.val); + lc->kernel_asce.val, lc->user_asce.val); } #endif /*CONFIG_DEBUG_ENTRY */ diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c index a6b8b8ea90864e..f7da53e212f554 100644 --- a/arch/s390/mm/extmem.c +++ b/arch/s390/mm/extmem.c @@ -530,6 +530,14 @@ segment_modify_shared (char *name, int do_nonshared) return rc; } +static void __dcss_diag_purge_on_cpu_0(void *data) +{ + struct dcss_segment *seg = (struct dcss_segment *)data; + unsigned long dummy; + + dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); +} + /* * Decrease the use count of a DCSS segment and remove * it from the address space if nobody is using it @@ -538,7 +546,6 @@ segment_modify_shared (char *name, int do_nonshared) void segment_unload(char *name) { - unsigned long dummy; struct dcss_segment *seg; if (!machine_is_vm()) @@ -556,7 +563,14 @@ segment_unload(char *name) kfree(seg->res); vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1); list_del(&seg->list); - dcss_diag(&purgeseg_scode, seg->dcss_name, &dummy, &dummy); + /* + * Workaround for z/VM issue, where calling the DCSS unload diag on + * a non-IPL CPU would cause bogus sclp maximum memory detection on + * next IPL. + * IPL CPU 0 cannot be set offline, so the dcss_diag() call can + * directly be scheduled to that CPU. + */ + smp_call_function_single(0, __dcss_diag_purge_on_cpu_0, seg, 1); kfree(seg); out_unlock: mutex_unlock(&dcss_lock); diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index e3a6f8ae156cd6..d177bea0bd7302 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -38,11 +38,15 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table) static void __crst_table_upgrade(void *arg) { struct mm_struct *mm = arg; + struct ctlreg asce; /* change all active ASCEs to avoid the creation of new TLBs */ if (current->active_mm == mm) { - get_lowcore()->user_asce.val = mm->context.asce; - local_ctl_load(7, &get_lowcore()->user_asce); + asce.val = mm->context.asce; + get_lowcore()->user_asce = asce; + local_ctl_load(7, &asce); + if (!test_thread_flag(TIF_ASCE_PRIMARY)) + local_ctl_load(1, &asce); } __tlb_flush_local(); } @@ -52,6 +56,8 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) unsigned long *pgd = NULL, *p4d = NULL, *__pgd; unsigned long asce_limit = mm->context.asce_limit; + mmap_assert_write_locked(mm); + /* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */ VM_BUG_ON(asce_limit < _REGION2_SIZE); @@ -75,13 +81,6 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) spin_lock_bh(&mm->page_table_lock); - /* - * This routine gets called with mmap_lock lock held and there is - * no reason to optimize for the case of otherwise. 
However, if - * that would ever change, the below check will let us know. - */ - VM_BUG_ON(asce_limit != mm->context.asce_limit); - if (p4d) { __pgd = (unsigned long *) mm->pgd; p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd); diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c index 5fcc1a3b04bd0b..9680055edb7840 100644 --- a/arch/s390/pci/pci_mmio.c +++ b/arch/s390/pci/pci_mmio.c @@ -32,8 +32,10 @@ static inline int __pcistb_mio_inuser( u64 len, u8 *status) { int cc, exception; + bool sacf_flag; exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n" @@ -44,6 +46,7 @@ static inline int __pcistb_mio_inuser( : CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception) : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src)) : CC_CLOBBER_LIST("memory")); + disable_sacf_uaccess(sacf_flag); *status = len >> 24 & 0xff; return exception ? -ENXIO : CC_TRANSFORM(cc); } @@ -54,6 +57,7 @@ static inline int __pcistg_mio_inuser( { union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; int cc, exception; + bool sacf_flag; u64 val = 0; u64 cnt = ulen; u8 tmp; @@ -64,6 +68,7 @@ static inline int __pcistg_mio_inuser( * address space. pcistg then uses the user mappings. */ exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: llgc %[tmp],0(%[src])\n" @@ -81,6 +86,7 @@ static inline int __pcistg_mio_inuser( CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair) : : CC_CLOBBER_LIST("memory")); + disable_sacf_uaccess(sacf_flag); *status = ioaddr_len.odd >> 24 & 0xff; cc = exception ? -ENXIO : CC_TRANSFORM(cc); @@ -204,6 +210,7 @@ static inline int __pcilg_mio_inuser( u64 ulen, u8 *status) { union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen}; + bool sacf_flag; u64 cnt = ulen; int shift = ulen * 8; int cc, exception; @@ -215,6 +222,7 @@ static inline int __pcilg_mio_inuser( * user address @dst */ exception = 1; + sacf_flag = enable_sacf_uaccess(); asm_inline volatile ( " sacf 256\n" "0: .insn rre,0xb9d60000,%[val],%[ioaddr_len]\n" @@ -239,7 +247,7 @@ static inline int __pcilg_mio_inuser( [shift] "+d" (shift) : : CC_CLOBBER_LIST("memory")); - + disable_sacf_uaccess(sacf_flag); cc = exception ? -ENXIO : CC_TRANSFORM(cc); /* did we write everything to the user space buffer? */ if (!cc && cnt != 0) diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 7248e547fefb79..cdc7b2f16b884f 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -314,7 +314,7 @@ dcssblk_load_segment(char *name, struct segment_info **seg_info) if (*seg_info == NULL) return -ENOMEM; - strcpy((*seg_info)->segment_name, name); + strscpy((*seg_info)->segment_name, name); /* load the segment */ rc = segment_load(name, SEGMENT_SHARED, @@ -612,7 +612,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char rc = -ENOMEM; goto out; } - strcpy(dev_info->segment_name, local_buf); + strscpy(dev_info->segment_name, local_buf); dev_info->segment_type = seg_info->segment_type; INIT_LIST_HEAD(&dev_info->seg_list); } diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index 34f3820d7f7495..8402a0042c0d37 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c @@ -102,6 +102,7 @@ struct tty3270 { /* Input stuff. */ char *prompt; /* Output string for input area. */ + size_t prompt_sz; /* Size of output string. */ char *input; /* Input string for read request. 
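(allocated with the same tty3270_input_size() bound as the prompt)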
*/ struct raw3270_request *read; /* Single read request. */ struct raw3270_request *kreset; /* Single keyboard reset request. */ @@ -206,7 +207,7 @@ static int tty3270_input_size(int cols) static void tty3270_update_prompt(struct tty3270 *tp, char *input) { - strcpy(tp->prompt, input); + strscpy(tp->prompt, input, tp->prompt_sz); tp->update_flags |= TTY_UPDATE_INPUT; tty3270_set_timer(tp, 1); } @@ -971,6 +972,7 @@ static void tty3270_resize(struct raw3270_view *view, char *old_input, *new_input; struct tty_struct *tty; struct winsize ws; + size_t prompt_sz; int new_allocated, old_allocated = tp->allocated_lines; if (old_model == new_model && @@ -982,10 +984,11 @@ static void tty3270_resize(struct raw3270_view *view, return; } - new_input = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(new_cols); + new_input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!new_input) return; - new_prompt = kzalloc(tty3270_input_size(new_cols), GFP_KERNEL); + new_prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!new_prompt) goto out_input; screen = tty3270_alloc_screen(tp, new_rows, new_cols, &new_allocated); @@ -1010,6 +1013,7 @@ static void tty3270_resize(struct raw3270_view *view, old_rcl_lines = tp->rcl_lines; tp->input = new_input; tp->prompt = new_prompt; + tp->prompt_sz = prompt_sz; tp->rcl_lines = new_rcl_lines; tp->rcl_read_index = 0; tp->rcl_write_index = 0; @@ -1096,6 +1100,7 @@ static int tty3270_create_view(int index, struct tty3270 **newtp) { struct tty3270 *tp; + size_t prompt_sz; int rc; if (tty3270_max_index < index + 1) @@ -1125,17 +1130,19 @@ tty3270_create_view(int index, struct tty3270 **newtp) goto out_free_screen; } - tp->input = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL | GFP_DMA); + prompt_sz = tty3270_input_size(tp->view.cols); + tp->input = kzalloc(prompt_sz, GFP_KERNEL | GFP_DMA); if (!tp->input) { rc = -ENOMEM; goto out_free_converted_line; } - tp->prompt = kzalloc(tty3270_input_size(tp->view.cols), GFP_KERNEL); + tp->prompt = kzalloc(prompt_sz, GFP_KERNEL); if (!tp->prompt) { rc = -ENOMEM; goto out_free_input; } + tp->prompt_sz = prompt_sz; tp->rcl_lines = tty3270_alloc_recall(tp->view.cols); if (!tp->rcl_lines) { diff --git a/drivers/s390/char/diag_ftp.c b/drivers/s390/char/diag_ftp.c index 711f6982438e97..f41b39c9d26798 100644 --- a/drivers/s390/char/diag_ftp.c +++ b/drivers/s390/char/diag_ftp.c @@ -159,7 +159,7 @@ ssize_t diag_ftp_cmd(const struct hmcdrv_ftp_cmdspec *ftp, size_t *fsize) goto out; } - len = strscpy(ldfpl->fident, ftp->fname, sizeof(ldfpl->fident)); + len = strscpy(ldfpl->fident, ftp->fname); if (len < 0) { len = -EINVAL; goto out_free; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 1564cd7e3f598a..288734cd8f4b95 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -41,6 +41,7 @@ #include <linux/module.h> #include <asm/uv.h> #include <asm/chsc.h> +#include <linux/mempool.h> #include "ap_bus.h" #include "ap_debug.h" @@ -103,6 +104,27 @@ static struct ap_config_info *const ap_qci_info_old = &qci[1]; debug_info_t *ap_dbf_info; /* + * There is a need for a do-not-allocate-memory path through the AP bus + * layer. The pkey layer may be triggered via the in-kernel interface from + * a protected key crypto algorithm (namely PAES) to convert a secure key + * into a protected key. This happens in a workqueue context, so sleeping + * is allowed but memory allocations causing IO operations are not permitted. 
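+ * (A GFP_KERNEL allocation could recurse into memory reclaim and thus into + * IO that may itself depend on the crypto request making progress.)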
+ To accomplish this, an AP message memory pool with pre-allocated space + is established. When ap_init_apmsg() is called with the AP_MSG_FLAG_MEMPOOL + flag set, the ap message buffer is allocated from the ap_msg_pool instead + of via kmalloc(). This pool holds only a limited number of buffers + (ap_msg_pool_min_items items of size AP_DEFAULT_MAX_MSG_SIZE), and exactly + one of them (if available) is handed out per such call. When the pool is + exhausted, ap_init_apmsg() with AP_MSG_FLAG_MEMPOOL set returns -ENOMEM + without any attempt to allocate memory, and the caller has to deal with that. + */ +static mempool_t *ap_msg_pool; +static unsigned int ap_msg_pool_min_items = 8; +module_param_named(msgpool_min_items, ap_msg_pool_min_items, uint, 0440); +MODULE_PARM_DESC(msgpool_min_items, "AP message pool minimal items"); + +/* * AP bus rescan related things. */ static bool ap_scan_bus(void); @@ -547,6 +569,48 @@ static void ap_poll_thread_stop(void) #define is_card_dev(x) ((x)->parent == ap_root_device) #define is_queue_dev(x) ((x)->parent != ap_root_device) +/* + * ap_init_apmsg() - Initialize ap_message. + */ +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags) +{ + unsigned int maxmsgsize; + + memset(ap_msg, 0, sizeof(*ap_msg)); + ap_msg->flags = flags; + + if (flags & AP_MSG_FLAG_MEMPOOL) { + ap_msg->msg = mempool_alloc_preallocated(ap_msg_pool); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; + return 0; + } + + maxmsgsize = atomic_read(&ap_max_msg_size); + ap_msg->msg = kmalloc(maxmsgsize, GFP_KERNEL); + if (!ap_msg->msg) + return -ENOMEM; + ap_msg->bufsize = maxmsgsize; + + return 0; +} +EXPORT_SYMBOL(ap_init_apmsg); + +/* + * ap_release_apmsg() - Release ap_message. + */ +void ap_release_apmsg(struct ap_message *ap_msg) +{ + if (ap_msg->flags & AP_MSG_FLAG_MEMPOOL) { + memzero_explicit(ap_msg->msg, ap_msg->bufsize); + mempool_free(ap_msg->msg, ap_msg_pool); + } else { + kfree_sensitive(ap_msg->msg); + } +} +EXPORT_SYMBOL(ap_release_apmsg); + /** * ap_bus_match() * @dev: Pointer to device @@ -2431,6 +2495,14 @@ static int __init ap_module_init(void) /* init ap_queue hashtable */ hash_init(ap_queues); + /* create ap msg buffer memory pool */ + ap_msg_pool = mempool_create_kmalloc_pool(ap_msg_pool_min_items, + AP_DEFAULT_MAX_MSG_SIZE); + if (!ap_msg_pool) { + rc = -ENOMEM; + goto out; + } + /* set up the AP permissions (ioctls, ap and aq masks) */ ap_perms_init(); @@ -2477,6 +2549,7 @@ out_device: out_bus: bus_unregister(&ap_bus_type); out: + mempool_destroy(ap_msg_pool); ap_debug_exit(); return rc; } @@ -2487,6 +2560,7 @@ static void __exit ap_module_exit(void) ap_irq_exit(); root_device_unregister(ap_root_device); bus_unregister(&ap_bus_type); + mempool_destroy(ap_msg_pool); ap_debug_exit(); } diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index f4622ee4d89473..88b625ba197802 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -214,6 +214,11 @@ struct ap_queue { typedef enum ap_sm_wait (ap_func_t)(struct ap_queue *queue); +struct ap_response_type { + struct completion work; + int type; +}; + struct ap_message { struct list_head list; /* Request queueing. */ unsigned long psmid; /* Message id. */ @@ -222,7 +227,7 @@ struct ap_message { size_t bufsize; /* allocated msg buffer size */ u16 flags; /* Flags, see AP_MSG_FLAG_xxx */ int rc; /* Return code for this message */ - void *private; /* ap driver private pointer.
*/ + struct ap_response_type response; /* receive is called from tasklet context */ void (*receive)(struct ap_queue *, struct ap_message *, struct ap_message *); @@ -231,27 +236,10 @@ struct ap_message { #define AP_MSG_FLAG_SPECIAL 0x0001 /* flag msg as 'special' with NQAP */ #define AP_MSG_FLAG_USAGE 0x0002 /* CCA, EP11: usage (no admin) msg */ #define AP_MSG_FLAG_ADMIN 0x0004 /* CCA, EP11: admin (=control) msg */ +#define AP_MSG_FLAG_MEMPOOL 0x0008 /* ap msg buffer allocated via mempool */ -/** - * ap_init_message() - Initialize ap_message. - * Initialize a message before using. Otherwise this might result in - * unexpected behaviour. - */ -static inline void ap_init_message(struct ap_message *ap_msg) -{ - memset(ap_msg, 0, sizeof(*ap_msg)); -} - -/** - * ap_release_message() - Release ap_message. - * Releases all memory used internal within the ap_message struct - * Currently this is the message and private field. - */ -static inline void ap_release_message(struct ap_message *ap_msg) -{ - kfree_sensitive(ap_msg->msg); - kfree_sensitive(ap_msg->private); -} +int ap_init_apmsg(struct ap_message *ap_msg, u32 flags); +void ap_release_apmsg(struct ap_message *ap_msg); enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event); enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 3a39e167bdbff8..cef60770f68bc6 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -24,7 +24,8 @@ */ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, size_t keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; @@ -32,14 +33,14 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, rc = pkey_handler_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); /* if this did not work, try the slowpath way */ if (rc == -ENODEV) { rc = pkey_handler_slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); if (rc) rc = -ENODEV; } @@ -52,16 +53,16 @@ static int key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, * In-Kernel function: Transform a key blob (of any type) into a protected key */ int pkey_key2protkey(const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = key2protkey(NULL, 0, key, keylen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, xflags); } return rc; @@ -103,7 +104,7 @@ static int pkey_ioctl_genseck(struct pkey_genseck __user *ugs) keybuflen = sizeof(kgs.seckey.seckey); rc = pkey_handler_gen_key(&apqn, 1, kgs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, - kgs.seckey.seckey, &keybuflen, NULL); + kgs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugs, &kgs, sizeof(kgs))) rc = -EFAULT; @@ -129,7 +130,7 @@ static int pkey_ioctl_clr2seck(struct pkey_clr2seck __user *ucs) kcs.keytype, PKEY_TYPE_CCA_DATA, 0, 0, kcs.clrkey.clrkey, pkey_keytype_aes_to_size(kcs.keytype), - kcs.seckey.seckey, &keybuflen, NULL); + kcs.seckey.seckey, &keybuflen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); if (!rc 
&& copy_to_user(ucs, &kcs, sizeof(kcs))) rc = -EFAULT; @@ -154,7 +155,8 @@ static int pkey_ioctl_sec2protk(struct pkey_sec2protk __user *usp) ksp.seckey.seckey, sizeof(ksp.seckey.seckey), ksp.protkey.protkey, - &ksp.protkey.len, &ksp.protkey.type); + &ksp.protkey.len, &ksp.protkey.type, + 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -198,7 +200,7 @@ static int pkey_ioctl_clr2protk(struct pkey_clr2protk __user *ucp) rc = key2protkey(NULL, 0, tmpbuf, sizeof(*t) + keylen, kcp.protkey.protkey, - &kcp.protkey.len, &kcp.protkey.type); + &kcp.protkey.len, &kcp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -228,12 +230,12 @@ static int pkey_ioctl_findcard(struct pkey_findcard __user *ufc) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_CUR_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); if (rc == -ENODEV) rc = pkey_handler_apqns_for_key(kfc.seckey.seckey, sizeof(kfc.seckey.seckey), PKEY_FLAGS_MATCH_ALT_MKVP, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); if (rc) { kfree(apqns); @@ -262,7 +264,7 @@ static int pkey_ioctl_skey2pkey(struct pkey_skey2pkey __user *usp) sizeof(ksp.seckey.seckey), ksp.protkey.protkey, &ksp.protkey.len, - &ksp.protkey.type); + &ksp.protkey.type, 0); pr_debug("key_to_protkey()=%d\n", rc); if (!rc && copy_to_user(usp, &ksp, sizeof(ksp))) rc = -EFAULT; @@ -285,7 +287,7 @@ static int pkey_ioctl_verifykey(struct pkey_verifykey __user *uvk) rc = pkey_handler_verify_key(kvk.seckey.seckey, sizeof(kvk.seckey.seckey), &kvk.cardnr, &kvk.domain, - &keytype, &keybitsize, &flags); + &keytype, &keybitsize, &flags, 0); pr_debug("verify_key()=%d\n", rc); if (!rc && keytype != PKEY_TYPE_CCA_DATA) rc = -EINVAL; @@ -312,7 +314,7 @@ static int pkey_ioctl_genprotk(struct pkey_genprotk __user *ugp) rc = pkey_handler_gen_key(NULL, 0, kgp.keytype, PKEY_TYPE_PROTKEY, 0, 0, kgp.protkey.protkey, &kgp.protkey.len, - &kgp.protkey.type); + &kgp.protkey.type, 0); pr_debug("gen_key()=%d\n", rc); if (!rc && copy_to_user(ugp, &kgp, sizeof(kgp))) rc = -EFAULT; @@ -354,7 +356,7 @@ static int pkey_ioctl_verifyprotk(struct pkey_verifyprotk __user *uvp) memcpy(t->protkey, kvp.protkey.protkey, kvp.protkey.len); rc = pkey_handler_verify_key(tmpbuf, sizeof(*t), - NULL, NULL, NULL, NULL, NULL); + NULL, NULL, NULL, NULL, NULL, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(tmpbuf); @@ -377,7 +379,7 @@ static int pkey_ioctl_kblob2protk(struct pkey_kblob2pkey __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(NULL, 0, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree_sensitive(kkey); if (!rc && copy_to_user(utp, &ktp, sizeof(ktp))) @@ -414,7 +416,7 @@ static int pkey_ioctl_genseck2(struct pkey_genseck2 __user *ugs) } rc = pkey_handler_gen_key(apqns, kgs.apqn_entries, u, kgs.type, kgs.size, kgs.keygenflags, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("gen_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -471,7 +473,7 @@ static int pkey_ioctl_clr2seck2(struct pkey_clr2seck2 __user *ucs) rc = pkey_handler_clr_to_key(apqns, kcs.apqn_entries, u, kcs.type, kcs.size, kcs.keygenflags, kcs.clrkey.clrkey, kcs.size / 8, - kkey, &klen, NULL); + kkey, &klen, NULL, 0); pr_debug("clr_to_key()=%d\n", rc); kfree(apqns); if (rc) { @@ -514,7 +516,7 @@ static int pkey_ioctl_verifykey2(struct pkey_verifykey2 __user *uvk) 
rc = pkey_handler_verify_key(kkey, kvk.keylen, &kvk.cardnr, &kvk.domain, - &kvk.type, &kvk.size, &kvk.flags); + &kvk.type, &kvk.size, &kvk.flags, 0); pr_debug("verify_key()=%d\n", rc); kfree_sensitive(kkey); @@ -544,7 +546,7 @@ static int pkey_ioctl_kblob2protk2(struct pkey_kblob2pkey2 __user *utp) ktp.protkey.len = sizeof(ktp.protkey.protkey); rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, ktp.protkey.protkey, &ktp.protkey.len, - &ktp.protkey.type); + &ktp.protkey.type, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); @@ -579,7 +581,7 @@ static int pkey_ioctl_apqns4k(struct pkey_apqns4key __user *uak) return PTR_ERR(kkey); } rc = pkey_handler_apqns_for_key(kkey, kak.keylen, kak.flags, - apqns, &nr_apqns); + apqns, &nr_apqns, 0); pr_debug("apqns_for_key()=%d\n", rc); kfree_sensitive(kkey); if (rc && rc != -ENOSPC) { @@ -626,7 +628,7 @@ static int pkey_ioctl_apqns4kt(struct pkey_apqns4keytype __user *uat) } rc = pkey_handler_apqns_for_keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp, - kat.flags, apqns, &nr_apqns); + kat.flags, apqns, &nr_apqns, 0); pr_debug("apqns_for_keytype()=%d\n", rc); if (rc && rc != -ENOSPC) { kfree(apqns); @@ -678,7 +680,7 @@ static int pkey_ioctl_kblob2protk3(struct pkey_kblob2pkey3 __user *utp) return -ENOMEM; } rc = key2protkey(apqns, ktp.apqn_entries, kkey, ktp.keylen, - protkey, &protkeylen, &ktp.pkeytype); + protkey, &protkeylen, &ktp.pkeytype, 0); pr_debug("key2protkey()=%d\n", rc); kfree(apqns); kfree_sensitive(kkey); diff --git a/drivers/s390/crypto/pkey_base.c b/drivers/s390/crypto/pkey_base.c index 64a376501d2659..9e6f319acc63c4 100644 --- a/drivers/s390/crypto/pkey_base.c +++ b/drivers/s390/crypto/pkey_base.c @@ -150,7 +150,8 @@ EXPORT_SYMBOL(pkey_handler_put); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -159,7 +160,7 @@ int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->key_to_protkey) { rc = h->key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); } pkey_handler_put(h); @@ -177,7 +178,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { const struct pkey_handler *h, *htmp[10]; int i, n = 0, rc = -ENODEV; @@ -199,7 +200,7 @@ int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, rc = h->slowpath_key_to_protkey(apqns, nr_apqns, key, keylen, protkey, protkeylen, - protkeytype); + protkeytype, xflags); module_put(h->module); } @@ -210,7 +211,7 @@ EXPORT_SYMBOL(pkey_handler_slowpath_key_to_protkey); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -219,7 +220,7 @@ int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->gen_key) { rc = h->gen_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -231,7 +232,8 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t 
nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -240,7 +242,7 @@ int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (h && h->clr_to_key) { rc = h->clr_to_key(apqns, nr_apqns, keytype, keysubtype, keybitsize, flags, clrkey, clrkeylen, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, xflags); } pkey_handler_put(h); @@ -250,7 +252,8 @@ EXPORT_SYMBOL(pkey_handler_clr_to_key); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -258,7 +261,7 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, h = pkey_handler_get_keybased(key, keylen); if (h && h->verify_key) { rc = h->verify_key(key, keylen, card, dom, - keytype, keybitsize, flags); + keytype, keybitsize, flags, xflags); } pkey_handler_put(h); @@ -267,14 +270,16 @@ int pkey_handler_verify_key(const u8 *key, u32 keylen, EXPORT_SYMBOL(pkey_handler_verify_key); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; h = pkey_handler_get_keybased(key, keylen); if (h && h->apqns_for_key) - rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns); + rc = h->apqns_for_key(key, keylen, flags, apqns, nr_apqns, + xflags); pkey_handler_put(h); return rc; @@ -283,7 +288,8 @@ EXPORT_SYMBOL(pkey_handler_apqns_for_key); int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags) { const struct pkey_handler *h; int rc = -ENODEV; @@ -292,7 +298,7 @@ int pkey_handler_apqns_for_keytype(enum pkey_key_type keysubtype, if (h && h->apqns_for_keytype) { rc = h->apqns_for_keytype(keysubtype, cur_mkvp, alt_mkvp, flags, - apqns, nr_apqns); + apqns, nr_apqns, xflags); } pkey_handler_put(h); diff --git a/drivers/s390/crypto/pkey_base.h b/drivers/s390/crypto/pkey_base.h index 7347647dfaa768..9cdb3e74477f6b 100644 --- a/drivers/s390/crypto/pkey_base.h +++ b/drivers/s390/crypto/pkey_base.h @@ -159,29 +159,33 @@ struct pkey_handler { bool (*is_supported_keytype)(enum pkey_key_type); int (*key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int (*slowpath_key_to_protkey)(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int (*gen_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*clr_to_key)(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int (*verify_key)(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 
*keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int (*apqns_for_key)(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int (*apqns_for_keytype)(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* used internal by pkey base */ struct list_head list; }; @@ -199,29 +203,34 @@ void pkey_handler_put(const struct pkey_handler *handler); int pkey_handler_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); int pkey_handler_slowpath_key_to_protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); int pkey_handler_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, u32 xflags); int pkey_handler_clr_to_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 keysubtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo); + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 xflags); int pkey_handler_verify_key(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags); + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags); int pkey_handler_apqns_for_key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); int pkey_handler_apqns_for_keytype(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns); + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 xflags); /* * Unconditional try to load all handler modules diff --git a/drivers/s390/crypto/pkey_cca.c b/drivers/s390/crypto/pkey_cca.c index cda22db31f6c11..6c7897a93f273b 100644 --- a/drivers/s390/crypto/pkey_cca.c +++ b/drivers/s390/crypto/pkey_cca.c @@ -70,12 +70,15 @@ static bool is_cca_keytype(enum pkey_key_type key_type) } static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP | PKEY_FLAGS_MATCH_ALT_MKVP; @@ -107,9 +110,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -126,9 +129,9 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, /* unknown CCA internal 2 token type */ return -EINVAL; } - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -147,18 +150,21 @@ static int cca_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, + u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { @@ -171,9 +177,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, old_mkvp = *((u64 *)alt_mkvp); if (ktype == PKEY_TYPE_CCA_CIPHER) minhwtype = ZCRYPT_CEX6; - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, minhwtype, AES_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -184,9 +190,9 @@ static int cca_apqns4type(enum pkey_key_type ktype, cur_mkvp = *((u64 *)cur_mkvp); if (flags & PKEY_FLAGS_MATCH_ALT_MKVP) old_mkvp = *((u64 *)alt_mkvp); - rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + rc = cca_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, ZCRYPT_CEX7, APKA_MK_SET, - cur_mkvp, old_mkvp, 1); + cur_mkvp, old_mkvp, xflags); if (rc) goto out; @@ -205,19 +211,22 @@ static int cca_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -253,14 +262,10 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = cca_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = cca_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -268,16 +273,16 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, hdr->version == TOKVER_CCA_AES) { rc = cca_sec2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL && hdr->version == TOKVER_CCA_VLSC) { rc = cca_cipher2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) { rc = cca_ecc2protkey(apqns[i].card, apqns[i].domain, key, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -285,7 +290,6 @@ static int cca_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -302,10 +306,13 @@ out: static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -340,32 +347,27 @@ static int cca_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_gencipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_genseckey(apqns[i].card, apqns[i].domain, - keybitsize, keybuf); + keybitsize, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -383,10 +385,13 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -426,44 +431,42 @@ static int cca_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = cca_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { if (subtype == PKEY_TYPE_CCA_CIPHER) { rc = cca_clr2cipherkey(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen); + keybuf, keybuflen, xflags); } else { /* PKEY_TYPE_CCA_DATA */ rc = cca_clr2seckey(apqns[i].card, apqns[i].domain, - keybitsize, clrkey, keybuf); + keybitsize, clrkey, keybuf, xflags); *keybuflen = (rc ? 0 : SECKEYBLOBSIZE); } } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int cca_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -478,15 +481,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, goto out; *keytype = PKEY_TYPE_CCA_DATA; *keybitsize = t->bitsize; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - t->mkvp, 0, 1); + t->mkvp, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX3C, AES_MK_SET, - 0, t->mkvp, 1); + 0, t->mkvp, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -511,15 +515,16 @@ static int cca_verifykey(const u8 *key, u32 keylen, *keybitsize = PKEY_SIZE_AES_192; else if (!t->plfver && t->wpllen == 640) *keybitsize = PKEY_SIZE_AES_256; - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - t->mkvp0, 0, 1); + t->mkvp0, 0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_CUR_MKVP; if (rc == -ENODEV) { - rc = cca_findcard2(&apqns, &nr_apqns, *card, *dom, + nr_apqns = ARRAY_SIZE(apqns); + rc = cca_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX6, AES_MK_SET, - 0, t->mkvp0, 1); + 0, t->mkvp0, xflags); if (!rc) *flags = PKEY_FLAGS_MATCH_ALT_MKVP; } @@ -535,7 +540,6 @@ static int cca_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -551,12 +555,12 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[SECKEYBLOBSIZE]; /* 64 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -568,26 +572,20 @@ static int cca_slowpath_key2protkey(const struct pkey_apqn *apqns, if 
(!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(SECKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = SECKEYBLOBSIZE; rc = cca_clr2key(NULL, 0, t->keytype, PKEY_TYPE_CCA_DATA, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("cca_clr2key()=%d\n", rc); if (rc) continue; rc = cca_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("cca_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_ep11.c b/drivers/s390/crypto/pkey_ep11.c index 5b033ca3e8285a..6b23adc560c8a8 100644 --- a/drivers/s390/crypto/pkey_ep11.c +++ b/drivers/s390/crypto/pkey_ep11.c @@ -70,12 +70,15 @@ static bool is_ep11_keytype(enum pkey_key_type key_type) } static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (!flags) flags = PKEY_FLAGS_MATCH_CUR_MKVP; @@ -98,8 +101,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -115,8 +118,8 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, minhwtype = ZCRYPT_CEX7; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; } - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - minhwtype, api, kb->wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + minhwtype, api, kb->wkvp, xflags); if (rc) goto out; @@ -135,18 +138,20 @@ static int ep11_apqns4key(const u8 *key, u32 keylen, u32 flags, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_apqns4type(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, - struct pkey_apqn *apqns, size_t *nr_apqns) + struct pkey_apqn *apqns, size_t *nr_apqns, u32 pflags) { - u32 _nr_apqns, *_apqns = NULL; + u32 _apqns[MAXAPQNSINLIST], _nr_apqns = ARRAY_SIZE(_apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + zcrypt_wait_api_operational(); if (ktype == PKEY_TYPE_EP11 || @@ -158,8 +163,8 @@ static int ep11_apqns4type(enum pkey_key_type ktype, if (flags & PKEY_FLAGS_MATCH_CUR_MKVP) wkvp = cur_mkvp; api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, - ZCRYPT_CEX7, api, wkvp); + rc = ep11_findcard2(_apqns, &_nr_apqns, 0xFFFF, 0xFFFF, + ZCRYPT_CEX7, api, wkvp, xflags); if (rc) goto out; @@ -178,19 +183,22 @@ static int ep11_apqns4type(enum pkey_key_type ktype, *nr_apqns = _nr_apqns; out: - kfree(_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; + u32 xflags; int i, rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -225,14 +233,10 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; - rc = ep11_apqns4key(key, keylen, 0, local_apqns, &nr_apqns); + rc = ep11_apqns4key(key, keylen, 0, _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { @@ -241,19 +245,19 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_ECC_WITH_HEADER && is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else if (hdr->type == TOKTYPE_NON_CCA && hdr->version == TOKVER_EP11_AES && is_ep11_keyblob(key)) { rc = ep11_kblob2protkey(apqns[i].card, apqns[i].domain, key, hdr->len, protkey, - protkeylen, protkeytype); + protkeylen, protkeytype, xflags); } else { rc = -EINVAL; break; @@ -261,7 +265,6 @@ static int ep11_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -278,10 +281,13 @@ out: static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? 
ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, keybitsize */ switch (keytype) { @@ -316,25 +322,20 @@ static int ep11_gen_key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_genaeskey(apqns[i].card, apqns[i].domain, keybitsize, flags, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -352,10 +353,13 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, u32 keytype, u32 subtype, u32 keybitsize, u32 flags, const u8 *clrkey, u32 clrkeylen, - u8 *keybuf, u32 *keybuflen, u32 *_keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *_keyinfo, u32 pflags) { - struct pkey_apqn *local_apqns = NULL; + struct pkey_apqn _apqns[MAXAPQNSINLIST]; int i, len, rc; + u32 xflags; + + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; /* check keytype, subtype, clrkeylen, keybitsize */ switch (keytype) { @@ -395,37 +399,35 @@ static int ep11_clr2key(const struct pkey_apqn *apqns, size_t nr_apqns, if (!apqns || (nr_apqns == 1 && apqns[0].card == 0xFFFF && apqns[0].domain == 0xFFFF)) { nr_apqns = MAXAPQNSINLIST; - local_apqns = kmalloc_array(nr_apqns, sizeof(struct pkey_apqn), - GFP_KERNEL); - if (!local_apqns) - return -ENOMEM; rc = ep11_apqns4type(subtype, NULL, NULL, 0, - local_apqns, &nr_apqns); + _apqns, &nr_apqns, pflags); if (rc) goto out; - apqns = local_apqns; + apqns = _apqns; } for (rc = -ENODEV, i = 0; rc && i < nr_apqns; i++) { rc = ep11_clr2keyblob(apqns[i].card, apqns[i].domain, keybitsize, flags, clrkey, - keybuf, keybuflen, subtype); + keybuf, keybuflen, subtype, xflags); } out: - kfree(local_apqns); pr_debug("rc=%d\n", rc); return rc; } static int ep11_verifykey(const u8 *key, u32 keylen, u16 *card, u16 *dom, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, u32 pflags) { struct keytoken_header *hdr = (struct keytoken_header *)key; - u32 nr_apqns, *apqns = NULL; + u32 apqns[MAXAPQNSINLIST], nr_apqns = ARRAY_SIZE(apqns); + u32 xflags; int rc; + xflags = pflags & PKEY_XFLAG_NOMEMALLOC ? ZCRYPT_XFLAG_NOMEMALLOC : 0; + if (keylen < sizeof(*hdr)) return -EINVAL; @@ -443,9 +445,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kb->head.bitlen; api = ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -467,9 +469,9 @@ static int ep11_verifykey(const u8 *key, u32 keylen, *keybitsize = kh->bitlen; api = ap_is_se_guest() ? 
EP11_API_V6 : EP11_API_V4; - rc = ep11_findcard2(&apqns, &nr_apqns, *card, *dom, + rc = ep11_findcard2(apqns, &nr_apqns, *card, *dom, ZCRYPT_CEX7, api, - ep11_kb_wkvp(key, keylen)); + ep11_kb_wkvp(key, keylen), xflags); if (rc) goto out; @@ -484,7 +486,6 @@ static int ep11_verifykey(const u8 *key, u32 keylen, } out: - kfree(apqns); pr_debug("rc=%d\n", rc); return rc; } @@ -500,12 +501,12 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, size_t nr_apqns, const u8 *key, u32 keylen, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 pflags) { const struct keytoken_header *hdr = (const struct keytoken_header *)key; const struct clearkeytoken *t = (const struct clearkeytoken *)key; + u8 tmpbuf[MAXEP11AESKEYBLOBSIZE]; /* 336 bytes */ u32 tmplen, keysize = 0; - u8 *tmpbuf; int i, rc; if (keylen < sizeof(*hdr)) @@ -517,26 +518,20 @@ static int ep11_slowpath_key2protkey(const struct pkey_apqn *apqns, if (!keysize || t->len != keysize) return -EINVAL; - /* alloc tmp key buffer */ - tmpbuf = kmalloc(MAXEP11AESKEYBLOBSIZE, GFP_ATOMIC); - if (!tmpbuf) - return -ENOMEM; - /* try two times in case of failure */ for (i = 0, rc = -ENODEV; i < 2 && rc; i++) { tmplen = MAXEP11AESKEYBLOBSIZE; rc = ep11_clr2key(NULL, 0, t->keytype, PKEY_TYPE_EP11, 8 * keysize, 0, t->clearkey, t->len, - tmpbuf, &tmplen, NULL); + tmpbuf, &tmplen, NULL, pflags); pr_debug("ep11_clr2key()=%d\n", rc); if (rc) continue; rc = ep11_key2protkey(NULL, 0, tmpbuf, tmplen, - protkey, protkeylen, protkeytype); + protkey, protkeylen, protkeytype, pflags); pr_debug("ep11_key2protkey()=%d\n", rc); } - kfree(tmpbuf); pr_debug("rc=%d\n", rc); return rc; } diff --git a/drivers/s390/crypto/pkey_pckmo.c b/drivers/s390/crypto/pkey_pckmo.c index 835d59f4fbc563..7eca9f1340bdd3 100644 --- a/drivers/s390/crypto/pkey_pckmo.c +++ b/drivers/s390/crypto/pkey_pckmo.c @@ -406,7 +406,8 @@ out: static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, size_t _nr_apqns, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_key2protkey(key, keylen, protkey, protkeylen, keyinfo); @@ -415,7 +416,8 @@ static int pkey_pckmo_key2protkey(const struct pkey_apqn *_apqns, static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, u32 keytype, u32 keysubtype, u32 _keybitsize, u32 _flags, - u8 *keybuf, u32 *keybuflen, u32 *keyinfo) + u8 *keybuf, u32 *keybuflen, u32 *keyinfo, + u32 _xflags __always_unused) { return pckmo_gen_protkey(keytype, keysubtype, keybuf, keybuflen, keyinfo); @@ -423,7 +425,8 @@ static int pkey_pckmo_gen_key(const struct pkey_apqn *_apqns, size_t _nr_apqns, static int pkey_pckmo_verifykey(const u8 *key, u32 keylen, u16 *_card, u16 *_dom, - u32 *_keytype, u32 *_keybitsize, u32 *_flags) + u32 *_keytype, u32 *_keybitsize, + u32 *_flags, u32 _xflags __always_unused) { return pckmo_verify_key(key, keylen); } diff --git a/drivers/s390/crypto/pkey_sysfs.c b/drivers/s390/crypto/pkey_sysfs.c index 57edc97bafd294..cea77297364911 100644 --- a/drivers/s390/crypto/pkey_sysfs.c +++ b/drivers/s390/crypto/pkey_sysfs.c @@ -29,13 +29,13 @@ static int sys_pkey_handler_gen_key(u32 keytype, u32 keysubtype, rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, keyinfo); + keybuf, keybuflen, keyinfo, 0); if (rc == -ENODEV) { pkey_handler_request_modules(); rc = pkey_handler_gen_key(NULL, 0, keytype, keysubtype, keybitsize, flags, - keybuf, keybuflen, 
keyinfo); + keybuf, keybuflen, keyinfo, 0); } return rc; diff --git a/drivers/s390/crypto/pkey_uv.c b/drivers/s390/crypto/pkey_uv.c index 805817b1435401..e5c6e01acaf393 100644 --- a/drivers/s390/crypto/pkey_uv.c +++ b/drivers/s390/crypto/pkey_uv.c @@ -21,6 +21,12 @@ MODULE_AUTHOR("IBM Corporation"); MODULE_DESCRIPTION("s390 protected key UV handler"); /* + * One pre-allocated uv_secret_list for use with uv_find_secret() + */ +static struct uv_secret_list *uv_list; +static DEFINE_MUTEX(uv_list_mutex); + +/* * UV secret token struct and defines. */ @@ -85,13 +91,26 @@ static bool is_uv_keytype(enum pkey_key_type keytype) } } +static int get_secret_metadata(const u8 secret_id[UV_SECRET_ID_LEN], + struct uv_secret_list_item_hdr *secret) +{ + int rc; + + mutex_lock(&uv_list_mutex); + memset(uv_list, 0, sizeof(*uv_list)); + rc = uv_find_secret(secret_id, uv_list, secret); + mutex_unlock(&uv_list_mutex); + + return rc; +} + static int retrieve_secret(const u8 secret_id[UV_SECRET_ID_LEN], u16 *secret_type, u8 *buf, u32 *buflen) { struct uv_secret_list_item_hdr secret_meta_data; int rc; - rc = uv_get_secret_metadata(secret_id, &secret_meta_data); + rc = get_secret_metadata(secret_id, &secret_meta_data); if (rc) return rc; @@ -172,7 +191,8 @@ static int uv_get_size_and_type(u16 secret_type, u32 *pkeysize, u32 *pkeytype) static int uv_key2protkey(const struct pkey_apqn *_apqns __always_unused, size_t _nr_apqns __always_unused, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *keyinfo) + u8 *protkey, u32 *protkeylen, u32 *keyinfo, + u32 _xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; u32 pkeysize, pkeytype; @@ -214,7 +234,8 @@ out: static int uv_verifykey(const u8 *key, u32 keylen, u16 *_card __always_unused, u16 *_dom __always_unused, - u32 *keytype, u32 *keybitsize, u32 *flags) + u32 *keytype, u32 *keybitsize, u32 *flags, + u32 xflags __always_unused) { struct uvsecrettoken *t = (struct uvsecrettoken *)key; struct uv_secret_list_item_hdr secret_meta_data; @@ -225,7 +246,7 @@ static int uv_verifykey(const u8 *key, u32 keylen, if (rc) goto out; - rc = uv_get_secret_metadata(t->secret_id, &secret_meta_data); + rc = get_secret_metadata(t->secret_id, &secret_meta_data); if (rc) goto out; @@ -263,13 +284,23 @@ static struct pkey_handler uv_handler = { */ static int __init pkey_uv_init(void) { + int rc; + if (!is_prot_virt_guest()) return -ENODEV; if (!test_bit_inv(BIT_UVC_CMD_RETR_SECRET, uv_info.inst_calls_list)) return -ENODEV; - return pkey_handler_register(&uv_handler); + uv_list = kmalloc(sizeof(*uv_list), GFP_KERNEL); + if (!uv_list) + return -ENOMEM; + + rc = pkey_handler_register(&uv_handler); + if (rc) + kfree(uv_list); + + return rc; } /* @@ -278,6 +309,9 @@ static int __init pkey_uv_init(void) static void __exit pkey_uv_exit(void) { pkey_handler_unregister(&uv_handler); + mutex_lock(&uv_list_mutex); + kvfree(uv_list); + mutex_unlock(&uv_list_mutex); } module_cpu_feature_match(S390_CPU_FEATURE_UV, pkey_uv_init); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 5020696f13797d..89baa87a13fcc2 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -50,6 +50,10 @@ MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ "Copyright IBM Corp. 
2001, 2012"); MODULE_LICENSE("GPL"); +unsigned int zcrypt_mempool_threshold = 5; +module_param_named(mempool_threshold, zcrypt_mempool_threshold, uint, 0440); +MODULE_PARM_DESC(mempool_threshold, "CCA and EP11 request/reply mempool minimal items (min: 1)"); + /* * zcrypt tracepoint functions */ @@ -642,16 +646,17 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (mex->outputdatalength < mex->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -728,7 +733,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -746,16 +751,17 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; - int cpen, qpen, qid = 0, rc = -ENODEV; + unsigned int func_code = 0; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(crt, TP_ICARSACRT); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; if (crt->outputdatalength < crt->inputdatalength) { - func_code = 0; rc = -EINVAL; goto out; } @@ -832,7 +838,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -842,23 +848,28 @@ out: return rc; } -static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ica_xcRB *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; struct ap_message ap_msg; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; unsigned short *domain, tdom; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB); xcrb->status = 0; - ap_init_message(&ap_msg); + + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) @@ -962,7 +973,7 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -972,7 +983,7 @@ out: return rc; } -long zcrypt_send_cprb(struct ica_xcRB *xcrb) +long zcrypt_send_cprb(struct ica_xcRB *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -980,13 +991,13 @@ long zcrypt_send_cprb(struct ica_xcRB *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1024,50 +1035,50 @@ static bool is_desired_ep11_queue(unsigned int dev_qid, return false; } -static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, +static long _zcrypt_send_ep11_cprb(u32 xflags, struct ap_perms *perms, struct zcrypt_track *tr, struct ep11_urb *xcrb) { + bool userspace = xflags & ZCRYPT_XFLAG_USERSPACE; struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; - struct ep11_target_dev *targets; + struct ep11_target_dev *targets = NULL; unsigned short target_num; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code, domain; + unsigned int func_code = 0, domain; struct ap_message ap_msg; - int cpen, qpen, qid = 0, rc = -ENODEV; + int cpen, qpen, qid = 0, rc; struct module *mod; trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, xflags & ZCRYPT_XFLAG_NOMEMALLOC ? 
+ AP_MSG_FLAG_MEMPOOL : 0); + if (rc) + goto out; target_num = (unsigned short)xcrb->targets_num; /* empty list indicates autoselect (all available targets) */ - targets = NULL; + rc = -ENOMEM; if (target_num != 0) { - struct ep11_target_dev __user *uptr; - - targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); - if (!targets) { - func_code = 0; - rc = -ENOMEM; - goto out; - } - - uptr = (struct ep11_target_dev __force __user *)xcrb->targets; - if (z_copy_from_user(userspace, targets, uptr, - target_num * sizeof(*targets))) { - func_code = 0; - rc = -EFAULT; - goto out_free; + if (userspace) { + targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); + if (!targets) + goto out; + if (copy_from_user(targets, xcrb->targets, + target_num * sizeof(*targets))) { + rc = -EFAULT; + goto out; + } + } else { + targets = (struct ep11_target_dev __force __kernel *)xcrb->targets; } } rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain); if (rc) - goto out_free; + goto out; print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1, ap_msg.msg, ap_msg.len, false); @@ -1075,11 +1086,11 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, if (ap_msg.flags & AP_MSG_FLAG_ADMIN) { if (!test_bit_inv(domain, perms->adm)) { rc = -ENODEV; - goto out_free; + goto out; } } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) { rc = -EOPNOTSUPP; - goto out_free; + goto out; } } @@ -1147,7 +1158,7 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, pr_debug("no match for address ff.ffff => ENODEV\n"); } rc = -ENODEV; - goto out_free; + goto out; } qid = pref_zq->queue->qid; @@ -1161,10 +1172,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); spin_unlock(&zcrypt_list_lock); -out_free: - kfree(targets); out: - ap_release_message(&ap_msg); + if (userspace) + kfree(targets); + ap_release_apmsg(&ap_msg); if (tr) { tr->last_rc = rc; tr->last_qid = qid; @@ -1174,7 +1185,7 @@ out: return rc; } -long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) +long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb, u32 xflags) { struct zcrypt_track tr; int rc; @@ -1182,13 +1193,13 @@ long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) memset(&tr, 0, sizeof(tr)); do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, &ap_perms, &tr, xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1204,7 +1215,7 @@ static long zcrypt_rng(char *buffer) struct zcrypt_card *zc, *pref_zc; struct zcrypt_queue *zq, *pref_zq; unsigned int wgt = 0, pref_wgt = 0; - unsigned int func_code; + unsigned int func_code = 0; struct ap_message ap_msg; unsigned int domain; int qid = 0, rc = -ENODEV; @@ -1212,7 +1223,9 @@ static long zcrypt_rng(char *buffer) trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); - ap_init_message(&ap_msg); + rc = ap_init_apmsg(&ap_msg, 0); + if (rc) + goto out; rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain); if (rc) goto out; @@ -1258,7 +1271,7 @@ static long zcrypt_rng(char *buffer) spin_unlock(&zcrypt_list_lock); out: - ap_release_message(&ap_msg); + 
ap_release_apmsg(&ap_msg); trace_s390_zcrypt_rep(buffer, func_code, rc, AP_QID_CARD(qid), AP_QID_QUEUE(qid)); return rc; @@ -1291,19 +1304,25 @@ static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) spin_unlock(&zcrypt_list_lock); } -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue) { struct zcrypt_card *zc; struct zcrypt_queue *zq; struct zcrypt_device_status_ext *stat; int card, queue; + maxcard = min_t(int, maxcard, MAX_ZDEV_CARDIDS_EXT); + maxqueue = min_t(int, maxqueue, MAX_ZDEV_DOMAINS_EXT); + spin_lock(&zcrypt_list_lock); for_each_zcrypt_card(zc) { for_each_zcrypt_queue(zq, zc) { card = AP_QID_CARD(zq->queue->qid); queue = AP_QID_QUEUE(zq->queue->qid); - stat = &devstatus[card * AP_DOMAINS + queue]; + if (card >= maxcard || queue >= maxqueue) + continue; + stat = &devstatus[card * maxqueue + queue]; stat->hwtype = zc->card->ap_dev.device_type; stat->functions = zc->card->hwinfo.fac >> 26; stat->qid = zq->queue->qid; @@ -1523,6 +1542,7 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ica_xcRB xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ica_xcRB __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1530,13 +1550,13 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1553,6 +1573,7 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) int rc; struct ep11_urb xcrb; struct zcrypt_track tr; + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct ep11_urb __user *uxcrb = (void __user *)arg; memset(&tr, 0, sizeof(tr)); @@ -1560,13 +1581,13 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) return -EFAULT; do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); + rc = _zcrypt_send_ep11_cprb(xflags, perms, &tr, &xcrb); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -1607,7 +1628,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, GFP_KERNEL); if (!device_status) return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + zcrypt_device_status_mask_ext(device_status, + MAX_ZDEV_CARDIDS_EXT, + MAX_ZDEV_DOMAINS_EXT); if (copy_to_user((char __user *)arg, device_status, total_size)) rc = -EFAULT; @@ -1827,6 +1850,7 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, unsigned int cmd, unsigned long arg) { struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg); + u32 xflags = ZCRYPT_XFLAG_USERSPACE; struct 
compat_ica_xcrb xcrb32; struct zcrypt_track tr; struct ica_xcRB xcrb64; @@ -1856,13 +1880,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp, xcrb64.priority_window = xcrb32.priority_window; xcrb64.status = xcrb32.status; do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); /* on ENODEV failure: retry once again after a requested rescan */ if (rc == -ENODEV && zcrypt_process_rescan()) do { - rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64); + rc = _zcrypt_send_cprb(xflags, perms, &tr, &xcrb64); } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX); if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) rc = -EIO; @@ -2132,13 +2156,27 @@ int __init zcrypt_api_init(void) { int rc; + /* make sure the mempool threshold is >= 1 */ + if (zcrypt_mempool_threshold < 1) { + rc = -EINVAL; + goto out; + } + rc = zcrypt_debug_init(); if (rc) goto out; rc = zcdn_init(); if (rc) - goto out; + goto out_zcdn_init_failed; + + rc = zcrypt_ccamisc_init(); + if (rc) + goto out_ccamisc_init_failed; + + rc = zcrypt_ep11misc_init(); + if (rc) + goto out_ep11misc_init_failed; /* Register the request sprayer. */ rc = misc_register(&zcrypt_misc_device); @@ -2151,7 +2189,12 @@ int __init zcrypt_api_init(void) return 0; out_misc_register_failed: + zcrypt_ep11misc_exit(); +out_ep11misc_init_failed: + zcrypt_ccamisc_exit(); +out_ccamisc_init_failed: zcdn_exit(); +out_zcdn_init_failed: zcrypt_debug_exit(); out: return rc; diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 4ed481df57ca4a..6ef8850a42df1f 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -76,6 +76,13 @@ struct zcrypt_track { #define TRACK_AGAIN_CARD_WEIGHT_PENALTY 1000 #define TRACK_AGAIN_QUEUE_WEIGHT_PENALTY 10000 +/* + * xflags - to be used with zcrypt_send_cprb() and + * zcrypt_send_ep11_cprb() for the xflags parameter. 
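Illustrative only: how callers combine the xflags defined just below, with variable names taken from the hunks above. An in-kernel caller whose data pointers address kernel memory and which must not allocate memory leaves ZCRYPT_XFLAG_USERSPACE clear and sets ZCRYPT_XFLAG_NOMEMALLOC; the ioctl paths pass ZCRYPT_XFLAG_USERSPACE instead:

/* in-kernel caller on the do-not-allocate path: */
rc = zcrypt_send_cprb(xcrb, ZCRYPT_XFLAG_NOMEMALLOC);

/* ioctl path, CPRB data pointers address userspace
 * (see zsecsendcprb_ioctl() above):
 */
rc = _zcrypt_send_cprb(ZCRYPT_XFLAG_USERSPACE, perms, &tr, &xcrb);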
+ */ +#define ZCRYPT_XFLAG_USERSPACE 0x0001 /* data ptrs address userspace */ +#define ZCRYPT_XFLAG_NOMEMALLOC 0x0002 /* do not allocate memory via kmalloc */ + struct zcrypt_ops { long (*rsa_modexpo)(struct zcrypt_queue *, struct ica_rsa_modexpo *, struct ap_message *); @@ -132,6 +139,8 @@ extern atomic_t zcrypt_rescan_req; extern spinlock_t zcrypt_list_lock; extern struct list_head zcrypt_card_list; +extern unsigned int zcrypt_mempool_threshold; + #define for_each_zcrypt_card(_zc) \ list_for_each_entry(_zc, &zcrypt_card_list, list) @@ -161,9 +170,10 @@ void zcrypt_msgtype_unregister(struct zcrypt_ops *); struct zcrypt_ops *zcrypt_msgtype(unsigned char *, int); int zcrypt_api_init(void); void zcrypt_api_exit(void); -long zcrypt_send_cprb(struct ica_xcRB *xcRB); -long zcrypt_send_ep11_cprb(struct ep11_urb *urb); -void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus); +long zcrypt_send_cprb(struct ica_xcRB *xcRB, u32 xflags); +long zcrypt_send_ep11_cprb(struct ep11_urb *urb, u32 xflags); +void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus, + int maxcard, int maxqueue); int zcrypt_device_status_ext(int card, int queue, struct zcrypt_device_status_ext *devstatus); diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c index 43a27cb3db847e..b975a3728c2368 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.c +++ b/drivers/s390/crypto/zcrypt_ccamisc.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/random.h> @@ -29,16 +30,31 @@ /* Size of vardata block used for some of the cca requests/replies */ #define VARDATASIZE 4096 -struct cca_info_list_entry { - struct list_head list; - u16 cardnr; - u16 domain; - struct cca_info info; -}; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used + * when alloc_and_prep_cprbmem() is called with the xflag + * ZCRYPT_XFLAG_NOMEMALLOC. The cprb memory needs to hold + * space for request AND reply! + */ +#define CPRB_MEMPOOL_ITEM_SIZE (16 * 1024) +static mempool_t *cprb_mempool; -/* a list with cca_info_list_entry entries */ -static LIST_HEAD(cca_info_list); -static DEFINE_SPINLOCK(cca_info_list_lock); +/* + * This is pre-allocated memory for the device status array + * used within the findcard() functions. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaptation if more + * than 128 cards or domains are to be supported.
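A sketch of the usage pattern intended for this pre-allocated status array, serialized by dev_status_mem_mutex and sized by the defines that follow; illustrative only, the real consumers are the reworked findcard() helpers later in this series:

struct zcrypt_device_status_ext *device_status;

mutex_lock(&dev_status_mem_mutex);
device_status = (struct zcrypt_device_status_ext *)dev_status_mem;
memset(device_status, 0, ZCRYPT_DEV_STATUS_EXT_SIZE);
zcrypt_device_status_mask_ext(device_status,
			      ZCRYPT_DEV_STATUS_CARD_MAX,
			      ZCRYPT_DEV_STATUS_QUEUE_MAX);
/* ... scan the 128 x 128 entries for matching cards/queues ... */
mutex_unlock(&dev_status_mem_mutex);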
/* * Simple check if the token is a valid CCA secure AES data key @@ -219,19 +235,27 @@ EXPORT_SYMBOL(cca_check_sececckeytoken); static int alloc_and_prep_cprbmem(size_t paramblen, u8 **p_cprb_mem, struct CPRBX **p_req_cprb, - struct CPRBX **p_rep_cprb) + struct CPRBX **p_rep_cprb, + u32 xflags) { - u8 *cprbmem; + u8 *cprbmem = NULL; size_t cprbplusparamblen = sizeof(struct CPRBX) + paramblen; + size_t len = 2 * cprbplusparamblen; struct CPRBX *preqcblk, *prepcblk; /* * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kcalloc(2, cprbplusparamblen, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprbmem = mempool_alloc_preallocated(cprb_mempool); + } else { + cprbmem = kmalloc(len, GFP_KERNEL); + } if (!cprbmem) return -ENOMEM; + memset(cprbmem, 0, len); preqcblk = (struct CPRBX *)cprbmem; prepcblk = (struct CPRBX *)(cprbmem + cprbplusparamblen); @@ -261,11 +285,15 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * with zeros before freeing (useful if there was some * clear key material in there). */ -static void free_cprbmem(void *mem, size_t paramblen, int scrub) +static void free_cprbmem(void *mem, size_t paramblen, bool scrub, u32 xflags) { - if (scrub) + if (mem && scrub) memzero_explicit(mem, 2 * (sizeof(struct CPRBX) + paramblen)); - kfree(mem); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); } /* @@ -290,7 +318,7 @@ static inline void prep_xcrb(struct ica_xcRB *pxcrb, * Generate (random) CCA AES DATA secure key. */ int cca_genseckey(u16 cardnr, u16 domain, - u32 keybitsize, u8 *seckey) + u32 keybitsize, u8 *seckey, u32 xflags) { int i, rc, keysize; int seckeysize; @@ -332,7 +360,8 @@ int cca_genseckey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -379,7 +408,7 @@ int cca_genseckey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n", __func__, (int)cardnr, (int)domain, rc); @@ -424,7 +453,7 @@ int cca_genseckey(u16 cardnr, u16 domain, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_genseckey); @@ -433,7 +462,7 @@ EXPORT_SYMBOL(cca_genseckey); /* * Generate a CCA AES DATA secure key with given key value.
*/ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey) + const u8 *clrkey, u8 *seckey, u32 xflags) { int rc, keysize, seckeysize; u8 *mem, *ptr; @@ -473,7 +502,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -517,7 +547,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -563,7 +593,7 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE); out: - free_cprbmem(mem, PARMBSIZE, 1); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_clr2seckey); @@ -573,7 +603,7 @@ EXPORT_SYMBOL(cca_clr2seckey); */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype) + u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -619,7 +649,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -644,7 +675,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -712,7 +743,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain, *protkeylen = prepparm->lv3.ckb.len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_sec2protkey); @@ -737,7 +768,7 @@ static const u8 aes_cipher_key_skeleton[] = { * Generate (random) CCA AES CIPHER secure key. 
*/ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize) + u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; u8 *mem, *ptr; @@ -813,7 +844,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, struct cipherkeytoken *t; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -872,7 +904,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -923,7 +955,7 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, *keybufsize = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } EXPORT_SYMBOL(cca_gencipherkey); @@ -938,7 +970,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, const u8 *clr_key_value, int clr_key_bit_size, u8 *key_token, - int *key_token_size) + int *key_token_size, + u32 xflags) { int rc, n; u8 *mem, *ptr; @@ -989,7 +1022,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1038,7 +1072,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1077,7 +1111,7 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain, *key_token_size = t->len; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, false, xflags); return rc; } @@ -1085,23 +1119,31 @@ out: * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize) + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, u32 xflags) { int rc; - u8 *token; + void *mem; int tokensize; - u8 exorbuf[32]; + u8 *token, exorbuf[32]; struct cipherkeytoken *t; /* fill exorbuf with random data */ get_random_bytes(exorbuf, sizeof(exorbuf)); - /* allocate space for the key token to build */ - token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL); - if (!token) + /* + * Allocate space for the key token to build. + * As we only need up to MAXCCAVLSCTOKENSIZE bytes for this, + * we use the already existing cprb mempool to satisfy this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM;
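/*
 * Aside (not part of the patch): the two branches above differ in
 * fallback behavior. mempool_alloc_preallocated() only hands out one
 * of the elements reserved at pool creation time, never sleeps and
 * returns NULL once the reserve is exhausted. mempool_alloc() with
 * GFP_KERNEL first tries the pool's underlying kmalloc() and may sleep
 * until an element is freed, so from process context it practically
 * never returns NULL. The -ENOMEM check above therefore matters mostly
 * on the ZCRYPT_XFLAG_NOMEMALLOC path.
 */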
/* prepare the token with the key skeleton */ + token = (u8 *)mem; tokensize = SIZEOF_SKELETON; memcpy(token, aes_cipher_key_skeleton, tokensize); @@ -1120,28 +1162,28 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, * 4/4 COMPLETE the secure cipher key import */ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART", - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - clrkey, keybitsize, token, &tokensize); + clrkey, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL, - exorbuf, keybitsize, token, &tokensize); + exorbuf, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); goto out; } rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL, - NULL, keybitsize, token, &tokensize); + NULL, keybitsize, token, &tokensize, xflags); if (rc) { ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n", __func__, rc); @@ -1158,7 +1200,7 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags, *keybufsize = tokensize; out: - kfree(token); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(cca_clr2cipherkey); @@ -1167,7 +1209,8 @@ EXPORT_SYMBOL(cca_clr2cipherkey); /* * Derive protected key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { int rc; u8 *mem, *ptr; @@ -1219,7 +1262,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, int keytoklen = ((struct cipherkeytoken *)ckey)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1249,7 +1293,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1323,7 +1367,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, *protkeylen = prepparm->vud.ckb.keylen; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_cipher2protkey); @@ -1332,7 +1376,7 @@ EXPORT_SYMBOL(cca_cipher2protkey); /* * Derive protected key from CCA ECC secure private key.
*/ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags) { int rc; u8 *mem, *ptr; @@ -1382,7 +1426,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int keylen = ((struct eccprivkeytoken *)key)->len; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1412,7 +1457,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1470,7 +1515,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, *protkeytype = PKEY_KEYTYPE_ECC; out: - free_cprbmem(mem, PARMBSIZE, 0); + free_cprbmem(mem, PARMBSIZE, true, xflags); return rc; } EXPORT_SYMBOL(cca_ecc2protkey); @@ -1481,7 +1526,8 @@ EXPORT_SYMBOL(cca_ecc2protkey); int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen) + u8 *varray, size_t *varraylen, + u32 xflags) { int rc; u16 len; @@ -1505,7 +1551,8 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } __packed * prepparm; /* get already prepared memory for 2 cprbs with param block each */ - rc = alloc_and_prep_cprbmem(parmbsize, &mem, &preqcblk, &prepcblk); + rc = alloc_and_prep_cprbmem(parmbsize, &mem, + &preqcblk, &prepcblk, xflags); if (rc) return rc; @@ -1526,7 +1573,7 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk); /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */ - rc = zcrypt_send_cprb(&xcrb); + rc = zcrypt_send_cprb(&xcrb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -1573,94 +1620,21 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain, } out: - free_cprbmem(mem, parmbsize, 0); + free_cprbmem(mem, parmbsize, false, xflags); return rc; } EXPORT_SYMBOL(cca_query_crypto_facility); -static int cca_info_cache_fetch(u16 cardnr, u16 domain, struct cca_info *ci) -{ - int rc = -ENOENT; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && ptr->domain == domain) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&cca_info_list_lock); - - return rc; -} - -static void cca_info_cache_update(u16 cardnr, u16 domain, - const struct cca_info *ci) -{ - int found = 0; - struct cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&cca_info_list_lock); - return; - } - ptr->cardnr = cardnr; - ptr->domain = domain; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &cca_info_list); - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void cca_info_cache_scrub(u16 cardnr, u16 domain) -{ - struct 
cca_info_list_entry *ptr; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry(ptr, &cca_info_list, list) { - if (ptr->cardnr == cardnr && - ptr->domain == domain) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&cca_info_list_lock); -} - -static void __exit mkvp_cache_free(void) -{ - struct cca_info_list_entry *ptr, *pnext; - - spin_lock_bh(&cca_info_list_lock); - list_for_each_entry_safe(ptr, pnext, &cca_info_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&cca_info_list_lock); -} - /* - * Fetch cca_info values via query_crypto_facility from adapter. + * Fetch cca_info values about a CCA queue via + * query_crypto_facility from adapter. */ -static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) +int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags) { + void *mem; int rc, found = 0; size_t rlen, vlen; - u8 *rarray, *varray, *pg; + u8 *rarray, *varray; struct zcrypt_device_status_ext devstat; memset(ci, 0, sizeof(*ci)); @@ -1671,17 +1645,22 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) return rc; ci->hwtype = devstat.hwtype; - /* prep page for rule array and var array use */ - pg = (u8 *)__get_free_page(GFP_KERNEL); - if (!pg) + /* + * Prep memory for rule array and var array use. + * Use the cprb mempool for this. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? + mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) return -ENOMEM; - rarray = pg; - varray = pg + PAGE_SIZE / 2; + rarray = (u8 *)mem; + varray = (u8 *)mem + PAGE_SIZE / 2; rlen = vlen = PAGE_SIZE / 2; /* QF for this card/domain */ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 10 * 8 && vlen >= 204) { memcpy(ci->serial, rarray, 8); ci->new_asym_mk_state = (char)rarray[4 * 8]; @@ -1708,7 +1687,7 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) goto out; rlen = vlen = PAGE_SIZE / 2; rc = cca_query_crypto_facility(cardnr, domain, "STATICSB", - rarray, &rlen, varray, &vlen); + rarray, &rlen, varray, &vlen, xflags); if (rc == 0 && rlen >= 13 * 8 && vlen >= 240) { ci->new_apka_mk_state = (char)rarray[10 * 8]; ci->cur_apka_mk_state = (char)rarray[11 * 8]; @@ -1723,177 +1702,32 @@ static int fetch_cca_info(u16 cardnr, u16 domain, struct cca_info *ci) } out: - free_page((unsigned long)pg); + mempool_free(mem, cprb_mempool); return found == 2 ? 0 : -ENOENT; } - -/* - * Fetch cca information about a CCA queue. - */ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify) -{ - int rc; - - rc = cca_info_cache_fetch(card, dom, ci); - if (rc || verify) { - rc = fetch_cca_info(card, dom, ci); - if (rc == 0) - cca_info_cache_update(card, dom, ci); - } - - return rc; -} EXPORT_SYMBOL(cca_get_info); -/* - * Search for a matching crypto card based on the - * Master Key Verification Pattern given. 
- */ -static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain, - int verify, int minhwtype) -{ - struct zcrypt_device_status_ext *device_status; - u16 card, dom; - struct cca_info ci; - int i, rc, oi = -1; - - /* mkvp must not be zero, minhwtype needs to be >= 0 */ - if (mkvp == 0 || minhwtype < 0) - return -EINVAL; - - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); - - /* walk through all crypto cards */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - if (device_status[i].online && - device_status[i].functions & 0x04) { - /* enabled CCA card, check current mkvp from cache */ - if (cca_info_cache_fetch(card, dom, &ci) == 0 && - ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) { - if (!verify) - break; - /* verify: refresh card info */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - } - } - } else { - /* Card is offline and/or not a CCA card. */ - /* del mkvp entry from cache if it exists */ - cca_info_cache_scrub(card, dom); - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT) { - /* nothing found, so this time without cache */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { - if (!(device_status[i].online && - device_status[i].functions & 0x04)) - continue; - card = AP_QID_CARD(device_status[i].qid); - dom = AP_QID_QUEUE(device_status[i].qid); - /* fresh fetch mkvp from adapter */ - if (fetch_cca_info(card, dom, &ci) == 0) { - cca_info_cache_update(card, dom, &ci); - if (ci.hwtype >= minhwtype && - ci.cur_aes_mk_state == '2' && - ci.cur_aes_mkvp == mkvp) - break; - if (ci.hwtype >= minhwtype && - ci.old_aes_mk_state == '2' && - ci.old_aes_mkvp == mkvp && - oi < 0) - oi = i; - } - } - if (i >= MAX_ZDEV_ENTRIES_EXT && oi >= 0) { - /* old mkvp matched, use this card then */ - card = AP_QID_CARD(device_status[oi].qid); - dom = AP_QID_QUEUE(device_status[oi].qid); - } - } - if (i < MAX_ZDEV_ENTRIES_EXT || oi >= 0) { - if (pcardnr) - *pcardnr = card; - if (pdomain) - *pdomain = dom; - rc = (i < MAX_ZDEV_ENTRIES_EXT ? 0 : 1); - } else { - rc = -ENODEV; - } - - kvfree(device_status); - return rc; -} - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key token. 
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify) -{ - u64 mkvp; - int minhwtype = 0; - const struct keytoken_header *hdr = (struct keytoken_header *)key; - - if (hdr->type != TOKTYPE_CCA_INTERNAL) - return -EINVAL; - - switch (hdr->version) { - case TOKVER_CCA_AES: - mkvp = ((struct secaeskeytoken *)key)->mkvp; - break; - case TOKVER_CCA_VLSC: - mkvp = ((struct cipherkeytoken *)key)->mkvp0; - minhwtype = AP_DEVICE_TYPE_CEX6; - break; - default: - return -EINVAL; - } - - return findcard(mkvp, pcardnr, pdomain, verify, minhwtype); -} -EXPORT_SYMBOL(cca_findcard); - -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify) + u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, curmatch, oldmatch, rc = 0; + int i, card, dom, curmatch, oldmatch; struct cca_info ci; + u32 _nr_apqns = 0; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqns */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1909,7 +1743,7 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, if (domain != 0xFFFF && dom != domain) continue; /* get cca info on this apqn */ - if (cca_get_info(card, dom, &ci, verify)) + if (cca_get_info(card, dom, &ci, xflags)) continue; /* current master key needs to be valid */ if (mktype == AES_MK_SET && ci.cur_aes_mk_state != '2') @@ -1939,27 +1773,41 @@ int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criteria, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + /* release the device status memory */ + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(cca_findcard2);
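Aside: with this rework cca_findcard2() no longer kmalloc()s and returns the result array; the caller supplies it and passes its capacity in *nr_apqns. A hypothetical caller under the new contract, sketch only and not part of the patch (mkvp stands for a master key verification pattern obtained elsewhere):

	u32 apqns[64], nr_apqns = ARRAY_SIZE(apqns);
	int rc;

	rc = cca_findcard2(apqns, &nr_apqns, 0xFFFF, 0xFFFF,
			   0, AES_MK_SET, mkvp, 0, 0);
	if (rc)
		return rc;	/* -ENODEV if no apqn matched */
	/* nr_apqns now holds the number of filled entries; each entry
	 * is (cardnr << 16) | domain and may be cast to struct pkey_apqn
	 */

Dropping the per-call allocation removes one failure mode and suits callers that must run with ZCRYPT_XFLAG_NOMEMALLOC set.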
-void __exit zcrypt_ccamisc_exit(void) +int __init zcrypt_ccamisc_init(void) +{ + /* Pre-allocate a small memory pool for cca cprbs. */ + cprb_mempool = mempool_create_kmalloc_pool(zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in findcard() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ccamisc_exit(void) { - mkvp_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ccamisc.h b/drivers/s390/crypto/zcrypt_ccamisc.h index 26bdca702523dc..1ecc4e37e9ad3b 100644 --- a/drivers/s390/crypto/zcrypt_ccamisc.h +++ b/drivers/s390/crypto/zcrypt_ccamisc.h @@ -160,44 +160,47 @@ int cca_check_sececckeytoken(debug_info_t *dbg, int dbflvl, /* * Generate (random) CCA AES DATA secure key. */ -int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey); +int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey, + u32 xflags); /* * Generate CCA AES DATA secure key with given clear key value. */ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize, - const u8 *clrkey, u8 *seckey); + const u8 *clrkey, u8 *seckey, u32 xflags); /* * Derive protected key from a CCA AES DATA secure key. */ int cca_sec2protkey(u16 cardnr, u16 domain, const u8 *seckey, u8 *protkey, u32 *protkeylen, - u32 *protkeytype); + u32 *protkeytype, u32 xflags); /* * Generate (random) CCA AES CIPHER secure key. */ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize); + u8 *keybuf, u32 *keybufsize, u32 xflags); /* * Derive protected key from CCA AES cipher secure key. */ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); /* * Build CCA AES CIPHER secure key with a given clear key value. */ int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, - const u8 *clrkey, u8 *keybuf, u32 *keybufsize); + const u8 *clrkey, u8 *keybuf, u32 *keybufsize, + u32 xflags); /* * Derive protected key from CCA ECC secure private key. */ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, u32 xflags); /* * Query cryptographic facility from CCA adapter @@ -205,16 +208,8 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key, int cca_query_crypto_facility(u16 cardnr, u16 domain, const char *keyword, u8 *rarray, size_t *rarraylen, - u8 *varray, size_t *varraylen); - -/* - * Search for a matching crypto card based on the Master Key - * Verification Pattern provided inside a secure key. - * Works with CCA AES data and cipher keys. - * Returns < 0 on failure, 0 if CURRENT MKVP matches and - * 1 if OLD MKVP matches.
- */ -int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); + u8 *varray, size_t *varraylen, + u32 xflags); /* * Build a list of cca apqns meeting the following constraints: @@ -224,21 +219,16 @@ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify); * - if minhwtype > 0 only apqns with hwtype >= minhwtype * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp * - if old_mkvp != 0 only apqns where old_mkvp == mkvp - * - if verify is enabled and a cur_mkvp and/or old_mkvp - * value is given, then refetch the cca_info and make sure the current - * cur_mkvp or old_mkvp values of the apqn are used. * The mktype determines which set of master keys to use: * 0 = AES_MK_SET - AES MK set, 1 = APKA MK_SET - APKA MK set - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, - int verify); +int cca_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int mktype, u64 cur_mkvp, u64 old_mkvp, + u32 xflags); #define AES_MK_SET 0 #define APKA_MK_SET 1 @@ -270,8 +260,9 @@ struct cca_info { /* * Fetch cca information about a CCA queue.
*/ -int cca_get_info(u16 card, u16 dom, struct cca_info *ci, int verify); +int cca_get_info(u16 card, u16 dom, struct cca_info *ci, u32 xflags); +int zcrypt_ccamisc_init(void); void zcrypt_ccamisc_exit(void); #endif /* _ZCRYPT_CCAMISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 64df7d2f6266c4..6ba7fbddd3f7c6 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -79,14 +79,13 @@ static ssize_t cca_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct cca_info ci; struct ap_card *ac = to_ap_card(dev); + struct cca_info ci; memset(&ci, 0, sizeof(ci)); if (ap_domain_index >= 0) - cca_get_info(ac->id, ap_domain_index, &ci, zc->online); + cca_get_info(ac->id, ap_domain_index, &ci, 0); return sysfs_emit(buf, "%s\n", ci.serial); } @@ -110,17 +109,17 @@ static ssize_t cca_mkvps_show(struct device *dev, struct device_attribute *attr, char *buf) { + static const char * const new_state[] = { "empty", "partial", "full" }; + static const char * const cao_state[] = { "invalid", "valid" }; struct zcrypt_queue *zq = dev_get_drvdata(dev); - int n = 0; struct cca_info ci; - static const char * const cao_state[] = { "invalid", "valid" }; - static const char * const new_state[] = { "empty", "partial", "full" }; + int n = 0; memset(&ci, 0, sizeof(ci)); cca_get_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &ci, zq->online); + &ci, 0); if (ci.new_aes_mk_state >= '1' && ci.new_aes_mk_state <= '3') n += sysfs_emit_at(buf, n, "AES NEW: %s 0x%016llx\n", @@ -210,13 +209,12 @@ static ssize_t ep11_api_ordinalnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.API_ord_nr > 0) return sysfs_emit(buf, "%u\n", ci.API_ord_nr); @@ -231,13 +229,12 @@ static ssize_t ep11_fw_version_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.FW_version > 0) return sysfs_emit(buf, "%d.%d\n", @@ -254,13 +251,12 @@ static ssize_t ep11_serialnr_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); if (ci.serial[0]) return sysfs_emit(buf, "%16.16s\n", ci.serial); @@ -291,14 +287,13 @@ static ssize_t ep11_card_op_modes_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct zcrypt_card *zc = dev_get_drvdata(dev); - int i, n = 0; - struct ep11_card_info ci; struct ap_card *ac = to_ap_card(dev); + struct ep11_card_info ci; + int i, n = 0; memset(&ci, 0, sizeof(ci)); - ep11_get_card_info(ac->id, &ci, zc->online); + ep11_get_card_info(ac->id, &ci, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (ci.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { @@ -348,7 +343,7 @@ static ssize_t ep11_mkvps_show(struct device *dev, 
if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); if (di.cur_wk_state == '0') { n = sysfs_emit(buf, "WK CUR: %s -\n", @@ -395,7 +390,7 @@ static ssize_t ep11_queue_op_modes_show(struct device *dev, if (zq->online) ep11_get_domain_info(AP_QID_CARD(zq->queue->qid), AP_QID_QUEUE(zq->queue->qid), - &di); + &di, 0); for (i = 0; ep11_op_modes[i].mode_txt; i++) { if (di.op_mode & (1ULL << ep11_op_modes[i].mode_bit)) { diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c index cb7e6da43602d4..2f50fc7b8f614e 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.c +++ b/drivers/s390/crypto/zcrypt_ep11misc.c @@ -10,9 +10,10 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/init.h> +#include <linux/mempool.h> #include <linux/module.h> -#include <linux/slab.h> #include <linux/random.h> +#include <linux/slab.h> #include <asm/zcrypt.h> #include <asm/pkey.h> #include <crypto/aes.h> @@ -30,85 +31,29 @@ static const u8 def_iv[16] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff }; -/* ep11 card info cache */ -struct card_list_entry { - struct list_head list; - u16 cardnr; - struct ep11_card_info info; -}; -static LIST_HEAD(card_list); -static DEFINE_SPINLOCK(card_list_lock); - -static int card_cache_fetch(u16 cardnr, struct ep11_card_info *ci) -{ - int rc = -ENOENT; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(ci, &ptr->info, sizeof(*ci)); - rc = 0; - break; - } - } - spin_unlock_bh(&card_list_lock); - - return rc; -} - -static void card_cache_update(u16 cardnr, const struct ep11_card_info *ci) -{ - int found = 0; - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - memcpy(&ptr->info, ci, sizeof(*ci)); - found = 1; - break; - } - } - if (!found) { - ptr = kmalloc(sizeof(*ptr), GFP_ATOMIC); - if (!ptr) { - spin_unlock_bh(&card_list_lock); - return; - } - ptr->cardnr = cardnr; - memcpy(&ptr->info, ci, sizeof(*ci)); - list_add(&ptr->list, &card_list); - } - spin_unlock_bh(&card_list_lock); -} - -static void card_cache_scrub(u16 cardnr) -{ - struct card_list_entry *ptr; - - spin_lock_bh(&card_list_lock); - list_for_each_entry(ptr, &card_list, list) { - if (ptr->cardnr == cardnr) { - list_del(&ptr->list); - kfree(ptr); - break; - } - } - spin_unlock_bh(&card_list_lock); -} - -static void __exit card_cache_free(void) -{ - struct card_list_entry *ptr, *pnext; +/* + * Cprb memory pool held for urgent cases where no memory + * can be allocated via kmalloc. This pool is only used when + * alloc_cprbmem() is called with the xflag ZCRYPT_XFLAG_NOMEMALLOC. + */ +#define CPRB_MEMPOOL_ITEM_SIZE (8 * 1024) +static mempool_t *cprb_mempool; - spin_lock_bh(&card_list_lock); - list_for_each_entry_safe(ptr, pnext, &card_list, list) { - list_del(&ptr->list); - kfree(ptr); - } - spin_unlock_bh(&card_list_lock); -} +/* + * This is a pre-allocated memory for the device status array + * used within the ep11_findcard2() function. It is currently + * 128 * 128 * 4 bytes = 64 KB big. Usage of this memory is + * controlled via dev_status_mem_mutex. Needs adaptation if more + * than 128 cards or domains are to be supported.
+ */ +#define ZCRYPT_DEV_STATUS_CARD_MAX 128 +#define ZCRYPT_DEV_STATUS_QUEUE_MAX 128 +#define ZCRYPT_DEV_STATUS_ENTRIES (ZCRYPT_DEV_STATUS_CARD_MAX * \ + ZCRYPT_DEV_STATUS_QUEUE_MAX) +#define ZCRYPT_DEV_STATUS_EXT_SIZE (ZCRYPT_DEV_STATUS_ENTRIES * \ + sizeof(struct zcrypt_device_status_ext)) +static void *dev_status_mem; +static DEFINE_MUTEX(dev_status_mem_mutex); static int ep11_kb_split(const u8 *kb, size_t kblen, u32 kbver, struct ep11kblob_header **kbhdr, size_t *kbhdrsize, @@ -411,14 +356,20 @@ EXPORT_SYMBOL(ep11_check_aes_key); /* * Allocate and prepare ep11 cprb plus additional payload. */ -static inline struct ep11_cprb *alloc_cprb(size_t payload_len) +static void *alloc_cprbmem(size_t payload_len, u32 xflags) { size_t len = sizeof(struct ep11_cprb) + payload_len; - struct ep11_cprb *cprb; + struct ep11_cprb *cprb = NULL; - cprb = kzalloc(len, GFP_KERNEL); + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) { + if (len <= CPRB_MEMPOOL_ITEM_SIZE) + cprb = mempool_alloc_preallocated(cprb_mempool); + } else { + cprb = kmalloc(len, GFP_KERNEL); + } if (!cprb) return NULL; + memset(cprb, 0, len); cprb->cprb_len = sizeof(struct ep11_cprb); cprb->cprb_ver_id = 0x04; @@ -430,6 +381,20 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len) } /* + * Free ep11 cprb buffer space. + */ +static void free_cprbmem(void *mem, size_t payload_len, bool scrub, u32 xflags) +{ + if (mem && scrub) + memzero_explicit(mem, sizeof(struct ep11_cprb) + payload_len); + + if (xflags & ZCRYPT_XFLAG_NOMEMALLOC) + mempool_free(mem, cprb_mempool); + else + kfree(mem); +} + +/* * Some helper functions related to ASN1 encoding. * Limited to length info <= 2 byte. */ @@ -489,6 +454,7 @@ static inline void prep_urb(struct ep11_urb *u, struct ep11_cprb *req, size_t req_len, struct ep11_cprb *rep, size_t rep_len) { + memset(u, 0, sizeof(*u)); u->targets = (u8 __user *)t; u->targets_num = nt; u->req = (u8 __user *)req; @@ -583,7 +549,7 @@ static int check_reply_cprb(const struct ep11_cprb *rep, const char *func) * Helper function which does an ep11 query with given query type. 
*/ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, - size_t buflen, u8 *buf) + size_t buflen, u8 *buf, u32 xflags) { struct ep11_info_req_pl { struct pl_head head; @@ -605,11 +571,11 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api = EP11_API_V1, rc = -ENOMEM; /* request cprb and payload */ - req = alloc_cprb(sizeof(struct ep11_info_req_pl)); + req = alloc_cprbmem(sizeof(struct ep11_info_req_pl), xflags); if (!req) goto out; req_pl = (struct ep11_info_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -621,22 +587,19 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, req_pl->query_subtype_len = sizeof(u32); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct ep11_info_rep_pl) + buflen); + rep = alloc_cprbmem(sizeof(struct ep11_info_rep_pl) + buflen, xflags); if (!rep) goto out; rep_pl = (struct ep11_info_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = cardnr; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + sizeof(*req_pl), rep, sizeof(*rep) + sizeof(*rep_pl) + buflen); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)cardnr, (int)domain, rc); @@ -667,16 +630,15 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type, memcpy(buf, ((u8 *)rep_pl) + sizeof(*rep_pl), rep_pl->data_len); out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, 0, false, xflags); return rc; } /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags) { int rc; struct ep11_module_query_info { @@ -706,30 +668,26 @@ int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify) u32 max_CP_index; } __packed * pmqi = NULL; - rc = card_cache_fetch(card, info); - if (rc || verify) { - pmqi = kmalloc(sizeof(*pmqi), GFP_KERNEL); - if (!pmqi) - return -ENOMEM; - rc = ep11_query_info(card, AUTOSEL_DOM, - 0x01 /* module info query */, - sizeof(*pmqi), (u8 *)pmqi); - if (rc) { - if (rc == -ENODEV) - card_cache_scrub(card); - goto out; - } - memset(info, 0, sizeof(*info)); - info->API_ord_nr = pmqi->API_ord_nr; - info->FW_version = - (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; - memcpy(info->serial, pmqi->serial, sizeof(info->serial)); - info->op_mode = pmqi->op_mode; - card_cache_update(card, info); - } + /* use the cprb mempool to satisfy this short term mem alloc */ + pmqi = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!pmqi) + return -ENOMEM; + rc = ep11_query_info(card, AUTOSEL_DOM, + 0x01 /* module info query */, + sizeof(*pmqi), (u8 *)pmqi, xflags); + if (rc) + goto out; + + memset(info, 0, sizeof(*info)); + info->API_ord_nr = pmqi->API_ord_nr; + info->FW_version = (pmqi->FW_major_vers << 8) + pmqi->FW_minor_vers; + memcpy(info->serial, pmqi->serial, sizeof(info->serial)); + info->op_mode = pmqi->op_mode; out: - kfree(pmqi); + mempool_free(pmqi, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_get_card_info); @@ -737,7 +695,8 @@ EXPORT_SYMBOL(ep11_get_card_info); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags) { int rc; struct ep11_domain_query_info { @@ -746,36 +705,32 @@ int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info) u8 new_WK_VP[32]; u32 dom_flags; u64 op_mode; - } __packed * p_dom_info; - - p_dom_info = kmalloc(sizeof(*p_dom_info), GFP_KERNEL); - if (!p_dom_info) - return -ENOMEM; + } __packed dom_query_info; rc = ep11_query_info(card, domain, 0x03 /* domain info query */, - sizeof(*p_dom_info), (u8 *)p_dom_info); + sizeof(dom_query_info), (u8 *)&dom_query_info, + xflags); if (rc) goto out; memset(info, 0, sizeof(*info)); info->cur_wk_state = '0'; info->new_wk_state = '0'; - if (p_dom_info->dom_flags & 0x10 /* left imprint mode */) { - if (p_dom_info->dom_flags & 0x02 /* cur wk valid */) { + if (dom_query_info.dom_flags & 0x10 /* left imprint mode */) { + if (dom_query_info.dom_flags & 0x02 /* cur wk valid */) { info->cur_wk_state = '1'; - memcpy(info->cur_wkvp, p_dom_info->cur_WK_VP, 32); + memcpy(info->cur_wkvp, dom_query_info.cur_WK_VP, 32); } - if (p_dom_info->dom_flags & 0x04 || /* new wk present */ - p_dom_info->dom_flags & 0x08 /* new wk committed */) { + if (dom_query_info.dom_flags & 0x04 || /* new wk present */ + dom_query_info.dom_flags & 0x08 /* new wk committed */) { info->new_wk_state = - p_dom_info->dom_flags & 0x08 ? '2' : '1'; - memcpy(info->new_wkvp, p_dom_info->new_WK_VP, 32); + dom_query_info.dom_flags & 0x08 ? 
'2' : '1'; + memcpy(info->new_wkvp, dom_query_info.new_WK_VP, 32); } } - info->op_mode = p_dom_info->op_mode; + info->op_mode = dom_query_info.op_mode; out: - kfree(p_dom_info); return rc; } EXPORT_SYMBOL(ep11_get_domain_info); @@ -788,7 +743,7 @@ EXPORT_SYMBOL(ep11_get_domain_info); static int _ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct keygen_req_pl { struct pl_head head; @@ -823,7 +778,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -851,7 +806,7 @@ static int _ep11_genaeskey(u16 card, u16 domain, pinblob_size = EP11_PINBLOB_V1_BYTES; } req_pl_size = sizeof(struct keygen_req_pl) + ASN1TAGLEN(pinblob_size); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct keygen_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -877,22 +832,19 @@ static int _ep11_genaeskey(u16 card, u16 domain, *p++ = pinblob_size; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct keygen_rep_pl)); + rep = alloc_cprbmem(sizeof(struct keygen_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct keygen_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -925,14 +877,13 @@ static int _ep11_genaeskey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, 0, false, xflags); + free_cprbmem(rep, sizeof(struct keygen_rep_pl), true, xflags); return rc; } int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver) + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -953,7 +904,7 @@ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return rc; rc = _ep11_genaeskey(card, domain, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -973,7 +924,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, u16 mode, u32 mech, const u8 *iv, const u8 *key, size_t keysize, const u8 *inbuf, size_t inbufsize, - u8 *outbuf, size_t *outbufsize) + u8 *outbuf, size_t *outbufsize, + u32 xflags) { struct crypt_req_pl { struct pl_head head; @@ -1000,8 +952,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; - size_t req_pl_size, rep_pl_size; + struct ep11_urb urb; + size_t req_pl_size, rep_pl_size = 0; int n, api = EP11_API_V1, rc = -ENOMEM; u8 *p; @@ -1012,7 +964,7 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct crypt_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + ASN1TAGLEN(inbufsize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct crypt_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1034,22 +986,19 @@ static int ep11_cryptsingle(u16 card, u16 domain, /* reply cprb and payload, assume out data size <= in data size + 32 */ rep_pl_size = sizeof(struct crypt_rep_pl) + ASN1TAGLEN(inbufsize + 32); - rep = alloc_cprb(rep_pl_size); + rep = alloc_cprbmem(rep_pl_size, xflags); if (!rep) goto out; rep_pl = (struct crypt_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + rep_pl_size); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1095,9 +1044,8 @@ static int ep11_cryptsingle(u16 card, u16 domain, *outbufsize = n; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, rep_pl_size, true, xflags); return rc; } @@ -1106,7 +1054,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, const u8 *enckey, size_t enckeysize, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, - u8 *keybuf, size_t *keybufsize) + u8 *keybuf, size_t *keybufsize, u32 xflags) { struct uw_req_pl { struct pl_head head; @@ -1143,7 +1091,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, struct ep11_cprb *req = NULL, *rep = NULL; size_t req_pl_size, pinblob_size = 0; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; int api, rc = -ENOMEM; u8 *p; @@ -1161,7 +1109,7 @@ static int _ep11_unwrapkey(u16 card, u16 domain, req_pl_size = sizeof(struct uw_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keksize) + ASN1TAGLEN(0) + ASN1TAGLEN(pinblob_size) + ASN1TAGLEN(enckeysize); - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; req_pl = (struct uw_req_pl *)(((u8 *)req) + sizeof(*req)); @@ -1197,22 +1145,19 @@ static int _ep11_unwrapkey(u16 card, u16 domain, p += asn1tag_write(p, 0x04, enckey, enckeysize); /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct uw_rep_pl)); + rep = alloc_cprbmem(sizeof(struct uw_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct uw_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1245,9 +1190,8 @@ static int _ep11_unwrapkey(u16 card, u16 domain, *keybufsize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct uw_rep_pl), true, xflags); return rc; } @@ -1257,7 +1201,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, u32 mech, const u8 *iv, u32 keybitsize, u32 keygenflags, u8 *keybuf, u32 *keybufsize, - u8 keybufver) + u8 keybufver, u32 xflags) { struct ep11kblob_header *hdr; size_t hdr_size, pl_size; @@ -1271,7 +1215,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, rc = _ep11_unwrapkey(card, domain, kek, keksize, enckey, enckeysize, mech, iv, keybitsize, keygenflags, - pl, &pl_size); + pl, &pl_size, xflags); if (rc) return rc; @@ -1290,7 +1234,7 @@ static int ep11_unwrapkey(u16 card, u16 domain, static int _ep11_wrapkey(u16 card, u16 domain, const u8 *key, size_t keysize, u32 mech, const u8 *iv, - u8 *databuf, size_t *datasize) + u8 *databuf, size_t *datasize, u32 xflags) { struct wk_req_pl { struct pl_head head; @@ -1319,7 +1263,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, } __packed * rep_pl; struct ep11_cprb *req = NULL, *rep = NULL; struct ep11_target_dev target; - struct ep11_urb *urb = NULL; + struct ep11_urb urb; size_t req_pl_size; int api, rc = -ENOMEM; u8 *p; @@ -1327,7 +1271,7 @@ static int _ep11_wrapkey(u16 card, u16 domain, /* request cprb and payload */ req_pl_size = sizeof(struct wk_req_pl) + (iv ? 
16 : 0) + ASN1TAGLEN(keysize) + 4; - req = alloc_cprb(req_pl_size); + req = alloc_cprbmem(req_pl_size, xflags); if (!req) goto out; if (!mech || mech == 0x80060001) @@ -1357,22 +1301,19 @@ static int _ep11_wrapkey(u16 card, u16 domain, *p++ = 0; /* reply cprb and payload */ - rep = alloc_cprb(sizeof(struct wk_rep_pl)); + rep = alloc_cprbmem(sizeof(struct wk_rep_pl), xflags); if (!rep) goto out; rep_pl = (struct wk_rep_pl *)(((u8 *)rep) + sizeof(*rep)); /* urb and target */ - urb = kmalloc(sizeof(*urb), GFP_KERNEL); - if (!urb) - goto out; target.ap_id = card; target.dom_id = domain; - prep_urb(urb, &target, 1, + prep_urb(&urb, &target, 1, req, sizeof(*req) + req_pl_size, rep, sizeof(*rep) + sizeof(*rep_pl)); - rc = zcrypt_send_ep11_cprb(urb); + rc = zcrypt_send_ep11_cprb(&urb, xflags); if (rc) { ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n", __func__, (int)card, (int)domain, rc); @@ -1405,18 +1346,18 @@ static int _ep11_wrapkey(u16 card, u16 domain, *datasize = rep_pl->data_len; out: - kfree(req); - kfree(rep); - kfree(urb); + free_cprbmem(req, req_pl_size, true, xflags); + free_cprbmem(rep, sizeof(struct wk_rep_pl), true, xflags); return rc; } int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype) + u32 keytype, u32 xflags) { int rc; - u8 encbuf[64], *kek = NULL; + void *mem; + u8 encbuf[64], *kek; size_t clrkeylen, keklen, encbuflen = sizeof(encbuf); if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) { @@ -1427,18 +1368,24 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, return -EINVAL; } - /* allocate memory for the temp kek */ + /* + * Allocate space for the temp kek. + * As we only need up to MAXEP11AESKEYBLOBSIZE bytes for this, + * we use the already existing cprb mempool to satisfy this + * short term memory requirement. + */ + mem = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ?
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_KERNEL); + if (!mem) + return -ENOMEM; + kek = (u8 *)mem; keklen = MAXEP11AESKEYBLOBSIZE; - kek = kmalloc(keklen, GFP_ATOMIC); - if (!kek) { - rc = -ENOMEM; - goto out; - } /* Step 1: generate AES 256 bit random kek key */ rc = _ep11_genaeskey(card, domain, 256, 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */ - kek, &keklen); + kek, &keklen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n", __func__, rc); @@ -1447,7 +1394,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 2: encrypt clear key value with the kek key */ rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen, - clrkey, clrkeylen, encbuf, &encbuflen); + clrkey, clrkeylen, encbuf, &encbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n", __func__, rc); @@ -1457,22 +1404,23 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, /* Step 3: import the encrypted key value as a new key */ rc = ep11_unwrapkey(card, domain, kek, keklen, encbuf, encbuflen, 0, def_iv, - keybitsize, 0, keybuf, keybufsize, keytype); + keybitsize, 0, keybuf, keybufsize, keytype, xflags); if (rc) { - ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n", + ZCRYPT_DBF_ERR("%s importing key value as new key failed, rc=%d\n", __func__, rc); goto out; } out: - kfree(kek); + mempool_free(mem, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_clr2keyblob); int ep11_kblob2protkey(u16 card, u16 dom, const u8 *keyblob, u32 keybloblen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype) + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags) { struct ep11kblob_header *hdr; struct ep11keyblob *key; @@ -1498,15 +1446,29 @@ int ep11_kblob2protkey(u16 card, u16 dom, } /* !!! hdr is no longer a valid header !!! */ - /* alloc temp working buffer */ + /* need a temp working buffer */ wkbuflen = (keylen + AES_BLOCK_SIZE) & (~(AES_BLOCK_SIZE - 1)); - wkbuf = kmalloc(wkbuflen, GFP_ATOMIC); - if (!wkbuf) - return -ENOMEM; + if (wkbuflen > CPRB_MEMPOOL_ITEM_SIZE) { + /* this should never happen */ + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s wkbuflen %d > cprb mempool item size %d, rc=%d\n", + __func__, (int)wkbuflen, CPRB_MEMPOOL_ITEM_SIZE, rc); + return rc; + } + /* use the cprb mempool to satisfy this short term mem allocation */ + wkbuf = (xflags & ZCRYPT_XFLAG_NOMEMALLOC) ? 
+ mempool_alloc_preallocated(cprb_mempool) : + mempool_alloc(cprb_mempool, GFP_ATOMIC); + if (!wkbuf) { + rc = -ENOMEM; + ZCRYPT_DBF_WARN("%s allocating tmp buffer via cprb mempool failed, rc=%d\n", + __func__, rc); + return rc; + } /* ep11 secure key -> protected key + info */ rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen, - 0, def_iv, wkbuf, &wkbuflen); + 0, def_iv, wkbuf, &wkbuflen, xflags); if (rc) { ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n", __func__, rc); @@ -1573,37 +1535,32 @@ int ep11_kblob2protkey(u16 card, u16 dom, *protkeylen = wki->pkeysize; out: - kfree(wkbuf); + mempool_free(wkbuf, cprb_mempool); return rc; } EXPORT_SYMBOL(ep11_kblob2protkey); -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp) +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags) { struct zcrypt_device_status_ext *device_status; - u32 *_apqns = NULL, _nr_apqns = 0; - int i, card, dom, rc = -ENOMEM; struct ep11_domain_info edi; struct ep11_card_info eci; + u32 _nr_apqns = 0; + int i, card, dom; - /* fetch status of all crypto cards */ - device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT, - sizeof(struct zcrypt_device_status_ext), - GFP_KERNEL); - if (!device_status) - return -ENOMEM; - zcrypt_device_status_mask_ext(device_status); + /* occupy the device status memory */ + mutex_lock(&dev_status_mem_mutex); + memset(dev_status_mem, 0, ZCRYPT_DEV_STATUS_EXT_SIZE); + device_status = (struct zcrypt_device_status_ext *)dev_status_mem; - /* allocate 1k space for up to 256 apqns */ - _apqns = kmalloc_array(256, sizeof(u32), GFP_KERNEL); - if (!_apqns) { - kvfree(device_status); - return -ENOMEM; - } + /* fetch crypto device status into this struct */ + zcrypt_device_status_mask_ext(device_status, + ZCRYPT_DEV_STATUS_CARD_MAX, + ZCRYPT_DEV_STATUS_QUEUE_MAX); /* walk through all the crypto apqns */ - for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) { + for (i = 0; i < ZCRYPT_DEV_STATUS_ENTRIES; i++) { card = AP_QID_CARD(device_status[i].qid); dom = AP_QID_QUEUE(device_status[i].qid); /* check online state */ @@ -1623,14 +1580,14 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; /* check min api version if given */ if (minapi > 0) { - if (ep11_get_card_info(card, &eci, 0)) + if (ep11_get_card_info(card, &eci, xflags)) continue; if (minapi > eci.API_ord_nr) continue; } /* check wkvp if given */ if (wkvp) { - if (ep11_get_domain_info(card, dom, &edi)) + if (ep11_get_domain_info(card, dom, &edi, xflags)) continue; if (edi.cur_wk_state != '1') continue; @@ -1638,27 +1595,40 @@ int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, continue; } /* apqn passed all filtering criteria, add to the array */ - if (_nr_apqns < 256) - _apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); + if (_nr_apqns < *nr_apqns) + apqns[_nr_apqns++] = (((u16)card) << 16) | ((u16)dom); } - /* nothing found ? */ - if (!_nr_apqns) { - kfree(_apqns); - rc = -ENODEV; - } else { - /* no re-allocation, simple return the _apqns array */ - *apqns = _apqns; - *nr_apqns = _nr_apqns; - rc = 0; - } + *nr_apqns = _nr_apqns; - kvfree(device_status); - return rc; + mutex_unlock(&dev_status_mem_mutex); + + return _nr_apqns ? 0 : -ENODEV; } EXPORT_SYMBOL(ep11_findcard2);
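Aside: both misc modules now share the same init/exit shape: create a kmalloc-backed mempool plus one kvmalloc'ed status buffer, and tear both down again on exit. The lifecycle, reduced to a generic sketch with hypothetical names, not part of the patch:

#include <linux/mempool.h>
#include <linux/mm.h>

static mempool_t *pool;
static void *status_buf;

static int sketch_init(int nr_items, size_t item_size, size_t status_size)
{
	pool = mempool_create_kmalloc_pool(nr_items, item_size);
	if (!pool)
		return -ENOMEM;
	status_buf = kvmalloc(status_size, GFP_KERNEL);
	if (!status_buf) {
		mempool_destroy(pool);	/* unwind in reverse order */
		return -ENOMEM;
	}
	return 0;
}

static void sketch_exit(void)
{
	kvfree(status_buf);
	mempool_destroy(pool);
}

Because mempool_free() refills the reserve before falling back to kfree(), a pool created with min_nr elements can always satisfy that many outstanding mempool_alloc_preallocated() calls, as long as every allocation is paired with a free.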
*/ + cprb_mempool = mempool_create_kmalloc_pool(2 * zcrypt_mempool_threshold, + CPRB_MEMPOOL_ITEM_SIZE); + if (!cprb_mempool) + return -ENOMEM; + + /* Pre-allocate one crypto status card struct used in ep11_findcard2() */ + dev_status_mem = kvmalloc(ZCRYPT_DEV_STATUS_EXT_SIZE, GFP_KERNEL); + if (!dev_status_mem) { + mempool_destroy(cprb_mempool); + return -ENOMEM; + } + + return 0; +} + +void zcrypt_ep11misc_exit(void) { - card_cache_free(); + mutex_lock(&dev_status_mem_mutex); + kvfree(dev_status_mem); + mutex_unlock(&dev_status_mem_mutex); + mempool_destroy(cprb_mempool); } diff --git a/drivers/s390/crypto/zcrypt_ep11misc.h b/drivers/s390/crypto/zcrypt_ep11misc.h index 9f1bdffdec6898..b5e6fd861815b3 100644 --- a/drivers/s390/crypto/zcrypt_ep11misc.h +++ b/drivers/s390/crypto/zcrypt_ep11misc.h @@ -104,25 +104,26 @@ struct ep11_domain_info { /* * Provide information about an EP11 card. */ -int ep11_get_card_info(u16 card, struct ep11_card_info *info, int verify); +int ep11_get_card_info(u16 card, struct ep11_card_info *info, u32 xflags); /* * Provide information about a domain within an EP11 card. */ -int ep11_get_domain_info(u16 card, u16 domain, struct ep11_domain_info *info); +int ep11_get_domain_info(u16 card, u16 domain, + struct ep11_domain_info *info, u32 xflags); /* * Generate (random) EP11 AES secure key. */ int ep11_genaeskey(u16 card, u16 domain, u32 keybitsize, u32 keygenflags, - u8 *keybuf, u32 *keybufsize, u32 keybufver); + u8 *keybuf, u32 *keybufsize, u32 keybufver, u32 xflags); /* * Generate EP11 AES secure key with given clear key value. */ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, const u8 *clrkey, u8 *keybuf, u32 *keybufsize, - u32 keytype); + u32 keytype, u32 xflags); /* * Build a list of ep11 apqns meeting the following constraints: @@ -136,22 +137,22 @@ int ep11_clr2keyblob(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags, * key for this domain. When a wkvp is given there will always be a re-fetch * of the domain info for the potential apqn - so this triggers a request * reply to each eligible apqn. - * The array of apqn entries is allocated with kmalloc and returned in *apqns; - * the number of apqns stored into the list is returned in *nr_apqns. One apqn - * entry is simple a 32 bit value with 16 bit cardnr and 16 bit domain nr and - * may be casted to struct pkey_apqn. The return value is either 0 for success - * or a negative errno value. If no apqn meeting the criteria is found, - * -ENODEV is returned. + * The caller should set *nr_apqns to the nr of elements available in *apqns. + * On return *nr_apqns is then updated with the nr of apqns filled into *apqns. + * The return value is either 0 for success or a negative errno value. + * If no apqn meeting the criteria is found, -ENODEV is returned. */ -int ep11_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain, - int minhwtype, int minapi, const u8 *wkvp); +int ep11_findcard2(u32 *apqns, u32 *nr_apqns, u16 cardnr, u16 domain, + int minhwtype, int minapi, const u8 *wkvp, u32 xflags); /* * Derive protected key from EP11 key blob (AES and ECC keys). 
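 * On success the protected key is stored to protkey, its length to * *protkeylen and its type to *protkeytype. The xflags argument takes * ZCRYPT_XFLAG_* bits and is passed down to the cprb layer; with * ZCRYPT_XFLAG_NOMEMALLOC only pre-allocated (mempool) memory is used, * as the implementation above shows. 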
*/ int ep11_kblob2protkey(u16 card, u16 dom, const u8 *key, u32 keylen, - u8 *protkey, u32 *protkeylen, u32 *protkeytype); + u8 *protkey, u32 *protkeylen, u32 *protkeytype, + u32 xflags); +int zcrypt_ep11misc_init(void); void zcrypt_ep11misc_exit(void); #endif /* _ZCRYPT_EP11MISC_H_ */ diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index adc65eddaa1e38..fc0a2a053dc22c 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -438,7 +438,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq, msg->len = sizeof(error_reply); } out: - complete((struct completion *)msg->private); + complete(&msg->response.work); } static atomic_t zcrypt_step = ATOMIC_INIT(0); @@ -449,30 +449,30 @@ static atomic_t zcrypt_step = ATOMIC_INIT(0); * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @mex: pointer to the modexpo request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICAMEX_msg_to_type50MEX_msg(zq, ap_msg, mex); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -485,7 +485,6 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send me cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), @@ -499,30 +498,30 @@ out: * @zq: pointer to zcrypt_queue structure that identifies the * CEXxA device to the request distributor * @crt: pointer to the modexpo_crt request buffer + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
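+ * A minimal caller sketch, assuming the ap_init_apmsg()/ap_release_apmsg()
+ * helper pair named in the changelog (their exact signatures are not part
+ * of this excerpt):
+ *
+ *	struct ap_message ap_msg;
+ *	long rc;
+ *
+ *	rc = ap_init_apmsg(&ap_msg, 0);	/* 0: no special msg flags */
+ *	if (rc)
+ *		return rc;
+ *	rc = zcrypt_msgtype50_modexpo_crt(zq, crt, &ap_msg);
+ *	ap_release_apmsg(&ap_msg);	/* always, even on failure */
+ *	return rc;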
*/ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct completion work; int rc; - ap_msg->bufsize = MSGTYPE50_CRB3_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < MSGTYPE50_CRB3_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype50_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &work; rc = ICACRT_msg_to_type50CRT_msg(zq, ap_msg, crt); if (rc) goto out; - init_completion(&work); + init_completion(&ap_msg->response.work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&work); + rc = wait_for_completion_interruptible(&ap_msg->response.work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -535,7 +534,6 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq, } out: - ap_msg->private = NULL; if (rc) pr_debug("send crt cprb at dev=%02x.%04x rc=%d\n", AP_QID_CARD(zq->queue->qid), diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index b64c9d9fc6137e..9cefbb30960fdd 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -31,11 +31,6 @@ #define CEIL4(x) ((((x) + 3) / 4) * 4) -struct response_type { - struct completion work; - int type; -}; - #define CEXXC_RESPONSE_TYPE_ICA 0 #define CEXXC_RESPONSE_TYPE_XCRB 1 #define CEXXC_RESPONSE_TYPE_EP11 2 @@ -856,7 +851,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86x_reply *t86r; int len; @@ -920,7 +915,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = msg->private; + struct ap_response_type *resp_type = &msg->response; struct type86_ep11_reply *t86r; int len; @@ -967,9 +962,7 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, struct ica_rsa_modexpo *mex, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -979,15 +972,15 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icamex_msg_to_type6mex_msgx(zq, ap_msg, mex); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1001,7 +994,6 @@ static long zcrypt_msgtype6_modexpo(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1017,9 +1009,7 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, struct ica_rsa_modexpo_crt *crt, struct ap_message *ap_msg) { - struct response_type resp_type = { - .type = 
CEXXC_RESPONSE_TYPE_ICA, - }; + struct ap_response_type *resp_type = &ap_msg->response; int rc; ap_msg->msg = (void *)get_zeroed_page(GFP_KERNEL); @@ -1029,15 +1019,15 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = &resp_type; rc = icacrt_msg_to_type6crt_msgx(zq, ap_msg, crt); if (rc) goto out_free; - init_completion(&resp_type.work); + resp_type->type = CEXXC_RESPONSE_TYPE_ICA; + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out_free; - rc = wait_for_completion_interruptible(&resp_type.work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1051,7 +1041,6 @@ static long zcrypt_msgtype6_modexpo_crt(struct zcrypt_queue *zq, out_free: free_page((unsigned long)ap_msg->msg); - ap_msg->private = NULL; ap_msg->msg = NULL; return rc; } @@ -1061,28 +1050,21 @@ out_free: * Prepare a CCA AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. - * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_cca_ap_msg(bool userspace, struct ica_xcRB *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned short **dom) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; return xcrb_msg_to_type6cprb_msgx(userspace, ap_msg, xcrb, func_code, dom); } @@ -1097,7 +1079,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -1128,11 +1110,11 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, msg->hdr.fromcardlen1 -= delta; } - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1158,28 +1140,21 @@ out: * Prepare an EP11 AP msg: fetch the required data from userspace, * prepare the AP msg, fill some info into the ap_message struct, * extract some data from the CPRB and give back to the caller. 
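 * (The data given back to the caller is the function code in *func_code * and the target domain in *domain.) 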
- * This function allocates memory and needs an ap_msg prepared - * by the caller with ap_init_message(). Also the caller has to - * make sure ap_release_message() is always called even on failure. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_msg->bufsize is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. */ int prep_ep11_ap_msg(bool userspace, struct ep11_urb *xcrb, struct ap_message *ap_msg, unsigned int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_EP11, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = atomic_read(&ap_max_msg_size); - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; ap_msg->receive = zcrypt_msgtype6_receive_ep11; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + resp_type->type = CEXXC_RESPONSE_TYPE_EP11; return xcrb_msg_to_type6_ep11cprb_msgx(userspace, ap_msg, xcrb, func_code, domain); } @@ -1197,7 +1172,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * { int rc; unsigned int lfmt; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; struct { struct type6_hdr hdr; struct ep11_cprb cprbx; @@ -1251,11 +1226,11 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * msg->hdr.fromcardlen1 = zq->reply.bufsize - sizeof(struct type86_hdr) - sizeof(struct type86_fmt2_ext); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) @@ -1276,23 +1251,25 @@ out: return rc; } +/* + * Prepare a CEXXC get random request ap message. + * This function assumes that ap_msg has been initialized with + * ap_init_apmsg() and thus a valid buffer with the size of + * ap_max_msg_size is available within ap_msg. Also the caller has + * to make sure ap_release_apmsg() is always called even on failure. 
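+ * Note: if the pre-allocated buffer is smaller than AP_DEFAULT_MAX_MSG_SIZE + * the function below fails with -EMSGSIZE rather than allocating a buffer. 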
+ */ int prep_rng_ap_msg(struct ap_message *ap_msg, int *func_code, unsigned int *domain) { - struct response_type resp_type = { - .type = CEXXC_RESPONSE_TYPE_XCRB, - }; + struct ap_response_type *resp_type = &ap_msg->response; - ap_msg->bufsize = AP_DEFAULT_MAX_MSG_SIZE; - ap_msg->msg = kmalloc(ap_msg->bufsize, GFP_KERNEL); - if (!ap_msg->msg) - return -ENOMEM; + if (ap_msg->bufsize < AP_DEFAULT_MAX_MSG_SIZE) + return -EMSGSIZE; ap_msg->receive = zcrypt_msgtype6_receive; ap_msg->psmid = (((unsigned long)current->pid) << 32) + atomic_inc_return(&zcrypt_step); - ap_msg->private = kmemdup(&resp_type, sizeof(resp_type), GFP_KERNEL); - if (!ap_msg->private) - return -ENOMEM; + + resp_type->type = CEXXC_RESPONSE_TYPE_XCRB; rng_type6cprb_msgx(ap_msg, ZCRYPT_RNG_BUFFER_SIZE, domain); @@ -1319,16 +1296,16 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, short int verb_length; short int key_length; } __packed * msg = ap_msg->msg; - struct response_type *rtype = ap_msg->private; + struct ap_response_type *resp_type = &ap_msg->response; int rc; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); - init_completion(&rtype->work); + init_completion(&resp_type->work); rc = ap_queue_message(zq->queue, ap_msg); if (rc) goto out; - rc = wait_for_completion_interruptible(&rtype->work); + rc = wait_for_completion_interruptible(&resp_type->work); if (rc == 0) { rc = ap_msg->rc; if (rc == 0) diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 9e580ef69bdaab..aaa1eea6149b6b 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c @@ -179,7 +179,7 @@ void ctcmpc_dumpit(char *buf, int len) ctcm_pr_debug(" %s (+%s) : %s [%s]\n", addr, boff, bhex, basc); dup = 0; - strcpy(duphex, bhex); + strscpy(duphex, bhex); } else dup++; diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c index 76dffc89c641bf..887d5a6c155b95 100644 --- a/drivers/watchdog/diag288_wdt.c +++ b/drivers/watchdog/diag288_wdt.c @@ -29,26 +29,13 @@ #include <linux/watchdog.h> #include <asm/machine.h> #include <asm/ebcdic.h> +#include <asm/diag288.h> #include <asm/diag.h> #include <linux/io.h> #define MAX_CMDLEN 240 #define DEFAULT_CMD "SYSTEM RESTART" -#define MIN_INTERVAL 15 /* Minimal time supported by diag88 */ -#define MAX_INTERVAL 3600 /* One hour should be enough - pure estimation */ - -#define WDT_DEFAULT_TIMEOUT 30 - -/* Function codes - init, change, cancel */ -#define WDT_FUNC_INIT 0 -#define WDT_FUNC_CHANGE 1 -#define WDT_FUNC_CANCEL 2 -#define WDT_FUNC_CONCEAL 0x80000000 - -/* Action codes for LPAR watchdog */ -#define LPARWDT_RESTART 0 - static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD; static bool conceal_on; static bool nowayout_info = WATCHDOG_NOWAYOUT; @@ -75,22 +62,8 @@ static char *cmd_buf; static int diag288(unsigned int func, unsigned int timeout, unsigned long action, unsigned int len) { - union register_pair r1 = { .even = func, .odd = timeout, }; - union register_pair r3 = { .even = action, .odd = len, }; - int err; - diag_stat_inc(DIAG_STAT_X288); - - err = -EINVAL; - asm volatile( - " diag %[r1],%[r3],0x288\n" - "0: la %[err],0\n" - "1:\n" - EX_TABLE(0b, 1b) - : [err] "+d" (err) - : [r1] "d" (r1.pair), [r3] "d" (r3.pair) - : "cc", "memory"); - return err; + return __diag288(func, timeout, action, len); } static int diag288_str(unsigned int func, unsigned int timeout, char *cmd) @@ -189,8 +162,6 @@ static struct watchdog_device wdt_dev = { static int __init diag288_init(void) { - int ret; - watchdog_set_nowayout(&wdt_dev, nowayout_info); if 
(machine_is_vm()) { @@ -199,24 +170,6 @@ static int __init diag288_init(void) pr_err("The watchdog cannot be initialized\n"); return -ENOMEM; } - - ret = diag288_str(WDT_FUNC_INIT, MIN_INTERVAL, "BEGIN"); - if (ret != 0) { - pr_err("The watchdog cannot be initialized\n"); - kfree(cmd_buf); - return -EINVAL; - } - } else { - if (diag288(WDT_FUNC_INIT, WDT_DEFAULT_TIMEOUT, - LPARWDT_RESTART, 0)) { - pr_err("The watchdog cannot be initialized\n"); - return -EINVAL; - } - } - - if (diag288(WDT_FUNC_CANCEL, 0, 0, 0)) { - pr_err("The watchdog cannot be deactivated\n"); - return -EINVAL; } return watchdog_register_device(&wdt_dev); @@ -228,5 +181,5 @@ static void __exit diag288_exit(void) kfree(cmd_buf); } -module_init(diag288_init); +module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init); module_exit(diag288_exit);
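The open-coded diagnose 0x288 sequence removed above now lives behind __diag288() in the new <asm/diag288.h> included at the top of this file. The header itself is not part of this diff; the following is a sketch reconstructed from the deleted lines (union register_pair and EX_TABLE come from the usual s390 headers), so the real header may differ:

	static inline int __diag288(unsigned int func, unsigned int timeout,
				    unsigned long action, unsigned int len)
	{
		union register_pair r1 = { .even = func, .odd = timeout, };
		union register_pair r3 = { .even = action, .odd = len, };
		int err = -EINVAL;

		/* err is cleared only if the diagnose does not fault */
		asm volatile(
			"	diag	%[r1],%[r3],0x288\n"
			"0:	la	%[err],0\n"
			"1:\n"
			EX_TABLE(0b, 1b)
			: [err] "+d" (err)
			: [r1] "d" (r1.pair), [r3] "d" (r3.pair)
			: "cc", "memory");
		return err;
	}

Note that diag_stat_inc(DIAG_STAT_X288) stays in the diag288() wrapper in this file, and with module_cpu_feature_match(S390_CPU_FEATURE_D288, diag288_init) the module is now autoloaded only on machines that report the corresponding CPU feature, instead of being registered unconditionally via module_init().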