author    Michal Kubecek <mkubecek@suse.cz>  2019-11-27 15:18:28 +0100
committer Michal Kubecek <mkubecek@suse.cz>  2019-11-27 15:18:28 +0100
commit    c1f274573cc7e606cab8f67a02cc133516c470f3 (patch)
tree      e25433ef37cd447f380a6447362050c9e2bb6b40
parent    dae51bda5d04f284bef79d646bd98f08f9ee417b (diff)
parent    a827279465788a135280edfa2c3a1f5496518b77 (diff)
Merge branch 'users/ptesarik/SLE15-SP2/for-next' into SLE15-SP2 (tags: rpm-5.3.13-1--SLE-15-SP2-Full-Beta1, rpm-5.3.13-1)
Pull s390x backport from Petr Tesařík.
-rw-r--r--  config/s390x/default                                                         |    2
-rw-r--r--  patches.suse/s390-crypto-fix-unsigned-variable-compared-with-zero            |   43
-rw-r--r--  patches.suse/s390-crypto-support-for-sha3-via-cpacf-msa6                     |  555
-rw-r--r--  patches.suse/s390-paes-prepare-paes-functions-for-large-key-blobs            |  338
-rw-r--r--  patches.suse/s390-pkey-add-cca-aes-cipher-key-support                        | 1366
-rw-r--r--  patches.suse/s390-pkey-add-sysfs-attributes-to-emit-aes-cipher-key-blobs     |  161
-rw-r--r--  patches.suse/s390-pkey-fix-memory-leak-within-copy_apqns_from_user           |   31
-rw-r--r--  patches.suse/s390-pkey-pkey-cleanup-narrow-in-kernel-api-fix-some-variable-types |  290
-rw-r--r--  patches.suse/s390-qdio-enable-drivers-to-poll-for-output-completions         |  150
-rw-r--r--  patches.suse/s390-qdio-implement-iqd-multi-write                             |  120
-rw-r--r--  patches.suse/s390-qdio-let-drivers-opt-out-from-output-queue-scanning        |  117
-rw-r--r--  patches.suse/s390-qeth-add-bql-support-for-iqd-devices                       |   95
-rw-r--r--  patches.suse/s390-qeth-add-tx-napi-support-for-iqd-devices                   |  391
-rw-r--r--  patches.suse/s390-qeth-add-xmit_more-support-for-iqd-devices                 |  282
-rw-r--r--  patches.suse/s390-qeth-collect-accurate-tx-statistics                        |  265
-rw-r--r--  patches.suse/s390-qeth-use-iqd-multi-write                                   |  161
-rw-r--r--  patches.suse/s390-qeth-when-in-tx-napi-mode-use-napi_consume_skb             |  117
-rw-r--r--  patches.suse/s390-zcrypt-add-low-level-functions-for-cca-aes-cipher-keys     | 1028
-rw-r--r--  patches.suse/s390-zcrypt-extend-cca_findcard-function-and-helper             |  240
-rw-r--r--  patches.suse/s390-zcrypt-fix-wrong-handling-of-cca-cipher-keygenflags        |   37
-rw-r--r--  series.conf                                                                  |   19
-rw-r--r--  supported.conf                                                               |    2
22 files changed, 5810 insertions(+), 0 deletions(-)
diff --git a/config/s390x/default b/config/s390x/default
index 42bb55e42e..86454cea44 100644
--- a/config/s390x/default
+++ b/config/s390x/default
@@ -3612,6 +3612,8 @@ CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
CONFIG_CRYPTO_SHA512_S390=m
+CONFIG_CRYPTO_SHA3_256_S390=m
+CONFIG_CRYPTO_SHA3_512_S390=m
CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_S390_PRNG=m
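
The two new CONFIG_CRYPTO_SHA3_*_S390=m entries build the SHA3 modules added by the patch below. For illustration only (not part of this series), a minimal kernel-module sketch that probes whether a sha3-256 provider is registered once the module is loaded:

#include <linux/module.h>
#include <linux/crypto.h>

static int __init sha3_check_init(void)
{
	/* crypto_has_alg() returns nonzero if any "sha3-256" implementation exists */
	pr_info("sha3-256 available: %d\n", crypto_has_alg("sha3-256", 0, 0));
	return 0;
}

static void __exit sha3_check_exit(void)
{
}

module_init(sha3_check_init);
module_exit(sha3_check_exit);
MODULE_LICENSE("GPL");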
diff --git a/patches.suse/s390-crypto-fix-unsigned-variable-compared-with-zero b/patches.suse/s390-crypto-fix-unsigned-variable-compared-with-zero
new file mode 100644
index 0000000000..bdd01b1588
--- /dev/null
+++ b/patches.suse/s390-crypto-fix-unsigned-variable-compared-with-zero
@@ -0,0 +1,43 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 14 Nov 2019 15:30:05 +0800
+Subject: s390/crypto: Fix unsigned variable compared with zero
+Git-commit: 0398d4ab1677f7d8cd43aac2aa29a93dfcf9e2e3
+Patch-mainline: v5.5-rc1
+References: jsc#SLE-7545 LTC#179078
+
+s390_crypto_shash_parmsize() returns int, so its result must not be
+stored in an unsigned variable; the subsequent comparison with zero
+can then never be true.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 3c2eb6b76cab ("s390/crypto: Support for SHA3 via CPACF (MSA6)")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: Joerg Schmidbauer <jschmidb@linux.vnet.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/crypto/sha_common.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/crypto/sha_common.c
++++ b/arch/s390/crypto/sha_common.c
+@@ -74,14 +74,17 @@ int s390_sha_final(struct shash_desc *de
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bsize = crypto_shash_blocksize(desc->tfm);
+ u64 bits;
+- unsigned int n, mbl_offset;
++ unsigned int n;
++ int mbl_offset;
+
+ n = ctx->count % bsize;
+ bits = ctx->count * 8;
+- mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
++ mbl_offset = s390_crypto_shash_parmsize(ctx->func);
+ if (mbl_offset < 0)
+ return -EINVAL;
+
++ mbl_offset = mbl_offset / sizeof(u32);
++
+ /* set total msg bit length (mbl) in CPACF parmblock */
+ switch (ctx->func) {
+ case CPACF_KLMD_SHA_1:
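
The pitfall fixed above is worth spelling out: assigning a possibly negative int to an unsigned variable makes any subsequent "< 0" test dead code. A standalone sketch (hypothetical parmsize() standing in for s390_crypto_shash_parmsize()):

#include <stdio.h>

static int parmsize(int func)
{
	return func == 0x01 ? 20 : -1;	/* -1 signals "unknown function" */
}

int main(void)
{
	unsigned int u = parmsize(0x99);	/* -1 wraps to UINT_MAX */
	int s = parmsize(0x99);

	if (u < 0)	/* always false: an unsigned value is never negative */
		puts("unsigned: error caught");
	if (s < 0)	/* true: the error is detected */
		puts("signed: error caught");
	return 0;
}

Only "signed: error caught" is printed, which is exactly why the fix moves the error check to a signed mbl_offset and divides by sizeof(u32) only afterwards.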
diff --git a/patches.suse/s390-crypto-support-for-sha3-via-cpacf-msa6 b/patches.suse/s390-crypto-support-for-sha3-via-cpacf-msa6
new file mode 100644
index 0000000000..e39ba1121a
--- /dev/null
+++ b/patches.suse/s390-crypto-support-for-sha3-via-cpacf-msa6
@@ -0,0 +1,555 @@
+From: Joerg Schmidbauer <jschmidb@de.ibm.com>
+Date: Wed, 14 Aug 2019 14:56:54 +0200
+Subject: s390/crypto: Support for SHA3 via CPACF (MSA6)
+Git-commit: 3c2eb6b76cabb7d90834798d6455f7f3431fc989
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7545 LTC#179078
+
+This patch introduces sha3 support for s390.
+
+- Rework the s390-specific SHA1 and SHA2 related code to
+ provide the basis for SHA3.
+- Provide two new kernel modules sha3_256_s390 and
+ sha3_512_s390 together with new kernel options.
+
+Signed-off-by: Joerg Schmidbauer <jschmidb@de.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Reviewed-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/configs/debug_defconfig | 2
+ arch/s390/configs/defconfig | 2
+ arch/s390/crypto/Makefile | 2
+ arch/s390/crypto/sha.h | 12 +-
+ arch/s390/crypto/sha3_256_s390.c | 147 ++++++++++++++++++++++++++++++++++++
+ arch/s390/crypto/sha3_512_s390.c | 155 ++++++++++++++++++++++++++++++++++++++
+ arch/s390/crypto/sha_common.c | 75 ++++++++++++------
+ arch/s390/include/asm/cpacf.h | 8 +
+ drivers/crypto/Kconfig | 20 ++++
+ 9 files changed, 395 insertions(+), 28 deletions(-)
+
+--- a/arch/s390/configs/debug_defconfig
++++ b/arch/s390/configs/debug_defconfig
+@@ -717,6 +717,8 @@ CONFIG_CRYPTO_PAES_S390=m
+ CONFIG_CRYPTO_SHA1_S390=m
+ CONFIG_CRYPTO_SHA256_S390=m
+ CONFIG_CRYPTO_SHA512_S390=m
++CONFIG_CRYPTO_SHA3_256_S390=m
++CONFIG_CRYPTO_SHA3_512_S390=m
+ CONFIG_CRYPTO_DES_S390=m
+ CONFIG_CRYPTO_AES_S390=m
+ CONFIG_CRYPTO_GHASH_S390=m
+--- a/arch/s390/configs/defconfig
++++ b/arch/s390/configs/defconfig
+@@ -710,6 +710,8 @@ CONFIG_CRYPTO_PAES_S390=m
+ CONFIG_CRYPTO_SHA1_S390=m
+ CONFIG_CRYPTO_SHA256_S390=m
+ CONFIG_CRYPTO_SHA512_S390=m
++CONFIG_CRYPTO_SHA3_256_S390=m
++CONFIG_CRYPTO_SHA3_512_S390=m
+ CONFIG_CRYPTO_DES_S390=m
+ CONFIG_CRYPTO_AES_S390=m
+ CONFIG_CRYPTO_GHASH_S390=m
+--- a/arch/s390/crypto/Makefile
++++ b/arch/s390/crypto/Makefile
+@@ -6,6 +6,8 @@
+ obj-$(CONFIG_CRYPTO_SHA1_S390) += sha1_s390.o sha_common.o
+ obj-$(CONFIG_CRYPTO_SHA256_S390) += sha256_s390.o sha_common.o
+ obj-$(CONFIG_CRYPTO_SHA512_S390) += sha512_s390.o sha_common.o
++obj-$(CONFIG_CRYPTO_SHA3_256_S390) += sha3_256_s390.o sha_common.o
++obj-$(CONFIG_CRYPTO_SHA3_512_S390) += sha3_512_s390.o sha_common.o
+ obj-$(CONFIG_CRYPTO_DES_S390) += des_s390.o
+ obj-$(CONFIG_CRYPTO_AES_S390) += aes_s390.o
+ obj-$(CONFIG_CRYPTO_PAES_S390) += paes_s390.o
+--- a/arch/s390/crypto/sha.h
++++ b/arch/s390/crypto/sha.h
+@@ -12,15 +12,17 @@
+
+ #include <linux/crypto.h>
+ #include <crypto/sha.h>
++#include <crypto/sha3.h>
+
+ /* must be big enough for the largest SHA variant */
+-#define SHA_MAX_STATE_SIZE (SHA512_DIGEST_SIZE / 4)
+-#define SHA_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
++#define SHA3_STATE_SIZE 200
++#define CPACF_MAX_PARMBLOCK_SIZE SHA3_STATE_SIZE
++#define SHA_MAX_BLOCK_SIZE SHA3_224_BLOCK_SIZE
+
+ struct s390_sha_ctx {
+- u64 count; /* message length in bytes */
+- u32 state[SHA_MAX_STATE_SIZE];
+- u8 buf[2 * SHA_MAX_BLOCK_SIZE];
++ u64 count; /* message length in bytes */
++ u32 state[CPACF_MAX_PARMBLOCK_SIZE / sizeof(u32)];
++ u8 buf[SHA_MAX_BLOCK_SIZE];
+ int func; /* KIMD function to use */
+ };
+
+--- /dev/null
++++ b/arch/s390/crypto/sha3_256_s390.c
+@@ -0,0 +1,147 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Cryptographic API.
++ *
++ * s390 implementation of the SHA3-256 and SHA3-224 Secure Hash Algorithms.
++ *
++ * s390 Version:
++ * Copyright IBM Corp. 2019
++ * Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com)
++ */
++#include <crypto/internal/hash.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/cpufeature.h>
++#include <crypto/sha.h>
++#include <crypto/sha3.h>
++#include <asm/cpacf.h>
++
++#include "sha.h"
++
++static int sha3_256_init(struct shash_desc *desc)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++
++ memset(sctx->state, 0, sizeof(sctx->state));
++ sctx->count = 0;
++ sctx->func = CPACF_KIMD_SHA3_256;
++
++ return 0;
++}
++
++static int sha3_256_export(struct shash_desc *desc, void *out)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ struct sha3_state *octx = out;
++
++ octx->rsiz = sctx->count;
++ memcpy(octx->st, sctx->state, sizeof(octx->st));
++ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
++
++ return 0;
++}
++
++static int sha3_256_import(struct shash_desc *desc, const void *in)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ const struct sha3_state *ictx = in;
++
++ sctx->count = ictx->rsiz;
++ memcpy(sctx->state, ictx->st, sizeof(ictx->st));
++ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
++ sctx->func = CPACF_KIMD_SHA3_256;
++
++ return 0;
++}
++
++static int sha3_224_import(struct shash_desc *desc, const void *in)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ const struct sha3_state *ictx = in;
++
++ sctx->count = ictx->rsiz;
++ memcpy(sctx->state, ictx->st, sizeof(ictx->st));
++ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
++ sctx->func = CPACF_KIMD_SHA3_224;
++
++ return 0;
++}
++
++static struct shash_alg sha3_256_alg = {
++ .digestsize = SHA3_256_DIGEST_SIZE, /* = 32 */
++ .init = sha3_256_init,
++ .update = s390_sha_update,
++ .final = s390_sha_final,
++ .export = sha3_256_export,
++ .import = sha3_256_import,
++ .descsize = sizeof(struct s390_sha_ctx),
++ .statesize = sizeof(struct sha3_state),
++ .base = {
++ .cra_name = "sha3-256",
++ .cra_driver_name = "sha3-256-s390",
++ .cra_priority = 300,
++ .cra_blocksize = SHA3_256_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++};
++
++static int sha3_224_init(struct shash_desc *desc)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++
++ memset(sctx->state, 0, sizeof(sctx->state));
++ sctx->count = 0;
++ sctx->func = CPACF_KIMD_SHA3_224;
++
++ return 0;
++}
++
++static struct shash_alg sha3_224_alg = {
++ .digestsize = SHA3_224_DIGEST_SIZE,
++ .init = sha3_224_init,
++ .update = s390_sha_update,
++ .final = s390_sha_final,
++ .export = sha3_256_export, /* same as for 256 */
++ .import = sha3_224_import, /* function code different! */
++ .descsize = sizeof(struct s390_sha_ctx),
++ .statesize = sizeof(struct sha3_state),
++ .base = {
++ .cra_name = "sha3-224",
++ .cra_driver_name = "sha3-224-s390",
++ .cra_priority = 300,
++ .cra_blocksize = SHA3_224_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++};
++
++static int __init sha3_256_s390_init(void)
++{
++ int ret;
++
++ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA3_256))
++ return -ENODEV;
++
++ ret = crypto_register_shash(&sha3_256_alg);
++ if (ret < 0)
++ goto out;
++
++ ret = crypto_register_shash(&sha3_224_alg);
++ if (ret < 0)
++ crypto_unregister_shash(&sha3_256_alg);
++out:
++ return ret;
++}
++
++static void __exit sha3_256_s390_fini(void)
++{
++ crypto_unregister_shash(&sha3_224_alg);
++ crypto_unregister_shash(&sha3_256_alg);
++}
++
++module_cpu_feature_match(MSA, sha3_256_s390_init);
++module_exit(sha3_256_s390_fini);
++
++MODULE_ALIAS_CRYPTO("sha3-256");
++MODULE_ALIAS_CRYPTO("sha3-224");
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SHA3-256 and SHA3-224 Secure Hash Algorithm");
+--- /dev/null
++++ b/arch/s390/crypto/sha3_512_s390.c
+@@ -0,0 +1,155 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Cryptographic API.
++ *
++ * s390 implementation of the SHA3-512 and SHA3-384 Secure Hash Algorithms.
++ *
++ * Copyright IBM Corp. 2019
++ * Author(s): Joerg Schmidbauer (jschmidb@de.ibm.com)
++ */
++#include <crypto/internal/hash.h>
++#include <linux/init.h>
++#include <linux/module.h>
++#include <linux/cpufeature.h>
++#include <crypto/sha.h>
++#include <crypto/sha3.h>
++#include <asm/cpacf.h>
++
++#include "sha.h"
++
++static int sha3_512_init(struct shash_desc *desc)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++
++ memset(sctx->state, 0, sizeof(sctx->state));
++ sctx->count = 0;
++ sctx->func = CPACF_KIMD_SHA3_512;
++
++ return 0;
++}
++
++static int sha3_512_export(struct shash_desc *desc, void *out)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ struct sha3_state *octx = out;
++
++ octx->rsiz = sctx->count;
++ octx->rsizw = sctx->count >> 32;
++
++ memcpy(octx->st, sctx->state, sizeof(octx->st));
++ memcpy(octx->buf, sctx->buf, sizeof(octx->buf));
++
++ return 0;
++}
++
++static int sha3_512_import(struct shash_desc *desc, const void *in)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ const struct sha3_state *ictx = in;
++
++ if (unlikely(ictx->rsizw))
++ return -ERANGE;
++ sctx->count = ictx->rsiz;
++
++ memcpy(sctx->state, ictx->st, sizeof(ictx->st));
++ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
++ sctx->func = CPACF_KIMD_SHA3_512;
++
++ return 0;
++}
++
++static int sha3_384_import(struct shash_desc *desc, const void *in)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++ const struct sha3_state *ictx = in;
++
++ if (unlikely(ictx->rsizw))
++ return -ERANGE;
++ sctx->count = ictx->rsiz;
++
++ memcpy(sctx->state, ictx->st, sizeof(ictx->st));
++ memcpy(sctx->buf, ictx->buf, sizeof(ictx->buf));
++ sctx->func = CPACF_KIMD_SHA3_384;
++
++ return 0;
++}
++
++static struct shash_alg sha3_512_alg = {
++ .digestsize = SHA3_512_DIGEST_SIZE,
++ .init = sha3_512_init,
++ .update = s390_sha_update,
++ .final = s390_sha_final,
++ .export = sha3_512_export,
++ .import = sha3_512_import,
++ .descsize = sizeof(struct s390_sha_ctx),
++ .statesize = sizeof(struct sha3_state),
++ .base = {
++ .cra_name = "sha3-512",
++ .cra_driver_name = "sha3-512-s390",
++ .cra_priority = 300,
++ .cra_blocksize = SHA3_512_BLOCK_SIZE,
++ .cra_module = THIS_MODULE,
++ }
++};
++
++MODULE_ALIAS_CRYPTO("sha3-512");
++
++static int sha3_384_init(struct shash_desc *desc)
++{
++ struct s390_sha_ctx *sctx = shash_desc_ctx(desc);
++
++ memset(sctx->state, 0, sizeof(sctx->state));
++ sctx->count = 0;
++ sctx->func = CPACF_KIMD_SHA3_384;
++
++ return 0;
++}
++
++static struct shash_alg sha3_384_alg = {
++ .digestsize = SHA3_384_DIGEST_SIZE,
++ .init = sha3_384_init,
++ .update = s390_sha_update,
++ .final = s390_sha_final,
++ .export = sha3_512_export, /* same as for 512 */
++ .import = sha3_384_import, /* function code different! */
++ .descsize = sizeof(struct s390_sha_ctx),
++ .statesize = sizeof(struct sha3_state),
++ .base = {
++ .cra_name = "sha3-384",
++ .cra_driver_name = "sha3-384-s390",
++ .cra_priority = 300,
++ .cra_blocksize = SHA3_384_BLOCK_SIZE,
++ .cra_ctxsize = sizeof(struct s390_sha_ctx),
++ .cra_module = THIS_MODULE,
++ }
++};
++
++MODULE_ALIAS_CRYPTO("sha3-384");
++
++static int __init init(void)
++{
++ int ret;
++
++ if (!cpacf_query_func(CPACF_KIMD, CPACF_KIMD_SHA3_512))
++ return -ENODEV;
++ ret = crypto_register_shash(&sha3_512_alg);
++ if (ret < 0)
++ goto out;
++ ret = crypto_register_shash(&sha3_384_alg);
++ if (ret < 0)
++ crypto_unregister_shash(&sha3_512_alg);
++out:
++ return ret;
++}
++
++static void __exit fini(void)
++{
++ crypto_unregister_shash(&sha3_512_alg);
++ crypto_unregister_shash(&sha3_384_alg);
++}
++
++module_cpu_feature_match(MSA, init);
++module_exit(fini);
++
++MODULE_LICENSE("GPL");
++MODULE_DESCRIPTION("SHA3-512 and SHA3-384 Secure Hash Algorithm");
+--- a/arch/s390/crypto/sha_common.c
++++ b/arch/s390/crypto/sha_common.c
+@@ -20,7 +20,7 @@ int s390_sha_update(struct shash_desc *d
+ unsigned int index, n;
+
+ /* how much is already in the buffer? */
+- index = ctx->count & (bsize - 1);
++ index = ctx->count % bsize;
+ ctx->count += len;
+
+ if ((index + len) < bsize)
+@@ -37,7 +37,7 @@ int s390_sha_update(struct shash_desc *d
+
+ /* process as many blocks as possible */
+ if (len >= bsize) {
+- n = len & ~(bsize - 1);
++ n = (len / bsize) * bsize;
+ cpacf_kimd(ctx->func, ctx->state, data, n);
+ data += n;
+ len -= n;
+@@ -50,34 +50,63 @@ store:
+ }
+ EXPORT_SYMBOL_GPL(s390_sha_update);
+
++static int s390_crypto_shash_parmsize(int func)
++{
++ switch (func) {
++ case CPACF_KLMD_SHA_1:
++ return 20;
++ case CPACF_KLMD_SHA_256:
++ return 32;
++ case CPACF_KLMD_SHA_512:
++ return 64;
++ case CPACF_KLMD_SHA3_224:
++ case CPACF_KLMD_SHA3_256:
++ case CPACF_KLMD_SHA3_384:
++ case CPACF_KLMD_SHA3_512:
++ return 200;
++ default:
++ return -EINVAL;
++ }
++}
++
+ int s390_sha_final(struct shash_desc *desc, u8 *out)
+ {
+ struct s390_sha_ctx *ctx = shash_desc_ctx(desc);
+ unsigned int bsize = crypto_shash_blocksize(desc->tfm);
+ u64 bits;
+- unsigned int index, end, plen;
++ unsigned int n, mbl_offset;
+
+- /* SHA-512 uses 128 bit padding length */
+- plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;
+-
+- /* must perform manual padding */
+- index = ctx->count & (bsize - 1);
+- end = (index < bsize - plen) ? bsize : (2 * bsize);
+-
+- /* start pad with 1 */
+- ctx->buf[index] = 0x80;
+- index++;
+-
+- /* pad with zeros */
+- memset(ctx->buf + index, 0x00, end - index - 8);
+-
+- /*
+- * Append message length. Well, SHA-512 wants a 128 bit length value,
+- * nevertheless we use u64, should be enough for now...
+- */
++ n = ctx->count % bsize;
+ bits = ctx->count * 8;
+- memcpy(ctx->buf + end - 8, &bits, sizeof(bits));
+- cpacf_kimd(ctx->func, ctx->state, ctx->buf, end);
++ mbl_offset = s390_crypto_shash_parmsize(ctx->func) / sizeof(u32);
++ if (mbl_offset < 0)
++ return -EINVAL;
++
++ /* set total msg bit length (mbl) in CPACF parmblock */
++ switch (ctx->func) {
++ case CPACF_KLMD_SHA_1:
++ case CPACF_KLMD_SHA_256:
++ memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
++ break;
++ case CPACF_KLMD_SHA_512:
++ /*
++ * the SHA512 parmblock has a 128-bit mbl field, clear
++ * high-order u64 field, copy bits to low-order u64 field
++ */
++ memset(ctx->state + mbl_offset, 0x00, sizeof(bits));
++ mbl_offset += sizeof(u64) / sizeof(u32);
++ memcpy(ctx->state + mbl_offset, &bits, sizeof(bits));
++ break;
++ case CPACF_KLMD_SHA3_224:
++ case CPACF_KLMD_SHA3_256:
++ case CPACF_KLMD_SHA3_384:
++ case CPACF_KLMD_SHA3_512:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ cpacf_klmd(ctx->func, ctx->state, ctx->buf, n);
+
+ /* copy digest to out */
+ memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm));
+--- a/arch/s390/include/asm/cpacf.h
++++ b/arch/s390/include/asm/cpacf.h
+@@ -93,6 +93,10 @@
+ #define CPACF_KIMD_SHA_1 0x01
+ #define CPACF_KIMD_SHA_256 0x02
+ #define CPACF_KIMD_SHA_512 0x03
++#define CPACF_KIMD_SHA3_224 0x20
++#define CPACF_KIMD_SHA3_256 0x21
++#define CPACF_KIMD_SHA3_384 0x22
++#define CPACF_KIMD_SHA3_512 0x23
+ #define CPACF_KIMD_GHASH 0x41
+
+ /*
+@@ -103,6 +107,10 @@
+ #define CPACF_KLMD_SHA_1 0x01
+ #define CPACF_KLMD_SHA_256 0x02
+ #define CPACF_KLMD_SHA_512 0x03
++#define CPACF_KLMD_SHA3_224 0x20
++#define CPACF_KLMD_SHA3_256 0x21
++#define CPACF_KLMD_SHA3_384 0x22
++#define CPACF_KLMD_SHA3_512 0x23
+
+ /*
+ * function codes for the KMAC (COMPUTE MESSAGE AUTHENTICATION CODE)
+--- a/drivers/crypto/Kconfig
++++ b/drivers/crypto/Kconfig
+@@ -145,6 +145,26 @@ config CRYPTO_SHA512_S390
+
+ It is available as of z10.
+
++config CRYPTO_SHA3_256_S390
++ tristate "SHA3_224 and SHA3_256 digest algorithm"
++ depends on S390
++ select CRYPTO_HASH
++ help
++ This is the s390 hardware accelerated implementation of the
++ SHA3_256 secure hash standard.
++
++ It is available as of z14.
++
++config CRYPTO_SHA3_512_S390
++ tristate "SHA3_384 and SHA3_512 digest algorithm"
++ depends on S390
++ select CRYPTO_HASH
++ help
++ This is the s390 hardware accelerated implementation of the
++ SHA3_512 secure hash standard.
++
++ It is available as of z14.
++
+ config CRYPTO_DES_S390
+ tristate "DES and Triple DES cipher algorithms"
+ depends on S390
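
Not part of the patch, but for context: once sha3_256_s390 is registered, in-kernel users reach it through the generic shash interface and automatically get the CPACF-accelerated variant when its priority wins. A minimal sketch, assuming a kernel-module context and the 5.3-era crypto API:

#include <linux/err.h>
#include <crypto/hash.h>

/* one-shot sha3-256 digest over a buffer; returns 0 or -errno */
static int sha3_256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int rc;

	/* resolves to sha3-256-s390 (priority 300) when the module is loaded */
	tfm = crypto_alloc_shash("sha3-256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		rc = crypto_shash_digest(desc, data, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return rc;
}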
diff --git a/patches.suse/s390-paes-prepare-paes-functions-for-large-key-blobs b/patches.suse/s390-paes-prepare-paes-functions-for-large-key-blobs
new file mode 100644
index 0000000000..347fde7e3e
--- /dev/null
+++ b/patches.suse/s390-paes-prepare-paes-functions-for-large-key-blobs
@@ -0,0 +1,338 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Fri, 19 Jul 2019 15:22:26 +0200
+Subject: s390/paes: Prepare paes functions for large key blobs
+Git-commit: 416f79c23dbe47e0e223efc06d3487e1d90a92ee
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+The context stored the key blob in a fixed 80-byte buffer, and the
+set_key functions did not even check the given key size. CCA
+variable-length AES cipher keys come in blobs of about 136 bytes,
+and in the future there may be a need to store even bigger key
+blobs.
+
+This patch reworks the paes set_key functions and the context
+buffers: small key blobs (<= 128 bytes) are stored directly in the
+context buffer, while larger blobs go into additionally allocated
+memory whose pointer is kept in the context buffer. Any memory
+allocated for a key blob must be freed when the tfm is released, so
+all the paes ciphers now implement init and exit functions for this
+job.
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/crypto/paes_s390.c | 184 +++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 160 insertions(+), 24 deletions(-)
+
+--- a/arch/s390/crypto/paes_s390.c
++++ b/arch/s390/crypto/paes_s390.c
+@@ -5,7 +5,7 @@
+ * s390 implementation of the AES Cipher Algorithm with protected keys.
+ *
+ * s390 Version:
+- * Copyright IBM Corp. 2017
++ * Copyright IBM Corp. 2017,2019
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
+ */
+@@ -25,16 +25,59 @@
+ #include <asm/cpacf.h>
+ #include <asm/pkey.h>
+
++/*
++ * Key blobs smaller/bigger than these defines are rejected
++ * by the common code even before the individual setkey function
++ * is called. As paes can handle different kinds of key blobs
++ * and padding is also possible, the limits need to be generous.
++ */
++#define PAES_MIN_KEYSIZE 64
++#define PAES_MAX_KEYSIZE 256
++
+ static u8 *ctrblk;
+ static DEFINE_SPINLOCK(ctrblk_lock);
+
+ static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
+
+ struct key_blob {
+- __u8 key[MAXKEYBLOBSIZE];
++ /*
++ * Small keys are stored in the keybuf. Larger keys are
++ * stored in extra allocated memory. In both cases, key
++ * points to the memory where the key is stored.
++ * The code distinguishes by checking keylen against
++ * sizeof(keybuf). See the two following helper functions.
++ */
++ u8 *key;
++ u8 keybuf[128];
+ unsigned int keylen;
+ };
+
++static inline int _copy_key_to_kb(struct key_blob *kb,
++ const u8 *key,
++ unsigned int keylen)
++{
++ if (keylen <= sizeof(kb->keybuf))
++ kb->key = kb->keybuf;
++ else {
++ kb->key = kmalloc(keylen, GFP_KERNEL);
++ if (!kb->key)
++ return -ENOMEM;
++ }
++ memcpy(kb->key, key, keylen);
++ kb->keylen = keylen;
++
++ return 0;
++}
++
++static inline void _free_kb_keybuf(struct key_blob *kb)
++{
++ if (kb->key && kb->key != kb->keybuf
++ && kb->keylen > sizeof(kb->keybuf)) {
++ kfree(kb->key);
++ kb->key = NULL;
++ }
++}
++
+ struct s390_paes_ctx {
+ struct key_blob kb;
+ struct pkey_protkey pk;
+@@ -80,13 +123,33 @@ static int __paes_set_key(struct s390_pa
+ return ctx->fc ? 0 : -EINVAL;
+ }
+
++static int ecb_paes_init(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ ctx->kb.key = NULL;
++
++ return 0;
++}
++
++static void ecb_paes_exit(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ _free_kb_keybuf(&ctx->kb);
++}
++
+ static int ecb_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+ {
++ int rc;
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- memcpy(ctx->kb.key, in_key, key_len);
+- ctx->kb.keylen = key_len;
++ _free_kb_keybuf(&ctx->kb);
++ rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
++ if (rc)
++ return rc;
++
+ if (__paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+@@ -148,10 +211,12 @@ static struct crypto_alg ecb_paes_alg =
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ecb_paes_alg.cra_list),
++ .cra_init = ecb_paes_init,
++ .cra_exit = ecb_paes_exit,
+ .cra_u = {
+ .blkcipher = {
+- .min_keysize = MINKEYBLOBSIZE,
+- .max_keysize = MAXKEYBLOBSIZE,
++ .min_keysize = PAES_MIN_KEYSIZE,
++ .max_keysize = PAES_MAX_KEYSIZE,
+ .setkey = ecb_paes_set_key,
+ .encrypt = ecb_paes_encrypt,
+ .decrypt = ecb_paes_decrypt,
+@@ -159,6 +224,22 @@ static struct crypto_alg ecb_paes_alg =
+ }
+ };
+
++static int cbc_paes_init(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ ctx->kb.key = NULL;
++
++ return 0;
++}
++
++static void cbc_paes_exit(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ _free_kb_keybuf(&ctx->kb);
++}
++
+ static int __cbc_paes_set_key(struct s390_paes_ctx *ctx)
+ {
+ unsigned long fc;
+@@ -180,10 +261,14 @@ static int __cbc_paes_set_key(struct s39
+ static int cbc_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+ {
++ int rc;
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- memcpy(ctx->kb.key, in_key, key_len);
+- ctx->kb.keylen = key_len;
++ _free_kb_keybuf(&ctx->kb);
++ rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
++ if (rc)
++ return rc;
++
+ if (__cbc_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+@@ -252,10 +337,12 @@ static struct crypto_alg cbc_paes_alg =
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(cbc_paes_alg.cra_list),
++ .cra_init = cbc_paes_init,
++ .cra_exit = cbc_paes_exit,
+ .cra_u = {
+ .blkcipher = {
+- .min_keysize = MINKEYBLOBSIZE,
+- .max_keysize = MAXKEYBLOBSIZE,
++ .min_keysize = PAES_MIN_KEYSIZE,
++ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = cbc_paes_set_key,
+ .encrypt = cbc_paes_encrypt,
+@@ -264,6 +351,24 @@ static struct crypto_alg cbc_paes_alg =
+ }
+ };
+
++static int xts_paes_init(struct crypto_tfm *tfm)
++{
++ struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ ctx->kb[0].key = NULL;
++ ctx->kb[1].key = NULL;
++
++ return 0;
++}
++
++static void xts_paes_exit(struct crypto_tfm *tfm)
++{
++ struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ _free_kb_keybuf(&ctx->kb[0]);
++ _free_kb_keybuf(&ctx->kb[1]);
++}
++
+ static int __xts_paes_set_key(struct s390_pxts_ctx *ctx)
+ {
+ unsigned long fc;
+@@ -287,20 +392,27 @@ static int __xts_paes_set_key(struct s39
+ }
+
+ static int xts_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+- unsigned int key_len)
++ unsigned int xts_key_len)
+ {
++ int rc;
+ struct s390_pxts_ctx *ctx = crypto_tfm_ctx(tfm);
+ u8 ckey[2 * AES_MAX_KEY_SIZE];
+- unsigned int ckey_len, keytok_len;
++ unsigned int ckey_len, key_len;
+
+- if (key_len % 2)
++ if (xts_key_len % 2)
+ return -EINVAL;
+
+- keytok_len = key_len / 2;
+- memcpy(ctx->kb[0].key, in_key, keytok_len);
+- ctx->kb[0].keylen = keytok_len;
+- memcpy(ctx->kb[1].key, in_key + keytok_len, keytok_len);
+- ctx->kb[1].keylen = keytok_len;
++ key_len = xts_key_len / 2;
++
++ _free_kb_keybuf(&ctx->kb[0]);
++ _free_kb_keybuf(&ctx->kb[1]);
++ rc = _copy_key_to_kb(&ctx->kb[0], in_key, key_len);
++ if (rc)
++ return rc;
++ rc = _copy_key_to_kb(&ctx->kb[1], in_key + key_len, key_len);
++ if (rc)
++ return rc;
++
+ if (__xts_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+@@ -394,10 +506,12 @@ static struct crypto_alg xts_paes_alg =
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(xts_paes_alg.cra_list),
++ .cra_init = xts_paes_init,
++ .cra_exit = xts_paes_exit,
+ .cra_u = {
+ .blkcipher = {
+- .min_keysize = 2 * MINKEYBLOBSIZE,
+- .max_keysize = 2 * MAXKEYBLOBSIZE,
++ .min_keysize = 2 * PAES_MIN_KEYSIZE,
++ .max_keysize = 2 * PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = xts_paes_set_key,
+ .encrypt = xts_paes_encrypt,
+@@ -406,6 +520,22 @@ static struct crypto_alg xts_paes_alg =
+ }
+ };
+
++static int ctr_paes_init(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ ctx->kb.key = NULL;
++
++ return 0;
++}
++
++static void ctr_paes_exit(struct crypto_tfm *tfm)
++{
++ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
++
++ _free_kb_keybuf(&ctx->kb);
++}
++
+ static int __ctr_paes_set_key(struct s390_paes_ctx *ctx)
+ {
+ unsigned long fc;
+@@ -428,10 +558,14 @@ static int __ctr_paes_set_key(struct s39
+ static int ctr_paes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+ unsigned int key_len)
+ {
++ int rc;
+ struct s390_paes_ctx *ctx = crypto_tfm_ctx(tfm);
+
+- memcpy(ctx->kb.key, in_key, key_len);
+- ctx->kb.keylen = key_len;
++ _free_kb_keybuf(&ctx->kb);
++ rc = _copy_key_to_kb(&ctx->kb, in_key, key_len);
++ if (rc)
++ return rc;
++
+ if (__ctr_paes_set_key(ctx)) {
+ tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ return -EINVAL;
+@@ -541,10 +675,12 @@ static struct crypto_alg ctr_paes_alg =
+ .cra_type = &crypto_blkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_list = LIST_HEAD_INIT(ctr_paes_alg.cra_list),
++ .cra_init = ctr_paes_init,
++ .cra_exit = ctr_paes_exit,
+ .cra_u = {
+ .blkcipher = {
+- .min_keysize = MINKEYBLOBSIZE,
+- .max_keysize = MAXKEYBLOBSIZE,
++ .min_keysize = PAES_MIN_KEYSIZE,
++ .max_keysize = PAES_MAX_KEYSIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = ctr_paes_set_key,
+ .encrypt = ctr_paes_encrypt,
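
The key_blob handling above is an instance of the common "inline buffer with heap fallback" pattern. A self-contained user-space sketch of the same idea (hypothetical struct blob, not taken from the patch):

#include <stdlib.h>
#include <string.h>

struct blob {
	unsigned char *key;	/* points at inbuf or at heap memory */
	unsigned char inbuf[128];
	unsigned int keylen;
};

static int blob_set(struct blob *b, const unsigned char *key, unsigned int keylen)
{
	if (keylen <= sizeof(b->inbuf)) {
		b->key = b->inbuf;		/* small blob: no allocation */
	} else {
		b->key = malloc(keylen);	/* large blob: heap fallback */
		if (!b->key)
			return -1;
	}
	memcpy(b->key, key, keylen);
	b->keylen = keylen;
	return 0;
}

static void blob_free(struct blob *b)
{
	if (b->key && b->key != b->inbuf)	/* free only heap blobs */
		free(b->key);
	b->key = NULL;
}

As in _free_kb_keybuf(), the free path must distinguish the two cases, and the setkey paths free any previous blob before installing a new one.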
diff --git a/patches.suse/s390-pkey-add-cca-aes-cipher-key-support b/patches.suse/s390-pkey-add-cca-aes-cipher-key-support
new file mode 100644
index 0000000000..242df9d1ba
--- /dev/null
+++ b/patches.suse/s390-pkey-add-cca-aes-cipher-key-support
@@ -0,0 +1,1366 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Wed, 19 Jun 2019 14:26:05 +0200
+Subject: s390/pkey: add CCA AES cipher key support
+Git-commit: f2bbc96e7cfad3891b7bf9bd3e566b9b7ab4553d
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Introduce new ioctls, and the structs used with them, which are able
+to handle CCA AES secure keys and CCA AES cipher keys:
+
+PKEY_GENSECK2: Generate secure key, version 2.
+ Generate either a CCA AES secure key or a CCA AES cipher key.
+
+PKEY_CLR2SECK2: Generate secure key from clear key value, version 2.
+ Construct a CCA AES secure key or CCA AES cipher key from a given
+ clear key value.
+
+PKEY_VERIFYKEY2: Verify the given secure key, version 2.
+ Check for correct key type. If cardnr and domain are given, also
+ check if this apqn is able to handle this type of key. If cardnr and
+ domain are 0xFFFF, on return these values are filled with an apqn
+ able to handle this key. The function also checks for the master key
+ verification patterns of the key matching to the current or
+ alternate mkvp of the apqn. CCA AES cipher keys are also checked
+ for CPACF export allowed (CPRTCPAC flag). Currently CCA AES secure
+ keys and CCA AES cipher keys are supported (may get extended in the
+ future).
+
+PKEY_KBLOB2PROTK2: Transform a key blob (of any type) into
+ a protected key, version 2. The only difference from version 1 is that
+ this new ioctl has additional parameters to provide a list of
+ apqns to be used for the transformation.
+
+PKEY_APQNS4K: Generate a list of APQNs based on the key blob given.
+ Is able to find out which type of secure key is given (CCA AES
+ secure key or CCA AES cipher key) and tries to find all matching
+ crypto cards based on the MKVP and maybe other criteria (like CCA
+ AES cipher keys need a CEX6C or higher). The list of APQNs is
+ further filtered by the key's mkvp which needs to match to either
+ the current mkvp or the alternate mkvp (which is the old mkvp on CCA
+ adapters) of the apqns. The flags argument may be used to limit the
+ matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the
+ current mkvp of each apqn is compared. Likewise with the
+ PKEY_FLAGS_MATCH_ALT_MKVP. If both are given it is assumed to return
+ apqns where either the current or the alternate mkvp matches. If no
+ matching APQN is found, the ioctl returns with 0 but the
+ apqn_entries value is 0.
+
+PKEY_APQNS4KT: Generate a list of APQNs based on the key type given.
+ Build a list of APQNs based on the given key type and maybe further
+ restrict the list by given master key verification patterns.
+ For different key types there may be different ways to match the
+ master key verification patterns. For CCA keys (CCA data key and CCA
+ cipher key) the first 8 bytes of cur_mkvp refer to the current mkvp
+ value of the apqn and the first 8 bytes of the alt_mkvp refer to the
+ old mkvp. The flags argument controls if the apqns current and/or
+ alternate mkvp should match. If the PKEY_FLAGS_MATCH_CUR_MKVP is
+ given, only the current mkvp of each apqn is compared. Likewise with
+ the PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to
+ return apqns where either the current or the alternate mkvp
+ matches. If no matching APQN is found, the ioctl returns with 0 but
+ the apqn_entries value is 0.
+
+These new ioctls are now prepared for another new type of secure key
+blob which may come in the future. They all use a pointer to the key
+blob and a key blob length information instead of some hardcoded byte
+array. They all use the new enums pkey_key_type, pkey_key_size and
+pkey_key_info for getting/setting key type, key size and additional
+info about the key. All but the PKEY_VERIFYKEY2 ioctl now work based on a
+list of apqns. This list is walked through trying to perform the
+operation on exactly this apqn without any further checking (like card
+type or online state). If the apqn fails, simple the next one in the
+list is tried until success (return 0) or the end of the list is
+reached (return -1 with errno ENODEV). All apqns in the list need to
+be exact apqns (0xFFFF as any card or domain is not allowed). There
+are two new ioctls which can be used to build a list of apqns based on
+a key or key type and maybe restricted by match to a current or
+alternate master key verification pattern.
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/uapi/asm/pkey.h | 259 ++++++++++++-
+ drivers/s390/crypto/pkey_api.c | 652 +++++++++++++++++++++++++++++++++--
+ drivers/s390/crypto/zcrypt_ccamisc.c | 58 +--
+ drivers/s390/crypto/zcrypt_ccamisc.h | 8
+ 4 files changed, 900 insertions(+), 77 deletions(-)
+
+--- a/arch/s390/include/uapi/asm/pkey.h
++++ b/arch/s390/include/uapi/asm/pkey.h
+@@ -2,7 +2,7 @@
+ /*
+ * Userspace interface to the pkey device driver
+ *
+- * Copyright IBM Corp. 2017
++ * Copyright IBM Corp. 2017, 2019
+ *
+ * Author: Harald Freudenberger <freude@de.ibm.com>
+ *
+@@ -20,38 +20,74 @@
+
+ #define PKEY_IOCTL_MAGIC 'p'
+
+-#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
+-#define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */
+-#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
+-#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
+-
+-#define MINKEYBLOBSIZE SECKEYBLOBSIZE /* Minimum size of a key blob */
+-#define MAXKEYBLOBSIZE PROTKEYBLOBSIZE /* Maximum size of a key blob */
++#define SECKEYBLOBSIZE 64 /* secure key blob size is always 64 bytes */
++#define PROTKEYBLOBSIZE 80 /* protected key blob size is always 80 bytes */
++#define MAXPROTKEYSIZE 64 /* a protected key blob may be up to 64 bytes */
++#define MAXCLRKEYSIZE 32 /* a clear key value may be up to 32 bytes */
++#define MAXAESCIPHERKEYSIZE 136 /* our aes cipher keys are always 136 bytes */
++
++/* Minimum and maximum size of a key blob */
++#define MINKEYBLOBSIZE SECKEYBLOBSIZE
++#define MAXKEYBLOBSIZE MAXAESCIPHERKEYSIZE
+
+ /* defines for the type field within the pkey_protkey struct */
+-#define PKEY_KEYTYPE_AES_128 1
+-#define PKEY_KEYTYPE_AES_192 2
+-#define PKEY_KEYTYPE_AES_256 3
++#define PKEY_KEYTYPE_AES_128 1
++#define PKEY_KEYTYPE_AES_192 2
++#define PKEY_KEYTYPE_AES_256 3
++
++/* the newer ioctls use a pkey_key_type enum for type information */
++enum pkey_key_type {
++ PKEY_TYPE_CCA_DATA = (__u32) 1,
++ PKEY_TYPE_CCA_CIPHER = (__u32) 2,
++};
++
++/* the newer ioctls use a pkey_key_size enum for key size information */
++enum pkey_key_size {
++ PKEY_SIZE_AES_128 = (__u32) 128,
++ PKEY_SIZE_AES_192 = (__u32) 192,
++ PKEY_SIZE_AES_256 = (__u32) 256,
++ PKEY_SIZE_UNKNOWN = (__u32) 0xFFFFFFFF,
++};
++
++/* some of the newer ioctls use these flags */
++#define PKEY_FLAGS_MATCH_CUR_MKVP 0x00000002
++#define PKEY_FLAGS_MATCH_ALT_MKVP 0x00000004
++
++/* keygenflags defines for CCA AES cipher keys */
++#define PKEY_KEYGEN_XPRT_SYM 0x00008000
++#define PKEY_KEYGEN_XPRT_UASY 0x00004000
++#define PKEY_KEYGEN_XPRT_AASY 0x00002000
++#define PKEY_KEYGEN_XPRT_RAW 0x00001000
++#define PKEY_KEYGEN_XPRT_CPAC 0x00000800
++#define PKEY_KEYGEN_XPRT_DES 0x00000080
++#define PKEY_KEYGEN_XPRT_AES 0x00000040
++#define PKEY_KEYGEN_XPRT_RSA 0x00000008
++
++/* Struct to hold apqn target info (card/domain pair) */
++struct pkey_apqn {
++ __u16 card;
++ __u16 domain;
++};
+
+-/* Struct to hold a secure key blob */
++/* Struct to hold a CCA AES secure key blob */
+ struct pkey_seckey {
+ __u8 seckey[SECKEYBLOBSIZE]; /* the secure key blob */
+ };
+
+ /* Struct to hold protected key and length info */
+ struct pkey_protkey {
+- __u32 type; /* key type, one of the PKEY_KEYTYPE values */
++ __u32 type; /* key type, one of the PKEY_KEYTYPE_AES values */
+ __u32 len; /* bytes actually stored in protkey[] */
+ __u8 protkey[MAXPROTKEYSIZE]; /* the protected key blob */
+ };
+
+-/* Struct to hold a clear key value */
++/* Struct to hold an AES clear key value */
+ struct pkey_clrkey {
+ __u8 clrkey[MAXCLRKEYSIZE]; /* 16, 24, or 32 byte clear key value */
+ };
+
+ /*
+- * Generate secure key
++ * Generate CCA AES secure key.
+ */
+ struct pkey_genseck {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+@@ -62,7 +98,7 @@ struct pkey_genseck {
+ #define PKEY_GENSECK _IOWR(PKEY_IOCTL_MAGIC, 0x01, struct pkey_genseck)
+
+ /*
+- * Construct secure key from clear key value
++ * Construct CCA AES secure key from clear key value
+ */
+ struct pkey_clr2seck {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+@@ -74,7 +110,7 @@ struct pkey_clr2seck {
+ #define PKEY_CLR2SECK _IOWR(PKEY_IOCTL_MAGIC, 0x02, struct pkey_clr2seck)
+
+ /*
+- * Fabricate protected key from a secure key
++ * Fabricate AES protected key from a CCA AES secure key
+ */
+ struct pkey_sec2protk {
+ __u16 cardnr; /* in: card to use or FFFF for any */
+@@ -85,7 +121,7 @@ struct pkey_sec2protk {
+ #define PKEY_SEC2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x03, struct pkey_sec2protk)
+
+ /*
+- * Fabricate protected key from an clear key value
++ * Fabricate AES protected key from clear key value
+ */
+ struct pkey_clr2protk {
+ __u32 keytype; /* in: key type to generate */
+@@ -96,7 +132,7 @@ struct pkey_clr2protk {
+
+ /*
+ * Search for matching crypto card based on the Master Key
+- * Verification Pattern provided inside a secure key.
++ * Verification Pattern provided inside a CCA AES secure key.
+ */
+ struct pkey_findcard {
+ struct pkey_seckey seckey; /* in: the secure key blob */
+@@ -115,7 +151,7 @@ struct pkey_skey2pkey {
+ #define PKEY_SKEY2PKEY _IOWR(PKEY_IOCTL_MAGIC, 0x06, struct pkey_skey2pkey)
+
+ /*
+- * Verify the given secure key for being able to be useable with
++ * Verify the given CCA AES secure key for being usable with
+ * the pkey module. Check for correct key type and check for having at
+ * least one crypto card being able to handle this key (master key
+ * or old master key verification pattern matches).
+@@ -134,7 +170,7 @@ struct pkey_verifykey {
+ #define PKEY_VERIFY_ATTR_OLD_MKVP 0x00000100 /* key has old MKVP value */
+
+ /*
+- * Generate (AES) random protected key.
++ * Generate AES random protected key.
+ */
+ struct pkey_genprotk {
+ __u32 keytype; /* in: key type to generate */
+@@ -144,7 +180,7 @@ struct pkey_genprotk {
+ #define PKEY_GENPROTK _IOWR(PKEY_IOCTL_MAGIC, 0x08, struct pkey_genprotk)
+
+ /*
+- * Verify an (AES) protected key.
++ * Verify an AES protected key.
+ */
+ struct pkey_verifyprotk {
+ struct pkey_protkey protkey; /* in: the protected key to verify */
+@@ -160,7 +196,184 @@ struct pkey_kblob2pkey {
+ __u32 keylen; /* in: the key blob length */
+ struct pkey_protkey protkey; /* out: the protected key */
+ };
+-
+ #define PKEY_KBLOB2PROTK _IOWR(PKEY_IOCTL_MAGIC, 0x0A, struct pkey_kblob2pkey)
+
++/*
++ * Generate secure key, version 2.
++ * Generate either a CCA AES secure key or a CCA AES cipher key.
++ * There needs to be a list of apqns given with at least one entry in there.
++ * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
++ * is not supported. The implementation walks through the list of apqns and
++ * tries to send the request to each apqn without any further checking (like
++ * card type or online state). If the apqn fails, simply the next one in the
++ * list is tried until success (return 0) or the end of the list is reached
++ * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
++ * generate a list of apqns based on the key type to generate.
++ * The keygenflags argument is passed to the low level generation functions
++ * individual for the key type and has a key type specific meaning. Currently
++ * only CCA AES cipher keys react to this parameter: Use one or more of the
++ * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
++ * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
++ */
++struct pkey_genseck2 {
++ struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets*/
++ __u32 apqn_entries; /* in: # of apqn target list entries */
++ enum pkey_key_type type; /* in: key type to generate */
++ enum pkey_key_size size; /* in: key size to generate */
++ __u32 keygenflags; /* in: key generation flags */
++ __u8 __user *key; /* in: pointer to key blob buffer */
++ __u32 keylen; /* in: available key blob buffer size */
++ /* out: actual key blob size */
++};
++#define PKEY_GENSECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x11, struct pkey_genseck2)
++
++/*
++ * Generate secure key from clear key value, version 2.
++ * Construct a CCA AES secure key or CCA AES cipher key from a given clear key
++ * value.
++ * There needs to be a list of apqns given with at least one entry in there.
++ * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
++ * is not supported. The implementation walks through the list of apqns and
++ * tries to send the request to each apqn without any further checking (like
++ * card type or online state). If the apqn fails, simply the next one in the
++ * list is tried until success (return 0) or the end of the list is reached
++ * (return -1 with errno ENODEV). You may use the PKEY_APQNS4KT ioctl to
++ * generate a list of apqns based on the key type to generate.
++ * The keygenflags argument is passed to the low level generation functions
++ * individual for the key type and has a key type specific meaning. Currently
++ * only CCA AES cipher keys react to this parameter: Use one or more of the
++ * PKEY_KEYGEN_* flags to widen the export possibilities. By default a cipher
++ * key is only exportable for CPACF (PKEY_KEYGEN_XPRT_CPAC).
++ */
++struct pkey_clr2seck2 {
++ struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
++ __u32 apqn_entries; /* in: # of apqn target list entries */
++ enum pkey_key_type type; /* in: key type to generate */
++ enum pkey_key_size size; /* in: key size to generate */
++ __u32 keygenflags; /* in: key generation flags */
++ struct pkey_clrkey clrkey; /* in: the clear key value */
++ __u8 __user *key; /* in: pointer to key blob buffer */
++ __u32 keylen; /* in: available key blob buffer size */
++ /* out: actual key blob size */
++};
++#define PKEY_CLR2SECK2 _IOWR(PKEY_IOCTL_MAGIC, 0x12, struct pkey_clr2seck2)
++
++/*
++ * Verify the given secure key, version 2.
++ * Check for correct key type. If cardnr and domain are given (are not
++ * 0xFFFF) also check if this apqn is able to handle this type of key.
++ * If cardnr and/or domain is 0xFFFF, on return these values are filled
++ * with one apqn able to handle this key.
++ * The function also checks for the master key verification patterns
++ * of the key matching to the current or alternate mkvp of the apqn.
++ * Currently CCA AES secure keys and CCA AES cipher keys are supported.
++ * The flags field is updated with some additional info about the apqn mkvp
++ * match: If the current mkvp matches to the key's mkvp then the
++ * PKEY_FLAGS_MATCH_CUR_MKVP bit is set, if the alternate mkvp matches to
++ * the key's mkvp the PKEY_FLAGS_MATCH_ALT_MKVP is set. For CCA keys the
++ * alternate mkvp is the old master key verification pattern.
++ * CCA AES cipher keys are also checked to have the CPACF export allowed
++ * bit enabled (XPRTCPAC) in the kmf1 field.
++ * The ioctl returns 0 as long as the given or found apqn matches the
++ * key's mkvp with either its current or its alternate mkvp. If the given
++ * apqn does not match or there is no such apqn found, -1 with errno
++ * ENODEV is returned.
++ */
++struct pkey_verifykey2 {
++ __u8 __user *key; /* in: pointer to key blob */
++ __u32 keylen; /* in: key blob size */
++ __u16 cardnr; /* in/out: card number */
++ __u16 domain; /* in/out: domain number */
++ enum pkey_key_type type; /* out: the key type */
++ enum pkey_key_size size; /* out: the key size */
++ __u32 flags; /* out: additional key info flags */
++};
++#define PKEY_VERIFYKEY2 _IOWR(PKEY_IOCTL_MAGIC, 0x17, struct pkey_verifykey2)
++
++/*
++ * Transform a key blob (of any type) into a protected key, version 2.
++ * There needs to be a list of apqns given with at least one entry in there.
++ * All apqns in the list need to be exact apqns, 0xFFFF as ANY card or domain
++ * is not supported. The implementation walks through the list of apqns and
++ * tries to send the request to each apqn without any further checking (like
++ * card type or online state). If the apqn fails, simply the next one in the
++ * list is tried until success (return 0) or the end of the list is reached
++ * (return -1 with errno ENODEV). You may use the PKEY_APQNS4K ioctl to
++ * generate a list of apqns based on the key.
++ */
++struct pkey_kblob2pkey2 {
++ __u8 __user *key; /* in: pointer to key blob */
++ __u32 keylen; /* in: key blob size */
++ struct pkey_apqn __user *apqns; /* in: ptr to list of apqn targets */
++ __u32 apqn_entries; /* in: # of apqn target list entries */
++ struct pkey_protkey protkey; /* out: the protected key */
++};
++#define PKEY_KBLOB2PROTK2 _IOWR(PKEY_IOCTL_MAGIC, 0x1A, struct pkey_kblob2pkey2)
++
++/*
++ * Build a list of APQNs based on a key blob given.
++ * Is able to find out which type of secure key is given (CCA AES secure
++ * key or CCA AES cipher key) and tries to find all matching crypto cards
++ * based on the MKVP and maybe other criteria (like CCA AES cipher keys
++ * need a CEX6C or higher). The list of APQNs is further filtered by the key's
++ * mkvp which needs to match to either the current mkvp or the alternate mkvp
++ * (which is the old mkvp on CCA adapters) of the apqns. The flags argument may
++ * be used to limit the matching apqns. If the PKEY_FLAGS_MATCH_CUR_MKVP is
++ * given, only the current mkvp of each apqn is compared. Likewise with the
++ * PKEY_FLAGS_MATCH_ALT_MKVP. If both are given, it is assumed to
++ * return apqns where either the current or the alternate mkvp
++ * matches. At least one of the matching flags needs to be given.
++ * The list of matching apqns is stored into the space given by the apqns
++ * argument and the number of stored entries goes into apqn_entries. If the list
++ * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
++ * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
++ * but the number of apqn targets does not fit into the list, the apqn_entries
++ * field is updated with the number of required entries but there are no apqn
++ * values stored in the list and the ioctl returns with ENOSPC. If no matching
++ * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
++ */
++struct pkey_apqns4key {
++ __u8 __user *key; /* in: pointer to key blob */
++ __u32 keylen; /* in: key blob size */
++ __u32 flags; /* in: match controlling flags */
++ struct pkey_apqn __user *apqns; /* in/out: ptr to list of apqn targets*/
++ __u32 apqn_entries; /* in: max # of apqn entries in the list */
++ /* out: # apqns stored into the list */
++};
++#define PKEY_APQNS4K _IOWR(PKEY_IOCTL_MAGIC, 0x1B, struct pkey_apqns4key)
++
++/*
++ * Build a list of APQNs based on a key type given.
++ * Build a list of APQNs based on a given key type and maybe further
++ * restrict the list by given master key verification patterns.
++ * For different key types there may be different ways to match the
++ * master key verification patterns. For CCA keys (CCA data key and CCA
++ * cipher key) the first 8 bytes of cur_mkvp refer to the current mkvp value
++ * of the apqn and the first 8 bytes of the alt_mkvp refer to the old mkvp.
++ * The flags argument controls if the apqns current and/or alternate mkvp
++ * should match. If the PKEY_FLAGS_MATCH_CUR_MKVP is given, only the current
++ * mkvp of each apqn is compared. Likewise with the PKEY_FLAGS_MATCH_ALT_MKVP.
++ * If both are given, it is assumed to return apqns where either the
++ * current or the alternate mkvp matches. If no match flag is given
++ * (flags is 0) the mkvp values are ignored for the match process.
++ * The list of matching apqns is stored into the space given by the apqns
++ * argument and the number of stored entries goes into apqn_entries. If the list
++ * is empty (apqn_entries is 0) the apqn_entries field is updated to the number
++ * of apqn targets found and the ioctl returns with 0. If apqn_entries is > 0
++ * but the number of apqn targets does not fit into the list, the apqn_entries
++ * field is updated with the number of required entries but there are no apqn
++ * values stored in the list and the ioctl returns with ENOSPC. If no matching
++ * APQN is found, the ioctl returns with 0 but the apqn_entries value is 0.
++ */
++struct pkey_apqns4keytype {
++ enum pkey_key_type type; /* in: key type */
++ __u8 cur_mkvp[32]; /* in: current mkvp */
++ __u8 alt_mkvp[32]; /* in: alternate mkvp */
++ __u32 flags; /* in: match controlling flags */
++ struct pkey_apqn __user *apqns; /* in/out: ptr to list of apqn targets*/
++ __u32 apqn_entries; /* in: max # of apqn entries in the list */
++ /* out: # apqns stored into the list */
++};
++#define PKEY_APQNS4KT _IOWR(PKEY_IOCTL_MAGIC, 0x1C, struct pkey_apqns4keytype)
++
+ #endif /* _UAPI_PKEY_H */
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -30,6 +30,9 @@ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("IBM Corporation");
+ MODULE_DESCRIPTION("s390 protected key interface");
+
++#define KEYBLOBBUFSIZE 8192 /* key buffer size used for internal processing */
++#define MAXAPQNSINLIST 64 /* max 64 apqns within an apqn list */
++
+ /* mask of available pckmo subfunctions, fetched once at module init */
+ static cpacf_mask_t pckmo_functions;
+
+@@ -126,27 +129,39 @@ static int pkey_clr2protkey(u32 keytype,
+ /*
+ * Find card and transform secure key into protected key.
+ */
+-static int pkey_skey2pkey(const struct pkey_seckey *seckey,
+- struct pkey_protkey *pkey)
++static int pkey_skey2pkey(const u8 *key, struct pkey_protkey *pkey)
+ {
+- u16 cardnr, domain;
+ int rc, verify;
++ u16 cardnr, domain;
++ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+ /*
+- * The cca_sec2protkey call may fail when a card has been
++ * The cca_xxx2protkey call may fail when a card has been
+ * addressed where the master key was changed after last fetch
+ * of the mkvp into the cache. Try 3 times: first without verify,
+ * then with verify and last round with verify and old master
+ * key verification pattern match not ignored.
+ */
+ for (verify = 0; verify < 3; verify++) {
+- rc = cca_findcard(seckey->seckey, &cardnr, &domain, verify);
++ rc = cca_findcard(key, &cardnr, &domain, verify);
+ if (rc < 0)
+ continue;
+ if (rc > 0 && verify < 2)
+ continue;
+- rc = cca_sec2protkey(cardnr, domain, seckey->seckey,
+- pkey->protkey, &pkey->len, &pkey->type);
++ switch (hdr->version) {
++ case TOKVER_CCA_AES:
++ rc = cca_sec2protkey(cardnr, domain,
++ key, pkey->protkey,
++ &pkey->len, &pkey->type);
++ break;
++ case TOKVER_CCA_VLSC:
++ rc = cca_cipher2protkey(cardnr, domain,
++ key, pkey->protkey,
++ &pkey->len, &pkey->type);
++ break;
++ default:
++ return -EINVAL;
++ }
+ if (rc == 0)
+ break;
+ }
+@@ -324,14 +339,18 @@ static int pkey_ccainttok2pkey(const u8
+ case TOKVER_CCA_AES:
+ if (keylen != sizeof(struct secaeskeytoken))
+ return -EINVAL;
+-
+- return pkey_skey2pkey((struct pkey_seckey *)key,
+- protkey);
++ break;
++ case TOKVER_CCA_VLSC:
++ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
++ return -EINVAL;
++ break;
+ default:
+ DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
+ return -EINVAL;
+ }
++
++ return pkey_skey2pkey(key, protkey);
+ }
+
+ /*
+@@ -340,28 +359,394 @@ static int pkey_ccainttok2pkey(const u8
+ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+ {
++ int rc;
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+
+- if (keylen < sizeof(struct keytoken_header))
++ if (keylen < sizeof(struct keytoken_header)) {
++ DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
+ return -EINVAL;
++ }
+
+ switch (hdr->type) {
+ case TOKTYPE_NON_CCA:
+- return pkey_nonccatok2pkey(key, keylen, protkey);
++ rc = pkey_nonccatok2pkey(key, keylen, protkey);
++ break;
+ case TOKTYPE_CCA_INTERNAL:
+- return pkey_ccainttok2pkey(key, keylen, protkey);
++ rc = pkey_ccainttok2pkey(key, keylen, protkey);
++ break;
+ default:
+- DEBUG_ERR("%s unknown/unsupported blob type %d\n", __func__,
+- hdr->type);
++ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
++ __func__, hdr->type);
+ return -EINVAL;
+ }
++
++ DEBUG_DBG("%s rc=%d\n", __func__, rc);
++ return rc;
++
+ }
+ EXPORT_SYMBOL(pkey_keyblob2pkey);
+
++static int pkey_genseckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
++ enum pkey_key_type ktype, enum pkey_key_size ksize,
++ u32 kflags, u8 *keybuf, size_t *keybufsize)
++{
++ int i, card, dom, rc;
++
++ /* check for at least one apqn given */
++ if (!apqns || !nr_apqns)
++ return -EINVAL;
++
++ /* check key type and size */
++ switch (ktype) {
++ case PKEY_TYPE_CCA_DATA:
++ case PKEY_TYPE_CCA_CIPHER:
++ if (*keybufsize < SECKEYBLOBSIZE)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ switch (ksize) {
++ case PKEY_SIZE_AES_128:
++ case PKEY_SIZE_AES_192:
++ case PKEY_SIZE_AES_256:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* simple try all apqns from the list */
++ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
++ card = apqns[i].card;
++ dom = apqns[i].domain;
++ if (ktype == PKEY_TYPE_CCA_DATA) {
++ rc = cca_genseckey(card, dom, ksize, keybuf);
++ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
++ } else /* TOKVER_CCA_VLSC */
++ rc = cca_gencipherkey(card, dom, ksize, kflags,
++ keybuf, keybufsize);
++ if (rc == 0)
++ break;
++ }
++
++ return rc;
++}
++
++static int pkey_clr2seckey2(const struct pkey_apqn *apqns, size_t nr_apqns,
++ enum pkey_key_type ktype, enum pkey_key_size ksize,
++ u32 kflags, const u8 *clrkey,
++ u8 *keybuf, size_t *keybufsize)
++{
++ int i, card, dom, rc;
++
++ /* check for at least one apqn given */
++ if (!apqns || !nr_apqns)
++ return -EINVAL;
++
++ /* check key type and size */
++ switch (ktype) {
++ case PKEY_TYPE_CCA_DATA:
++ case PKEY_TYPE_CCA_CIPHER:
++ if (*keybufsize < SECKEYBLOBSIZE)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ switch (ksize) {
++ case PKEY_SIZE_AES_128:
++ case PKEY_SIZE_AES_192:
++ case PKEY_SIZE_AES_256:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ /* simple try all apqns from the list */
++ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
++ card = apqns[i].card;
++ dom = apqns[i].domain;
++ if (ktype == PKEY_TYPE_CCA_DATA) {
++ rc = cca_clr2seckey(card, dom, ksize,
++ clrkey, keybuf);
++ *keybufsize = (rc ? 0 : SECKEYBLOBSIZE);
++ } else /* TOKVER_CCA_VLSC */
++ rc = cca_clr2cipherkey(card, dom, ksize, kflags,
++ clrkey, keybuf, keybufsize);
++ if (rc == 0)
++ break;
++ }
++
++ return rc;
++}
++
++static int pkey_verifykey2(const u8 *key, size_t keylen,
++ u16 *cardnr, u16 *domain,
++ enum pkey_key_type *ktype,
++ enum pkey_key_size *ksize, u32 *flags)
++{
++ int rc;
++ u32 _nr_apqns, *_apqns = NULL;
++ struct keytoken_header *hdr = (struct keytoken_header *)key;
++
++ if (keylen < sizeof(struct keytoken_header) ||
++ hdr->type != TOKTYPE_CCA_INTERNAL)
++ return -EINVAL;
++
++ if (hdr->version == TOKVER_CCA_AES) {
++ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
++
++ rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
++ if (rc)
++ goto out;
++ if (ktype)
++ *ktype = PKEY_TYPE_CCA_DATA;
++ if (ksize)
++ *ksize = (enum pkey_key_size) t->bitsize;
++
++ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
++ ZCRYPT_CEX3C, t->mkvp, 0, 1);
++ if (rc == 0 && flags)
++ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
++ if (rc == -ENODEV) {
++ rc = cca_findcard2(&_apqns, &_nr_apqns,
++ *cardnr, *domain,
++ ZCRYPT_CEX3C, 0, t->mkvp, 1);
++ if (rc == 0 && flags)
++ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
++ }
++ if (rc)
++ goto out;
++
++ *cardnr = ((struct pkey_apqn *)_apqns)->card;
++ *domain = ((struct pkey_apqn *)_apqns)->domain;
++
++ } else if (hdr->version == TOKVER_CCA_VLSC) {
++ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
++
++ rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
++ if (rc)
++ goto out;
++ if (ktype)
++ *ktype = PKEY_TYPE_CCA_CIPHER;
++ if (ksize) {
++ *ksize = PKEY_SIZE_UNKNOWN;
++ if (!t->plfver && t->wpllen == 512)
++ *ksize = PKEY_SIZE_AES_128;
++ else if (!t->plfver && t->wpllen == 576)
++ *ksize = PKEY_SIZE_AES_192;
++ else if (!t->plfver && t->wpllen == 640)
++ *ksize = PKEY_SIZE_AES_256;
++ }
++
++ rc = cca_findcard2(&_apqns, &_nr_apqns, *cardnr, *domain,
++ ZCRYPT_CEX6, t->mkvp0, 0, 1);
++ if (rc == 0 && flags)
++ *flags = PKEY_FLAGS_MATCH_CUR_MKVP;
++ if (rc == -ENODEV) {
++ rc = cca_findcard2(&_apqns, &_nr_apqns,
++ *cardnr, *domain,
++ ZCRYPT_CEX6, 0, t->mkvp0, 1);
++ if (rc == 0 && flags)
++ *flags = PKEY_FLAGS_MATCH_ALT_MKVP;
++ }
++ if (rc)
++ goto out;
++
++ *cardnr = ((struct pkey_apqn *)_apqns)->card;
++ *domain = ((struct pkey_apqn *)_apqns)->domain;
++
++ } else
++ rc = -EINVAL;
++
++out:
++ kfree(_apqns);
++ return rc;
++}
++
++static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
++ const u8 *key, size_t keylen,
++ struct pkey_protkey *pkey)
++{
++ int i, card, dom, rc;
++ struct keytoken_header *hdr = (struct keytoken_header *)key;
++
++ /* check for at least one apqn given */
++ if (!apqns || !nr_apqns)
++ return -EINVAL;
++
++ if (keylen < sizeof(struct keytoken_header))
++ return -EINVAL;
++
++ switch (hdr->type) {
++ case TOKTYPE_NON_CCA:
++ return pkey_nonccatok2pkey(key, keylen, pkey);
++ case TOKTYPE_CCA_INTERNAL:
++ switch (hdr->version) {
++ case TOKVER_CCA_AES:
++ if (keylen != sizeof(struct secaeskeytoken))
++ return -EINVAL;
++ if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
++ return -EINVAL;
++ break;
++ case TOKVER_CCA_VLSC:
++ if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
++ return -EINVAL;
++ if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
++ return -EINVAL;
++ break;
++ default:
++ DEBUG_ERR("%s unknown CCA internal token version %d\n",
++ __func__, hdr->version);
++ return -EINVAL;
++ }
++ break;
++ default:
++ DEBUG_ERR("%s unknown/unsupported blob type %d\n",
++ __func__, hdr->type);
++ return -EINVAL;
++ }
++
++ /* simple try all apqns from the list */
++ for (i = 0, rc = -ENODEV; i < nr_apqns; i++) {
++ card = apqns[i].card;
++ dom = apqns[i].domain;
++ if (hdr->version == TOKVER_CCA_AES)
++ rc = cca_sec2protkey(card, dom, key, pkey->protkey,
++ &pkey->len, &pkey->type);
++ else /* TOKVER_CCA_VLSC */
++ rc = cca_cipher2protkey(card, dom, key, pkey->protkey,
++ &pkey->len, &pkey->type);
++ if (rc == 0)
++ break;
++ }
++
++ return rc;
++}
++
++static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags,
++ struct pkey_apqn *apqns, size_t *nr_apqns)
++{
++	int rc = -EINVAL;
++ u32 _nr_apqns, *_apqns = NULL;
++ struct keytoken_header *hdr = (struct keytoken_header *)key;
++
++ if (keylen < sizeof(struct keytoken_header) ||
++ hdr->type != TOKTYPE_CCA_INTERNAL ||
++ flags == 0)
++ return -EINVAL;
++
++ if (hdr->version == TOKVER_CCA_AES || hdr->version == TOKVER_CCA_VLSC) {
++ int minhwtype = ZCRYPT_CEX3C;
++ u64 cur_mkvp = 0, old_mkvp = 0;
++
++ if (hdr->version == TOKVER_CCA_AES) {
++ struct secaeskeytoken *t = (struct secaeskeytoken *)key;
++
++ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
++ cur_mkvp = t->mkvp;
++ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
++ old_mkvp = t->mkvp;
++ } else {
++ struct cipherkeytoken *t = (struct cipherkeytoken *)key;
++
++ minhwtype = ZCRYPT_CEX6;
++ if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
++ cur_mkvp = t->mkvp0;
++ if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
++ old_mkvp = t->mkvp0;
++ }
++ rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
++ minhwtype, cur_mkvp, old_mkvp, 1);
++ if (rc)
++ goto out;
++ if (apqns) {
++ if (*nr_apqns < _nr_apqns)
++ rc = -ENOSPC;
++ else
++ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
++ }
++ *nr_apqns = _nr_apqns;
++ }
++
++out:
++ kfree(_apqns);
++ return rc;
++}
++
++static int pkey_apqns4keytype(enum pkey_key_type ktype,
++ u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags,
++ struct pkey_apqn *apqns, size_t *nr_apqns)
++{
++ int rc = -EINVAL;
++ u32 _nr_apqns, *_apqns = NULL;
++
++ if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) {
++		/* 64-bit copies, named so they don't shadow the u8[32] params */
++		u64 cur_mkvp64 = 0, old_mkvp64 = 0;
++		int minhwtype = ZCRYPT_CEX3C;
++
++		if (flags & PKEY_FLAGS_MATCH_CUR_MKVP)
++			cur_mkvp64 = *((u64 *) cur_mkvp);
++		if (flags & PKEY_FLAGS_MATCH_ALT_MKVP)
++			old_mkvp64 = *((u64 *) alt_mkvp);
++		if (ktype == PKEY_TYPE_CCA_CIPHER)
++			minhwtype = ZCRYPT_CEX6;
++		rc = cca_findcard2(&_apqns, &_nr_apqns, 0xFFFF, 0xFFFF,
++				   minhwtype, cur_mkvp64, old_mkvp64, 1);
++ if (rc)
++ goto out;
++ if (apqns) {
++ if (*nr_apqns < _nr_apqns)
++ rc = -ENOSPC;
++ else
++ memcpy(apqns, _apqns, _nr_apqns * sizeof(u32));
++ }
++ *nr_apqns = _nr_apqns;
++ }
++
++out:
++ kfree(_apqns);
++ return rc;
++}
++
+ /*
+ * File io functions
+ */
+
++static void *_copy_key_from_user(void __user *ukey, size_t keylen)
++{
++ void *kkey;
++
++ if (!ukey || keylen < MINKEYBLOBSIZE || keylen > KEYBLOBBUFSIZE)
++ return ERR_PTR(-EINVAL);
++ kkey = kmalloc(keylen, GFP_KERNEL);
++ if (!kkey)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(kkey, ukey, keylen)) {
++ kfree(kkey);
++ return ERR_PTR(-EFAULT);
++ }
++
++ return kkey;
++}
++
++static void *_copy_apqns_from_user(void __user *uapqns, size_t nr_apqns)
++{
++ void *kapqns = NULL;
++ size_t nbytes;
++
++ if (uapqns && nr_apqns > 0) {
++ nbytes = nr_apqns * sizeof(struct pkey_apqn);
++ kapqns = kmalloc(nbytes, GFP_KERNEL);
++ if (!kapqns)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(kapqns, uapqns, nbytes))
++ return ERR_PTR(-EFAULT);
++ }
++
++ return kapqns;
++}
++
+ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+ {
+@@ -452,7 +837,7 @@ static long pkey_unlocked_ioctl(struct f
+
+ if (copy_from_user(&ksp, usp, sizeof(ksp)))
+ return -EFAULT;
+- rc = pkey_skey2pkey(&ksp.seckey, &ksp.protkey);
++ rc = pkey_skey2pkey(ksp.seckey.seckey, &ksp.protkey);
+ DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+ if (rc)
+ break;
+@@ -502,24 +887,148 @@ static long pkey_unlocked_ioctl(struct f
+ case PKEY_KBLOB2PROTK: {
+ struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey ktp;
+- u8 __user *ukey;
+ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
+- if (ktp.keylen < MINKEYBLOBSIZE ||
+- ktp.keylen > MAXKEYBLOBSIZE)
+- return -EINVAL;
+- ukey = ktp.key;
+- kkey = kmalloc(ktp.keylen, GFP_KERNEL);
+- if (kkey == NULL)
++ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
++ if (IS_ERR(kkey))
++ return PTR_ERR(kkey);
++ rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
++ DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
++ kfree(kkey);
++ if (rc)
++ break;
++ if (copy_to_user(utp, &ktp, sizeof(ktp)))
++ return -EFAULT;
++ break;
++ }
++ case PKEY_GENSECK2: {
++ struct pkey_genseck2 __user *ugs = (void __user *) arg;
++ struct pkey_genseck2 kgs;
++ struct pkey_apqn *apqns;
++ size_t klen = KEYBLOBBUFSIZE;
++ u8 *kkey;
++
++ if (copy_from_user(&kgs, ugs, sizeof(kgs)))
++ return -EFAULT;
++ apqns = _copy_apqns_from_user(kgs.apqns, kgs.apqn_entries);
++ if (IS_ERR(apqns))
++ return PTR_ERR(apqns);
++ kkey = kmalloc(klen, GFP_KERNEL);
++ if (!kkey) {
++ kfree(apqns);
++ return -ENOMEM;
++ }
++ rc = pkey_genseckey2(apqns, kgs.apqn_entries,
++ kgs.type, kgs.size, kgs.keygenflags,
++ kkey, &klen);
++ DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
++ kfree(apqns);
++ if (rc) {
++ kfree(kkey);
++ break;
++ }
++ if (kgs.key) {
++ if (kgs.keylen < klen) {
++ kfree(kkey);
++ return -EINVAL;
++ }
++ if (copy_to_user(kgs.key, kkey, klen)) {
++ kfree(kkey);
++ return -EFAULT;
++ }
++ }
++ kgs.keylen = klen;
++ if (copy_to_user(ugs, &kgs, sizeof(kgs)))
++ rc = -EFAULT;
++ kfree(kkey);
++ break;
++ }
++ case PKEY_CLR2SECK2: {
++ struct pkey_clr2seck2 __user *ucs = (void __user *) arg;
++ struct pkey_clr2seck2 kcs;
++ struct pkey_apqn *apqns;
++ size_t klen = KEYBLOBBUFSIZE;
++ u8 *kkey;
++
++ if (copy_from_user(&kcs, ucs, sizeof(kcs)))
++ return -EFAULT;
++ apqns = _copy_apqns_from_user(kcs.apqns, kcs.apqn_entries);
++ if (IS_ERR(apqns))
++ return PTR_ERR(apqns);
++ kkey = kmalloc(klen, GFP_KERNEL);
++ if (!kkey) {
++ kfree(apqns);
+ return -ENOMEM;
+- if (copy_from_user(kkey, ukey, ktp.keylen)) {
++ }
++ rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
++ kcs.type, kcs.size, kcs.keygenflags,
++ kcs.clrkey.clrkey, kkey, &klen);
++ DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
++ kfree(apqns);
++ if (rc) {
+ kfree(kkey);
++ break;
++ }
++ if (kcs.key) {
++ if (kcs.keylen < klen) {
++ kfree(kkey);
++ return -EINVAL;
++ }
++ if (copy_to_user(kcs.key, kkey, klen)) {
++ kfree(kkey);
++ return -EFAULT;
++ }
++ }
++ kcs.keylen = klen;
++ if (copy_to_user(ucs, &kcs, sizeof(kcs)))
++ rc = -EFAULT;
++ memzero_explicit(&kcs, sizeof(kcs));
++ kfree(kkey);
++ break;
++ }
++ case PKEY_VERIFYKEY2: {
++ struct pkey_verifykey2 __user *uvk = (void __user *) arg;
++ struct pkey_verifykey2 kvk;
++ u8 *kkey;
++
++ if (copy_from_user(&kvk, uvk, sizeof(kvk)))
++ return -EFAULT;
++ kkey = _copy_key_from_user(kvk.key, kvk.keylen);
++ if (IS_ERR(kkey))
++ return PTR_ERR(kkey);
++ rc = pkey_verifykey2(kkey, kvk.keylen,
++ &kvk.cardnr, &kvk.domain,
++ &kvk.type, &kvk.size, &kvk.flags);
++ DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
++ kfree(kkey);
++ if (rc)
++ break;
++ if (copy_to_user(uvk, &kvk, sizeof(kvk)))
++ return -EFAULT;
++ break;
++ }
++ case PKEY_KBLOB2PROTK2: {
++ struct pkey_kblob2pkey2 __user *utp = (void __user *) arg;
++ struct pkey_kblob2pkey2 ktp;
++ struct pkey_apqn *apqns = NULL;
++ u8 *kkey;
++
++ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
++ apqns = _copy_apqns_from_user(ktp.apqns, ktp.apqn_entries);
++ if (IS_ERR(apqns))
++ return PTR_ERR(apqns);
++ kkey = _copy_key_from_user(ktp.key, ktp.keylen);
++ if (IS_ERR(kkey)) {
++ kfree(apqns);
++ return PTR_ERR(kkey);
+ }
+- rc = pkey_keyblob2pkey(kkey, ktp.keylen, &ktp.protkey);
+- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
++ rc = pkey_keyblob2pkey2(apqns, ktp.apqn_entries,
++ kkey, ktp.keylen, &ktp.protkey);
++ DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
++ kfree(apqns);
+ kfree(kkey);
+ if (rc)
+ break;
+@@ -527,6 +1036,97 @@ static long pkey_unlocked_ioctl(struct f
+ return -EFAULT;
+ break;
+ }
++ case PKEY_APQNS4K: {
++ struct pkey_apqns4key __user *uak = (void __user *) arg;
++ struct pkey_apqns4key kak;
++ struct pkey_apqn *apqns = NULL;
++ size_t nr_apqns, len;
++ u8 *kkey;
++
++ if (copy_from_user(&kak, uak, sizeof(kak)))
++ return -EFAULT;
++ nr_apqns = kak.apqn_entries;
++ if (nr_apqns) {
++ apqns = kmalloc_array(nr_apqns,
++ sizeof(struct pkey_apqn),
++ GFP_KERNEL);
++ if (!apqns)
++ return -ENOMEM;
++ }
++ kkey = _copy_key_from_user(kak.key, kak.keylen);
++ if (IS_ERR(kkey)) {
++ kfree(apqns);
++ return PTR_ERR(kkey);
++ }
++ rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
++ apqns, &nr_apqns);
++ DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
++ kfree(kkey);
++ if (rc && rc != -ENOSPC) {
++ kfree(apqns);
++ break;
++ }
++ if (!rc && kak.apqns) {
++ if (nr_apqns > kak.apqn_entries) {
++ kfree(apqns);
++ return -EINVAL;
++ }
++ len = nr_apqns * sizeof(struct pkey_apqn);
++ if (len) {
++ if (copy_to_user(kak.apqns, apqns, len)) {
++ kfree(apqns);
++ return -EFAULT;
++ }
++ }
++ }
++ kak.apqn_entries = nr_apqns;
++ if (copy_to_user(uak, &kak, sizeof(kak)))
++ rc = -EFAULT;
++ kfree(apqns);
++ break;
++ }
++ case PKEY_APQNS4KT: {
++ struct pkey_apqns4keytype __user *uat = (void __user *) arg;
++ struct pkey_apqns4keytype kat;
++ struct pkey_apqn *apqns = NULL;
++ size_t nr_apqns, len;
++
++ if (copy_from_user(&kat, uat, sizeof(kat)))
++ return -EFAULT;
++ nr_apqns = kat.apqn_entries;
++ if (nr_apqns) {
++ apqns = kmalloc_array(nr_apqns,
++ sizeof(struct pkey_apqn),
++ GFP_KERNEL);
++ if (!apqns)
++ return -ENOMEM;
++ }
++ rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
++ kat.flags, apqns, &nr_apqns);
++ DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
++ if (rc && rc != -ENOSPC) {
++ kfree(apqns);
++ break;
++ }
++ if (!rc && kat.apqns) {
++ if (nr_apqns > kat.apqn_entries) {
++ kfree(apqns);
++ return -EINVAL;
++ }
++ len = nr_apqns * sizeof(struct pkey_apqn);
++ if (len) {
++ if (copy_to_user(kat.apqns, apqns, len)) {
++ kfree(apqns);
++ return -EFAULT;
++ }
++ }
++ }
++ kat.apqn_entries = nr_apqns;
++ if (copy_to_user(uat, &kat, sizeof(kat)))
++ rc = -EFAULT;
++ kfree(apqns);
++ break;
++ }
+ default:
+ /* unknown/unsupported ioctl cmd */
+ return -ENOTTY;
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -270,7 +270,7 @@ static inline int _zcrypt_send_cprb(stru
+ * Generate (random) CCA AES DATA secure key.
+ */
+ int cca_genseckey(u16 cardnr, u16 domain,
+- u32 keytype, u8 seckey[SECKEYBLOBSIZE])
++ u32 keybitsize, u8 seckey[SECKEYBLOBSIZE])
+ {
+ int i, rc, keysize;
+ int seckeysize;
+@@ -325,22 +325,25 @@ int cca_genseckey(u16 cardnr, u16 domain
+ preqparm->rule_array_len = sizeof(preqparm->rule_array_len);
+ preqparm->lv1.len = sizeof(struct lv1);
+ memcpy(preqparm->lv1.key_form, "OP ", 8);
+- switch (keytype) {
+- case PKEY_KEYTYPE_AES_128:
++ switch (keybitsize) {
++ case PKEY_SIZE_AES_128:
++ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ memcpy(preqparm->lv1.key_length, "KEYLN16 ", 8);
+ break;
+- case PKEY_KEYTYPE_AES_192:
++ case PKEY_SIZE_AES_192:
++ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ memcpy(preqparm->lv1.key_length, "KEYLN24 ", 8);
+ break;
+- case PKEY_KEYTYPE_AES_256:
++ case PKEY_SIZE_AES_256:
++ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
+ break;
+ default:
+- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+- __func__, keytype);
++ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
++ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -408,7 +411,7 @@ EXPORT_SYMBOL(cca_genseckey);
+ /*
+ * Generate a CCA AES DATA secure key with a given key value.
+ */
+-int cca_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
++int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 seckey[SECKEYBLOBSIZE])
+ {
+ int rc, keysize, seckeysize;
+@@ -462,19 +465,22 @@ int cca_clr2seckey(u16 cardnr, u16 domai
+ memcpy(preqparm->rule_array, "AES ", 8);
+ preqparm->rule_array_len =
+ sizeof(preqparm->rule_array_len) + sizeof(preqparm->rule_array);
+- switch (keytype) {
+- case PKEY_KEYTYPE_AES_128:
++ switch (keybitsize) {
++ case PKEY_SIZE_AES_128:
++ case PKEY_KEYTYPE_AES_128: /* older ioctls used this */
+ keysize = 16;
+ break;
+- case PKEY_KEYTYPE_AES_192:
++ case PKEY_SIZE_AES_192:
++ case PKEY_KEYTYPE_AES_192: /* older ioctls used this */
+ keysize = 24;
+ break;
+- case PKEY_KEYTYPE_AES_256:
++ case PKEY_SIZE_AES_256:
++ case PKEY_KEYTYPE_AES_256: /* older ioctls used this */
+ keysize = 32;
+ break;
+ default:
+- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
+- __func__, keytype);
++ DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
++ __func__, keybitsize);
+ rc = -EINVAL;
+ goto out;
+ }
+@@ -545,8 +551,7 @@ EXPORT_SYMBOL(cca_clr2seckey);
+ */
+ int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+- u8 *protkey, u32 *protkeylen,
+- u32 *keytype)
++ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
+ {
+ int rc;
+ u8 *mem;
+@@ -656,21 +661,21 @@ int cca_sec2protkey(u16 cardnr, u16 doma
+ switch (prepparm->lv3.keyblock.len) {
+ case 16+32:
+ /* AES 128 protected key */
+- if (keytype)
+- *keytype = PKEY_KEYTYPE_AES_128;
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_128;
+ break;
+ case 24+32:
+ /* AES 192 protected key */
+- if (keytype)
+- *keytype = PKEY_KEYTYPE_AES_192;
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_192;
+ break;
+ case 32+32:
+ /* AES 256 protected key */
+- if (keytype)
+- *keytype = PKEY_KEYTYPE_AES_256;
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_256;
+ break;
+ default:
+- DEBUG_ERR("%s unknown/unsupported keytype %d\n",
++ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.keyblock.len);
+ rc = -EIO;
+ goto out;
+@@ -1645,6 +1650,7 @@ static int findcard(u64 mkvp, u16 *pcard
+ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
+ {
+ u64 mkvp;
++ int minhwtype = 0;
+ const struct keytoken_header *hdr = (struct keytoken_header *) key;
+
+ if (hdr->type != TOKTYPE_CCA_INTERNAL)
+@@ -1654,11 +1660,15 @@ int cca_findcard(const u8 *key, u16 *pca
+ case TOKVER_CCA_AES:
+ mkvp = ((struct secaeskeytoken *)key)->mkvp;
+ break;
++ case TOKVER_CCA_VLSC:
++ mkvp = ((struct cipherkeytoken *)key)->mkvp0;
++ minhwtype = AP_DEVICE_TYPE_CEX6;
++ break;
+ default:
+ return -EINVAL;
+ }
+
+- return findcard(mkvp, pcardnr, pdomain, verify, 0);
++ return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
+ }
+ EXPORT_SYMBOL(cca_findcard);
+
+--- a/drivers/s390/crypto/zcrypt_ccamisc.h
++++ b/drivers/s390/crypto/zcrypt_ccamisc.h
+@@ -124,12 +124,12 @@ int cca_check_secaescipherkey(debug_info
+ /*
+ * Generate (random) CCA AES DATA secure key.
+ */
+-int cca_genseckey(u16 cardnr, u16 domain, u32 keytype, u8 *seckey);
++int cca_genseckey(u16 cardnr, u16 domain, u32 keybitsize, u8 *seckey);
+
+ /*
+ * Generate CCA AES DATA secure key with given clear key value.
+ */
+-int cca_clr2seckey(u16 cardnr, u16 domain, u32 keytype,
++int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
+ const u8 *clrkey, u8 *seckey);
+
+ /*
+@@ -137,8 +137,7 @@ int cca_clr2seckey(u16 cardnr, u16 domai
+ */
+ int cca_sec2protkey(u16 cardnr, u16 domain,
+ const u8 seckey[SECKEYBLOBSIZE],
+- u8 *protkey, u32 *protkeylen,
+- u32 *protkeytype);
++ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
+
+ /*
+ * Generate (random) CCA AES CIPHER secure key.
+@@ -169,6 +168,7 @@ int cca_query_crypto_facility(u16 cardnr
+ /*
+ * Search for a matching crypto card based on the Master Key
+ * Verification Pattern provided inside a secure key.
++ * Works with CCA AES data and cipher keys.
+ * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+ * 1 if OLD MKVP matches.
+ */
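
A rough userspace sketch of how the new PKEY_KBLOB2PROTK2 ioctl handled
above is meant to be driven. Struct and field names are taken from the
handler code; the device node /dev/pkey, the uapi header location and
the caller-supplied card/domain pair are assumptions here:

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/pkey.h>	/* assumed uapi header location */

	/* Turn a CCA secure key blob into a protected key on one given
	 * APQN. The ioctl requires at least one APQN in the list. */
	static int blob2protkey(unsigned short card, unsigned short domain,
				unsigned char *blob, unsigned int bloblen)
	{
		struct pkey_apqn apqn = { .card = card, .domain = domain };
		struct pkey_kblob2pkey2 ktp;
		int fd, rc;

		memset(&ktp, 0, sizeof(ktp));
		ktp.apqns = &apqn;
		ktp.apqn_entries = 1;
		ktp.key = blob;
		ktp.keylen = bloblen;

		fd = open("/dev/pkey", O_RDWR);
		if (fd < 0)
			return -1;
		rc = ioctl(fd, PKEY_KBLOB2PROTK2, &ktp);
		close(fd);
		/* on success, ktp.protkey holds type, len and key value */
		return rc;
	}
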
diff --git a/patches.suse/s390-pkey-add-sysfs-attributes-to-emit-aes-cipher-key-blobs b/patches.suse/s390-pkey-add-sysfs-attributes-to-emit-aes-cipher-key-blobs
new file mode 100644
index 0000000000..76d7d92ee9
--- /dev/null
+++ b/patches.suse/s390-pkey-add-sysfs-attributes-to-emit-aes-cipher-key-blobs
@@ -0,0 +1,161 @@
+From: Ingo Franzki <ifranzki@linux.ibm.com>
+Date: Tue, 20 Aug 2019 14:57:20 +0200
+Subject: s390/pkey: Add sysfs attributes to emit AES CIPHER key blobs
+Git-commit: f71fee2711a788b94ff0acb02fbd2bfe2de7e0a3
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Now that the pkey kernel module also supports CCA AES CIPHER keys:
+Add binary read-only sysfs attributes to the pkey module from which
+random CCA AES CIPHER secure keys can be read, similar to the already
+existing sysfs attributes for AES DATA and random protected keys.
+Keys are read from these attributes using a cat-like interface.
+
+A typical use case for those keys is to encrypt a swap device
+using the paes cipher. During processing of /etc/crypttab, the
+CCA random AES CIPHER secure key to encrypt the swap device is
+read from one of the attributes.
+
+The following attributes are added:
+ ccacipher/ccacipher_aes_128
+ ccacipher/ccacipher_aes_192
+ ccacipher/ccacipher_aes_256
+ ccacipher/ccacipher_aes_128_xts
+ ccacipher/ccacipher_aes_256_xts
+Each attribute emits a secure key blob for the corresponding
+key size and cipher mode.
+
+Signed-off-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Reviewed-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/pkey_api.c | 113 +++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 113 insertions(+)
+
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1363,9 +1363,122 @@ static struct attribute_group ccadata_at
+ .bin_attrs = ccadata_attrs,
+ };
+
++#define CCACIPHERTOKENSIZE (sizeof(struct cipherkeytoken) + 80)
++
++/*
++ * Sysfs attribute read function for all secure key ccacipher binary attributes.
++ * The implementation can not deal with partial reads, because a new random
++ * secure key blob is generated with each read. In case of partial reads
++ * (i.e. off != 0 or count < key blob size) -EINVAL is returned.
++ */
++static ssize_t pkey_ccacipher_aes_attr_read(enum pkey_key_size keybits,
++ bool is_xts, char *buf, loff_t off,
++ size_t count)
++{
++ size_t keysize;
++ int rc;
++
++	if (off != 0 || count < CCACIPHERTOKENSIZE)
++		return -EINVAL;
++	if (is_xts && count < 2 * CCACIPHERTOKENSIZE)
++		return -EINVAL;
++
++ keysize = CCACIPHERTOKENSIZE;
++ rc = cca_gencipherkey(-1, -1, keybits, 0, buf, &keysize);
++ if (rc)
++ return rc;
++ memset(buf + keysize, 0, CCACIPHERTOKENSIZE - keysize);
++
++ if (is_xts) {
++ keysize = CCACIPHERTOKENSIZE;
++ rc = cca_gencipherkey(-1, -1, keybits, 0,
++ buf + CCACIPHERTOKENSIZE, &keysize);
++ if (rc)
++ return rc;
++ memset(buf + CCACIPHERTOKENSIZE + keysize, 0,
++ CCACIPHERTOKENSIZE - keysize);
++
++ return 2 * CCACIPHERTOKENSIZE;
++ }
++
++ return CCACIPHERTOKENSIZE;
++}
++
++static ssize_t ccacipher_aes_128_read(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off,
++ size_t count)
++{
++ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, false, buf,
++ off, count);
++}
++
++static ssize_t ccacipher_aes_192_read(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off,
++ size_t count)
++{
++ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_192, false, buf,
++ off, count);
++}
++
++static ssize_t ccacipher_aes_256_read(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off,
++ size_t count)
++{
++ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, false, buf,
++ off, count);
++}
++
++static ssize_t ccacipher_aes_128_xts_read(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off,
++ size_t count)
++{
++ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_128, true, buf,
++ off, count);
++}
++
++static ssize_t ccacipher_aes_256_xts_read(struct file *filp,
++ struct kobject *kobj,
++ struct bin_attribute *attr,
++ char *buf, loff_t off,
++ size_t count)
++{
++ return pkey_ccacipher_aes_attr_read(PKEY_SIZE_AES_256, true, buf,
++ off, count);
++}
++
++static BIN_ATTR_RO(ccacipher_aes_128, CCACIPHERTOKENSIZE);
++static BIN_ATTR_RO(ccacipher_aes_192, CCACIPHERTOKENSIZE);
++static BIN_ATTR_RO(ccacipher_aes_256, CCACIPHERTOKENSIZE);
++static BIN_ATTR_RO(ccacipher_aes_128_xts, 2 * CCACIPHERTOKENSIZE);
++static BIN_ATTR_RO(ccacipher_aes_256_xts, 2 * CCACIPHERTOKENSIZE);
++
++static struct bin_attribute *ccacipher_attrs[] = {
++ &bin_attr_ccacipher_aes_128,
++ &bin_attr_ccacipher_aes_192,
++ &bin_attr_ccacipher_aes_256,
++ &bin_attr_ccacipher_aes_128_xts,
++ &bin_attr_ccacipher_aes_256_xts,
++ NULL
++};
++
++static struct attribute_group ccacipher_attr_group = {
++ .name = "ccacipher",
++ .bin_attrs = ccacipher_attrs,
++};
++
+ static const struct attribute_group *pkey_attr_groups[] = {
+ &protkey_attr_group,
+ &ccadata_attr_group,
++ &ccacipher_attr_group,
+ NULL,
+ };
+
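
The cat-like read interface described in the commit message can also be
driven from C. A minimal sketch; the sysfs path is an assumption based
on pkey registering as a misc device:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *attr = "/sys/devices/virtual/misc/pkey"
				   "/ccacipher/ccacipher_aes_256";
		unsigned char blob[4096];
		ssize_t n;
		int fd;

		fd = open(attr, O_RDONLY);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* one read from offset 0 returns a freshly generated key
		 * blob; partial reads are rejected with EINVAL */
		n = read(fd, blob, sizeof(blob));
		close(fd);
		if (n < 0) {
			perror("read");
			return 1;
		}
		printf("read %zd byte CCA AES CIPHER key blob\n", n);
		return 0;
	}
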
diff --git a/patches.suse/s390-pkey-fix-memory-leak-within-copy_apqns_from_user b/patches.suse/s390-pkey-fix-memory-leak-within-copy_apqns_from_user
new file mode 100644
index 0000000000..fbdb1b6423
--- /dev/null
+++ b/patches.suse/s390-pkey-fix-memory-leak-within-copy_apqns_from_user
@@ -0,0 +1,31 @@
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Tue, 12 Nov 2019 09:19:26 +0100
+Subject: s390/pkey: fix memory leak within _copy_apqns_from_user()
+Git-commit: f9cac4fd8878929c6ebff0bd272317905d77c38a
+Patch-mainline: v5.5-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Fixes: f2bbc96e7cfad ("s390/pkey: add CCA AES cipher key support")
+Reported-by: Markus Elfring <Markus.Elfring@web.de>
+Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/pkey_api.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -740,8 +740,10 @@ static void *_copy_apqns_from_user(void
+ kapqns = kmalloc(nbytes, GFP_KERNEL);
+ if (!kapqns)
+ return ERR_PTR(-ENOMEM);
+- if (copy_from_user(kapqns, uapqns, nbytes))
++ if (copy_from_user(kapqns, uapqns, nbytes)) {
++ kfree(kapqns);
+ return ERR_PTR(-EFAULT);
++ }
+ }
+
+ return kapqns;
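
The fix restores the usual idiom for allocation helpers that report
errors via ERR_PTR(): once kmalloc() has succeeded, every error exit
must free the buffer first. A generic sketch of the pattern, not
specific to pkey:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	static void *copy_buf_from_user(const void __user *ubuf, size_t len)
	{
		void *kbuf;

		kbuf = kmalloc(len, GFP_KERNEL);
		if (!kbuf)
			return ERR_PTR(-ENOMEM);
		if (copy_from_user(kbuf, ubuf, len)) {
			kfree(kbuf);	/* the kfree() this fix adds */
			return ERR_PTR(-EFAULT);
		}
		return kbuf;
	}
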
diff --git a/patches.suse/s390-pkey-pkey-cleanup-narrow-in-kernel-api-fix-some-variable-types b/patches.suse/s390-pkey-pkey-cleanup-narrow-in-kernel-api-fix-some-variable-types
new file mode 100644
index 0000000000..ad0bd2d7ac
--- /dev/null
+++ b/patches.suse/s390-pkey-pkey-cleanup-narrow-in-kernel-api-fix-some-variable-types
@@ -0,0 +1,290 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Wed, 3 Jul 2019 13:09:03 +0200
+Subject: s390/pkey: pkey cleanup: narrow in-kernel API, fix some variable types
+Git-commit: 183cb46954dd204e3578a25ad1284aab3debec52
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Several pkey functions are exported as in-kernel callable API
+functions but have no users at all. This patch narrows the pkey
+in-kernel API down to only what is currently used.
+
+Within the kernel, plain u32 is used instead of the __u32 variants.
+Also, functions declared in a header file under arch/s390/include/asm
+don't need an 'In-kernel API' comment; that is implied by the
+location, otherwise the header file would live under
+arch/s390/include/uapi/asm.
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/pkey.h | 114 -----------------------------------------
+ drivers/s390/crypto/pkey_api.c | 37 +++++--------
+ 2 files changed, 18 insertions(+), 133 deletions(-)
+
+--- a/arch/s390/include/asm/pkey.h
++++ b/arch/s390/include/asm/pkey.h
+@@ -2,7 +2,7 @@
+ /*
+ * Kernelspace interface to the pkey device driver
+ *
+- * Copyright IBM Corp. 2016
++ * Copyright IBM Corp. 2016,2019
+ *
+ * Author: Harald Freudenberger <freude@de.ibm.com>
+ *
+@@ -16,123 +16,13 @@
+ #include <uapi/asm/pkey.h>
+
+ /*
+- * Generate (AES) random secure key.
+- * @param cardnr may be -1 (use default card)
+- * @param domain may be -1 (use default domain)
+- * @param keytype one of the PKEY_KEYTYPE values
+- * @param seckey pointer to buffer receiving the secure key
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_genseckey(__u16 cardnr, __u16 domain,
+- __u32 keytype, struct pkey_seckey *seckey);
+-
+-/*
+- * Generate (AES) secure key with given key value.
+- * @param cardnr may be -1 (use default card)
+- * @param domain may be -1 (use default domain)
+- * @param keytype one of the PKEY_KEYTYPE values
+- * @param clrkey pointer to buffer with clear key data
+- * @param seckey pointer to buffer receiving the secure key
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_clr2seckey(__u16 cardnr, __u16 domain, __u32 keytype,
+- const struct pkey_clrkey *clrkey,
+- struct pkey_seckey *seckey);
+-
+-/*
+- * Derive (AES) proteced key from the (AES) secure key blob.
+- * @param cardnr may be -1 (use default card)
+- * @param domain may be -1 (use default domain)
+- * @param seckey pointer to buffer with the input secure key
+- * @param protkey pointer to buffer receiving the protected key and
+- * additional info (type, length)
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_sec2protkey(__u16 cardnr, __u16 domain,
+- const struct pkey_seckey *seckey,
+- struct pkey_protkey *protkey);
+-
+-/*
+- * Derive (AES) protected key from a given clear key value.
+- * @param keytype one of the PKEY_KEYTYPE values
+- * @param clrkey pointer to buffer with clear key data
+- * @param protkey pointer to buffer receiving the protected key and
+- * additional info (type, length)
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_clr2protkey(__u32 keytype,
+- const struct pkey_clrkey *clrkey,
+- struct pkey_protkey *protkey);
+-
+-/*
+- * Search for a matching crypto card based on the Master Key
+- * Verification Pattern provided inside a secure key.
+- * @param seckey pointer to buffer with the input secure key
+- * @param cardnr pointer to cardnr, receives the card number on success
+- * @param domain pointer to domain, receives the domain number on success
+- * @param verify if set, always verify by fetching verification pattern
+- * from card
+- * @return 0 on success, negative errno value on failure. If no card could be
+- * found, -ENODEV is returned.
+- */
+-int pkey_findcard(const struct pkey_seckey *seckey,
+- __u16 *cardnr, __u16 *domain, int verify);
+-
+-/*
+- * Find card and transform secure key to protected key.
+- * @param seckey pointer to buffer with the input secure key
+- * @param protkey pointer to buffer receiving the protected key and
+- * additional info (type, length)
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_skey2pkey(const struct pkey_seckey *seckey,
+- struct pkey_protkey *protkey);
+-
+-/*
+- * Verify the given secure key for being able to be useable with
+- * the pkey module. Check for correct key type and check for having at
+- * least one crypto card being able to handle this key (master key
+- * or old master key verification pattern matches).
+- * Return some info about the key: keysize in bits, keytype (currently
+- * only AES), flag if key is wrapped with an old MKVP.
+- * @param seckey pointer to buffer with the input secure key
+- * @param pcardnr pointer to cardnr, receives the card number on success
+- * @param pdomain pointer to domain, receives the domain number on success
+- * @param pkeysize pointer to keysize, receives the bitsize of the key
+- * @param pattributes pointer to attributes, receives additional info
+- * PKEY_VERIFY_ATTR_AES if the key is an AES key
+- * PKEY_VERIFY_ATTR_OLD_MKVP if key has old mkvp stored in
+- * @return 0 on success, negative errno value on failure. If no card could
+- * be found which is able to handle this key, -ENODEV is returned.
+- */
+-int pkey_verifykey(const struct pkey_seckey *seckey,
+- u16 *pcardnr, u16 *pdomain,
+- u16 *pkeysize, u32 *pattributes);
+-
+-/*
+- * In-kernel API: Generate (AES) random protected key.
+- * @param keytype one of the PKEY_KEYTYPE values
+- * @param protkey pointer to buffer receiving the protected key
+- * @return 0 on success, negative errno value on failure
+- */
+-int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey);
+-
+-/*
+- * In-kernel API: Verify an (AES) protected key.
+- * @param protkey pointer to buffer containing the protected key to verify
+- * @return 0 on success, negative errno value on failure. In case the protected
+- * key is not valid -EKEYREJECTED is returned
+- */
+-int pkey_verifyprotkey(const struct pkey_protkey *protkey);
+-
+-/*
+ * In-kernel API: Transform an key blob (of any type) into a protected key.
+ * @param key pointer to a buffer containing the key blob
+ * @param keylen size of the key blob in bytes
+ * @param protkey pointer to buffer receiving the protected key
+ * @return 0 on success, negative errno value on failure
+ */
+-int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
++int pkey_keyblob2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey);
+
+ #endif /* _KAPI_PKEY_H */
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -2,7 +2,7 @@
+ /*
+ * pkey device driver
+ *
+- * Copyright IBM Corp. 2017
++ * Copyright IBM Corp. 2017,2019
+ * Author(s): Harald Freudenberger
+ */
+
+@@ -71,9 +71,9 @@ struct protaeskeytoken {
+ /*
+ * Create a protected key from a clear key value.
+ */
+-int pkey_clr2protkey(u32 keytype,
+- const struct pkey_clrkey *clrkey,
+- struct pkey_protkey *protkey)
++static int pkey_clr2protkey(u32 keytype,
++ const struct pkey_clrkey *clrkey,
++ struct pkey_protkey *protkey)
+ {
+ long fc;
+ int keysize;
+@@ -122,13 +122,12 @@ int pkey_clr2protkey(u32 keytype,
+
+ return 0;
+ }
+-EXPORT_SYMBOL(pkey_clr2protkey);
+
+ /*
+ * Find card and transform secure key into protected key.
+ */
+-int pkey_skey2pkey(const struct pkey_seckey *seckey,
+- struct pkey_protkey *pkey)
++static int pkey_skey2pkey(const struct pkey_seckey *seckey,
++ struct pkey_protkey *pkey)
+ {
+ u16 cardnr, domain;
+ int rc, verify;
+@@ -157,14 +156,13 @@ int pkey_skey2pkey(const struct pkey_sec
+
+ return rc;
+ }
+-EXPORT_SYMBOL(pkey_skey2pkey);
+
+ /*
+ * Verify key and give back some info about the key.
+ */
+-int pkey_verifykey(const struct pkey_seckey *seckey,
+- u16 *pcardnr, u16 *pdomain,
+- u16 *pkeysize, u32 *pattributes)
++static int pkey_verifykey(const struct pkey_seckey *seckey,
++ u16 *pcardnr, u16 *pdomain,
++ u16 *pkeysize, u32 *pattributes)
+ {
+ struct secaeskeytoken *t = (struct secaeskeytoken *) seckey;
+ u16 cardnr, domain;
+@@ -201,12 +199,11 @@ out:
+ DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ return rc;
+ }
+-EXPORT_SYMBOL(pkey_verifykey);
+
+ /*
+ * Generate a random protected key
+ */
+-int pkey_genprotkey(__u32 keytype, struct pkey_protkey *protkey)
++static int pkey_genprotkey(u32 keytype, struct pkey_protkey *protkey)
+ {
+ struct pkey_clrkey clrkey;
+ int keysize;
+@@ -241,12 +238,11 @@ int pkey_genprotkey(__u32 keytype, struc
+
+ return 0;
+ }
+-EXPORT_SYMBOL(pkey_genprotkey);
+
+ /*
+ * Verify if a protected key is still valid
+ */
+-int pkey_verifyprotkey(const struct pkey_protkey *protkey)
++static int pkey_verifyprotkey(const struct pkey_protkey *protkey)
+ {
+ unsigned long fc;
+ struct {
+@@ -287,12 +283,11 @@ int pkey_verifyprotkey(const struct pkey
+
+ return 0;
+ }
+-EXPORT_SYMBOL(pkey_verifyprotkey);
+
+ /*
+ * Transform a non-CCA key token into a protected key
+ */
+-static int pkey_nonccatok2pkey(const __u8 *key, __u32 keylen,
++static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+ {
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+@@ -320,7 +315,7 @@ static int pkey_nonccatok2pkey(const __u
+ /*
+ * Transform a CCA internal key token into a protected key
+ */
+-static int pkey_ccainttok2pkey(const __u8 *key, __u32 keylen,
++static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+ {
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+@@ -342,7 +337,7 @@ static int pkey_ccainttok2pkey(const __u
+ /*
+ * Transform a key blob (of any type) into a protected key
+ */
+-int pkey_keyblob2pkey(const __u8 *key, __u32 keylen,
++int pkey_keyblob2pkey(const u8 *key, u32 keylen,
+ struct pkey_protkey *protkey)
+ {
+ struct keytoken_header *hdr = (struct keytoken_header *)key;
+@@ -507,8 +502,8 @@ static long pkey_unlocked_ioctl(struct f
+ case PKEY_KBLOB2PROTK: {
+ struct pkey_kblob2pkey __user *utp = (void __user *) arg;
+ struct pkey_kblob2pkey ktp;
+- __u8 __user *ukey;
+- __u8 *kkey;
++ u8 __user *ukey;
++ u8 *kkey;
+
+ if (copy_from_user(&ktp, utp, sizeof(ktp)))
+ return -EFAULT;
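
After this cleanup, pkey_keyblob2pkey() is the only pkey symbol left
for in-kernel users. A minimal sketch of a kernel-side caller; the
blob and its length are placeholders:

	#include <asm/pkey.h>

	static int derive_protkey(const u8 *blob, u32 bloblen)
	{
		struct pkey_protkey pk;
		int rc;

		/* works for any supported blob type, as in the paes cipher */
		rc = pkey_keyblob2pkey(blob, bloblen, &pk);
		if (rc)
			return rc;
		/* pk.type, pk.len and pk.protkey[] are now valid */
		return 0;
	}
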
diff --git a/patches.suse/s390-qdio-enable-drivers-to-poll-for-output-completions b/patches.suse/s390-qdio-enable-drivers-to-poll-for-output-completions
new file mode 100644
index 0000000000..4c39cdd78a
--- /dev/null
+++ b/patches.suse/s390-qdio-enable-drivers-to-poll-for-output-completions
@@ -0,0 +1,150 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:47 +0200
+Subject: s390/qdio: enable drivers to poll for Output completions
+Git-commit: 7c47f5afdeef763599f1ae22d29b8c3904c58315
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+While commit d36deae75011 ("qdio: extend API to allow polling") enhanced
+the qdio layer so that drivers can poll their Input Queues, we don't
+have the corresponding infrastructure for Output Queues yet.
+
+Factor out a helper that scans a single QDIO Queue, so that qeth can
+implement TX NAPI on top of it.
+While doing so, remove the duplicated tracking of the next-to-scan index
+(q->first_to_check vs q->first_to_kick) in this code path.
+
+qdio_handle_aobs() needs to move slightly upwards in the code hierarchy,
+so that it's still called from the polling path.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Acked-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/qdio.h | 3 ++
+ drivers/s390/cio/qdio_main.c | 64 ++++++++++++++++++++++++++++---------------
+ 2 files changed, 46 insertions(+), 21 deletions(-)
+
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -416,6 +416,9 @@ extern int do_QDIO(struct ccw_device *,
+ extern int qdio_start_irq(struct ccw_device *, int);
+ extern int qdio_stop_irq(struct ccw_device *, int);
+ extern int qdio_get_next_buffers(struct ccw_device *, int, int *, int *);
++extern int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr,
++ bool is_input, unsigned int *bufnr,
++ unsigned int *error);
+ extern int qdio_shutdown(struct ccw_device *, int);
+ extern int qdio_free(struct ccw_device *);
+ extern int qdio_get_ssqd_desc(struct ccw_device *, struct qdio_ssqd_desc *);
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -647,8 +647,6 @@ static void qdio_kick_handler(struct qdi
+ qperf_inc(q, outbound_handler);
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+ start, count);
+- if (q->u.out.use_cq)
+- qdio_handle_aobs(q, start, count);
+ }
+
+ q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
+@@ -774,8 +772,11 @@ static inline int qdio_outbound_q_moved(
+
+ count = get_outbound_buffer_frontier(q, start);
+
+- if (count)
++ if (count) {
+ DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
++ if (q->u.out.use_cq)
++ qdio_handle_aobs(q, start, count);
++ }
+
+ return count;
+ }
+@@ -1655,6 +1656,44 @@ rescan:
+ }
+ EXPORT_SYMBOL(qdio_start_irq);
+
++static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr,
++ unsigned int *error)
++{
++ unsigned int start = q->first_to_check;
++ int count;
++
++ count = q->is_input_q ? qdio_inbound_q_moved(q, start) :
++ qdio_outbound_q_moved(q, start);
++ if (count == 0)
++ return 0;
++
++ *bufnr = start;
++ *error = q->qdio_error;
++
++ /* for the next time */
++ q->first_to_check = add_buf(start, count);
++ q->qdio_error = 0;
++
++ return count;
++}
++
++int qdio_inspect_queue(struct ccw_device *cdev, unsigned int nr, bool is_input,
++ unsigned int *bufnr, unsigned int *error)
++{
++ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
++ struct qdio_q *q;
++
++ if (!irq_ptr)
++ return -ENODEV;
++ q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr];
++
++ if (need_siga_sync(q))
++ qdio_siga_sync_q(q);
++
++ return __qdio_inspect_queue(q, bufnr, error);
++}
++EXPORT_SYMBOL_GPL(qdio_inspect_queue);
++
+ /**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+@@ -1672,13 +1711,10 @@ int qdio_get_next_buffers(struct ccw_dev
+ {
+ struct qdio_q *q;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+- unsigned int start;
+- int count;
+
+ if (!irq_ptr)
+ return -ENODEV;
+ q = irq_ptr->input_qs[nr];
+- start = q->first_to_check;
+
+ /*
+ * Cannot rely on automatic sync after interrupt since queues may
+@@ -1689,25 +1725,11 @@ int qdio_get_next_buffers(struct ccw_dev
+
+ qdio_check_outbound_pci_queues(irq_ptr);
+
+- count = qdio_inbound_q_moved(q, start);
+- if (count == 0)
+- return 0;
+-
+- start = add_buf(start, count);
+- q->first_to_check = start;
+-
+ /* Note: upper-layer MUST stop processing immediately here ... */
+ if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+ return -EIO;
+
+- *bufnr = q->first_to_kick;
+- *error = q->qdio_error;
+-
+- /* for the next time */
+- q->first_to_kick = add_buf(q->first_to_kick, count);
+- q->qdio_error = 0;
+-
+- return count;
++ return __qdio_inspect_queue(q, bufnr, error);
+ }
+ EXPORT_SYMBOL(qdio_get_next_buffers);
+
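
A driver's TX completion path can consume Output Queue completions
along these lines. A sketch only: the queue number and the per-buffer
completion helper drv_complete_tx_buffer() are assumptions, and qeth's
real TX NAPI code follows later in this series:

	/* driver-private completion hook, assumed to exist */
	extern void drv_complete_tx_buffer(unsigned int bufnr,
					   unsigned int error);

	static int drv_tx_poll_once(struct ccw_device *cdev,
				    unsigned int queue_nr)
	{
		unsigned int bufnr = 0, error = 0;
		int completed, i;

		/* is_input == false selects the Output Queue */
		completed = qdio_inspect_queue(cdev, queue_nr, false,
					       &bufnr, &error);
		if (completed <= 0)
			return completed;

		for (i = 0; i < completed; i++)
			drv_complete_tx_buffer((bufnr + i) %
					       QDIO_MAX_BUFFERS_PER_Q, error);
		return completed;
	}
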
diff --git a/patches.suse/s390-qdio-implement-iqd-multi-write b/patches.suse/s390-qdio-implement-iqd-multi-write
new file mode 100644
index 0000000000..1de8bd121b
--- /dev/null
+++ b/patches.suse/s390-qdio-implement-iqd-multi-write
@@ -0,0 +1,120 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Thu, 31 Oct 2019 13:42:14 +0100
+Subject: s390/qdio: implement IQD Multi-Write
+Git-commit: b7f143d093e10cd39ae4a22d2f57ac853017f49e
+Patch-mainline: v5.5-rc1
+References: jsc#SLE-7795 LTC#179220
+
+This allows IQD drivers to send out multiple SBALs with a single SIGA
+instruction.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/cio/qdio.h | 1 +
+ drivers/s390/cio/qdio_main.c | 31 +++++++++++++++----------------
+ 2 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -82,6 +82,7 @@ enum qdio_irq_states {
+ #define QDIO_SIGA_WRITE 0x00
+ #define QDIO_SIGA_READ 0x01
+ #define QDIO_SIGA_SYNC 0x02
++#define QDIO_SIGA_WRITEM 0x03
+ #define QDIO_SIGA_WRITEQ 0x04
+ #define QDIO_SIGA_QEBSM_FLAG 0x80
+
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -310,18 +310,19 @@ static inline int qdio_siga_sync_q(struc
+ return qdio_siga_sync(q, q->mask, 0);
+ }
+
+-static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
+- unsigned long aob)
++static int qdio_siga_output(struct qdio_q *q, unsigned int count,
++ unsigned int *busy_bit, unsigned long aob)
+ {
+ unsigned long schid = *((u32 *) &q->irq_ptr->schid);
+ unsigned int fc = QDIO_SIGA_WRITE;
+ u64 start_time = 0;
+ int retries = 0, cc;
+- unsigned long laob = 0;
+
+- if (aob) {
+- fc = QDIO_SIGA_WRITEQ;
+- laob = aob;
++ if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q)) {
++ if (count > 1)
++ fc = QDIO_SIGA_WRITEM;
++ else if (aob)
++ fc = QDIO_SIGA_WRITEQ;
+ }
+
+ if (is_qebsm(q)) {
+@@ -329,7 +330,7 @@ static int qdio_siga_output(struct qdio_
+ fc |= QDIO_SIGA_QEBSM_FLAG;
+ }
+ again:
+- cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
++ cc = do_siga_output(schid, q->mask, busy_bit, fc, aob);
+
+ /* hipersocket busy condition */
+ if (unlikely(*busy_bit)) {
+@@ -781,7 +782,8 @@ static inline int qdio_outbound_q_moved(
+ return count;
+ }
+
+-static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob)
++static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
++ unsigned long aob)
+ {
+ int retries = 0, cc;
+ unsigned int busy_bit;
+@@ -793,7 +795,7 @@ static int qdio_kick_outbound_q(struct q
+ retry:
+ qperf_inc(q, siga_write);
+
+- cc = qdio_siga_output(q, &busy_bit, aob);
++ cc = qdio_siga_output(q, count, &busy_bit, aob);
+ switch (cc) {
+ case 0:
+ break;
+@@ -1526,7 +1528,7 @@ set:
+ * @count: how many buffers are filled
+ */
+ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+- int bufnr, int count)
++ unsigned int bufnr, unsigned int count)
+ {
+ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
+ unsigned char state = 0;
+@@ -1549,13 +1551,10 @@ static int handle_outbound(struct qdio_q
+ if (queue_type(q) == QDIO_IQDIO_QFMT) {
+ unsigned long phys_aob = 0;
+
+- /* One SIGA-W per buffer required for unicast HSI */
+- WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
+-
+- if (q->u.out.use_cq)
++ if (q->u.out.use_cq && count == 1)
+ phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr);
+
+- rc = qdio_kick_outbound_q(q, phys_aob);
++ rc = qdio_kick_outbound_q(q, count, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+@@ -1564,7 +1563,7 @@ static int handle_outbound(struct qdio_q
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
+ } else {
+- rc = qdio_kick_outbound_q(q, 0);
++ rc = qdio_kick_outbound_q(q, count, 0);
+ }
+
+ /* Let drivers implement their own completion scanning: */
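
For the driver this is transparent: a single do_QDIO() call covering
several filled SBALs on a unicast IQD Output Queue now maps to one
SIGA write-multiple, where it previously tripped the one-SIGA-per-buffer
WARN_ON_ONCE. A sketch of the driver-side doorbell, with all queue
bookkeeping omitted:

	/* flush 'count' consecutive filled buffers starting at 'index' */
	static int drv_flush_tx_buffers(struct ccw_device *cdev, int queue_nr,
					unsigned int index, unsigned int count)
	{
		return do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT,
			       queue_nr, index, count);
	}
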
diff --git a/patches.suse/s390-qdio-let-drivers-opt-out-from-output-queue-scanning b/patches.suse/s390-qdio-let-drivers-opt-out-from-output-queue-scanning
new file mode 100644
index 0000000000..d239697533
--- /dev/null
+++ b/patches.suse/s390-qdio-let-drivers-opt-out-from-output-queue-scanning
@@ -0,0 +1,117 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:48 +0200
+Subject: s390/qdio: let drivers opt-out from Output Queue scanning
+Git-commit: 313dc689b16c08b081939ee9b87dac3736c780e3
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+If a driver wants to use the new Output Queue poll code, then the qdio
+layer must disable its internal Queue scanning. Let the driver select
+this mode by passing a special scan_threshold of 0.
+
+As the scan_threshold is the same for all Output Queues, also move it
+into the main qdio_irq struct. This allows for fast opt-out checking, a
+driver is expected to operate either _all_ or none of its Output Queues
+in polling mode.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Acked-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/qdio.h | 2 +-
+ drivers/s390/cio/qdio.h | 3 +--
+ drivers/s390/cio/qdio_main.c | 11 ++++++++---
+ drivers/s390/cio/qdio_setup.c | 2 +-
+ 4 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -359,7 +359,7 @@ struct qdio_initialize {
+ qdio_handler_t *output_handler;
+ void (**queue_start_poll_array) (struct ccw_device *, int,
+ unsigned long);
+- int scan_threshold;
++ unsigned int scan_threshold;
+ unsigned long int_parm;
+ struct qdio_buffer **input_sbal_addr_array;
+ struct qdio_buffer **output_sbal_addr_array;
+--- a/drivers/s390/cio/qdio.h
++++ b/drivers/s390/cio/qdio.h
+@@ -206,8 +206,6 @@ struct qdio_output_q {
+ struct qdio_outbuf_state *sbal_state;
+ /* timer to check for more outbound work */
+ struct timer_list timer;
+- /* used SBALs before tasklet schedule */
+- int scan_threshold;
+ };
+
+ /*
+@@ -295,6 +293,7 @@ struct qdio_irq {
+ struct qdio_ssqd_desc ssqd_desc;
+ void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
+
++ unsigned int scan_threshold; /* used SBALs before tasklet schedule */
+ int perf_stat_enabled;
+
+ struct qdr *qdr;
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -880,7 +880,7 @@ static inline void qdio_check_outbound_p
+ struct qdio_q *out;
+ int i;
+
+- if (!pci_out_supported(irq))
++ if (!pci_out_supported(irq) || !irq->scan_threshold)
+ return;
+
+ for_each_output_queue(irq, out, i)
+@@ -973,7 +973,7 @@ static void qdio_int_handler_pci(struct
+ }
+ }
+
+- if (!pci_out_supported(irq_ptr))
++ if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
+ return;
+
+ for_each_output_queue(irq_ptr, q, i) {
+@@ -1528,6 +1528,7 @@ set:
+ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ int bufnr, int count)
+ {
++ const unsigned int scan_threshold = q->irq_ptr->scan_threshold;
+ unsigned char state = 0;
+ int used, rc = 0;
+
+@@ -1566,8 +1567,12 @@ static int handle_outbound(struct qdio_q
+ rc = qdio_kick_outbound_q(q, 0);
+ }
+
++ /* Let drivers implement their own completion scanning: */
++ if (!scan_threshold)
++ return rc;
++
+ /* in case of SIGA errors we must process the error immediately */
+- if (used >= q->u.out.scan_threshold || rc)
++ if (used >= scan_threshold || rc)
+ qdio_tasklet_schedule(q);
+ else
+ /* free the SBALs in case of no further traffic */
+--- a/drivers/s390/cio/qdio_setup.c
++++ b/drivers/s390/cio/qdio_setup.c
+@@ -248,7 +248,6 @@ static void setup_queues(struct qdio_irq
+ output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
+
+ q->is_input_q = 0;
+- q->u.out.scan_threshold = qdio_init->scan_threshold;
+ setup_storage_lists(q, irq_ptr, output_sbal_array, i);
+ output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+
+@@ -474,6 +473,7 @@ int qdio_setup_irq(struct qdio_initializ
+ irq_ptr->nr_input_qs = init_data->no_input_qs;
+ irq_ptr->nr_output_qs = init_data->no_output_qs;
+ irq_ptr->cdev = init_data->cdev;
++ irq_ptr->scan_threshold = init_data->scan_threshold;
+ ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
+ setup_queues(irq_ptr, init_data);
+
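
Opting out is purely an initialization-time decision. A sketch of the
relevant part of the qdio_initialize setup, with all other mandatory
fields elided:

	static void drv_fill_qdio_init(struct qdio_initialize *init_data)
	{
		/* scan_threshold == 0 disables qdio's internal Output
		 * Queue scanning for all queues of this device */
		init_data->scan_threshold = 0;
		/* ... handlers, queue counts, SBAL address arrays ... */
	}
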
diff --git a/patches.suse/s390-qeth-add-bql-support-for-iqd-devices b/patches.suse/s390-qeth-add-bql-support-for-iqd-devices
new file mode 100644
index 0000000000..9a1c37576d
--- /dev/null
+++ b/patches.suse/s390-qeth-add-bql-support-for-iqd-devices
@@ -0,0 +1,95 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:52 +0200
+Subject: s390/qeth: add BQL support for IQD devices
+Git-commit: 96bd6c94bdf9de38b0fa0ec679fe40013f1c4576
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+Each TX buffer may contain multiple skbs. So just accumulate the sent
+byte count in the buffer struct, and later use the same count when
+completing the buffer.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/net/qeth_core.h | 1 +
+ drivers/s390/net/qeth_core_main.c | 16 +++++++++++++++-
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -426,6 +426,7 @@ struct qeth_qdio_out_buffer {
+ struct qdio_buffer *buffer;
+ atomic_t state;
+ int next_element_to_fill;
++ unsigned int bytes;
+ struct sk_buff_head skb_list;
+ int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER];
+
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -1159,6 +1159,7 @@ static void qeth_clear_output_buffer(str
+
+ qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
+ buf->next_element_to_fill = 0;
++ buf->bytes = 0;
+ atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
+ }
+
+@@ -2671,6 +2672,7 @@ int qeth_init_qdio_queues(struct qeth_ca
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
++ netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
+ }
+ return 0;
+ }
+@@ -3789,6 +3791,7 @@ static int qeth_do_send_packet_fast(stru
+ {
+ int index = queue->next_buf_to_fill;
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
++ unsigned int bytes = qdisc_pkt_len(skb);
+ struct netdev_queue *txq;
+ bool stopped = false;
+
+@@ -3810,6 +3813,9 @@ static int qeth_do_send_packet_fast(stru
+ }
+
+ qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
++ netdev_tx_sent_queue(txq, bytes);
++ buffer->bytes += bytes;
++
+ qeth_flush_buffers(queue, index, 1);
+
+ if (stopped && !qeth_out_queue_is_full(queue))
+@@ -5203,6 +5209,8 @@ static int qeth_tx_poll(struct napi_stru
+
+ while (1) {
+ unsigned int start, error, i;
++ unsigned int packets = 0;
++ unsigned int bytes = 0;
+ int completed;
+
+ if (qeth_out_queue_is_empty(queue)) {
+@@ -5228,13 +5236,19 @@ static int qeth_tx_poll(struct napi_stru
+ }
+
+ for (i = start; i < start + completed; i++) {
++ struct qeth_qdio_out_buffer *buffer;
+ unsigned int bidx = QDIO_BUFNR(i);
+
+- qeth_handle_send_error(card, queue->bufs[bidx], error);
++ buffer = queue->bufs[bidx];
++ packets += skb_queue_len(&buffer->skb_list);
++ bytes += buffer->bytes;
++
++ qeth_handle_send_error(card, buffer, error);
+ qeth_iqd_tx_complete(queue, bidx, error, budget);
+ qeth_cleanup_handled_pending(queue, bidx, false);
+ }
+
++ netdev_tx_completed_queue(txq, packets, bytes);
+ atomic_sub(completed, &queue->used_buffers);
+ work_done += completed;
+
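
The accumulated byte count feeds the standard BQL pairing: bytes are
reported when a packet is queued, packets and bytes are reported
together on completion, and the counters are reset when the queue is
reinitialized. A condensed sketch of that pattern, with the qeth
specifics stripped:

	#include <linux/netdevice.h>

	static void drv_on_xmit(struct netdev_queue *txq, unsigned int bytes)
	{
		netdev_tx_sent_queue(txq, bytes);	/* before the doorbell */
	}

	static void drv_on_tx_complete(struct netdev_queue *txq,
				       unsigned int packets,
				       unsigned int bytes)
	{
		netdev_tx_completed_queue(txq, packets, bytes);
	}

	static void drv_on_queue_init(struct netdev_queue *txq)
	{
		netdev_tx_reset_queue(txq);
	}
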
diff --git a/patches.suse/s390-qeth-add-tx-napi-support-for-iqd-devices b/patches.suse/s390-qeth-add-tx-napi-support-for-iqd-devices
new file mode 100644
index 0000000000..02141ded8d
--- /dev/null
+++ b/patches.suse/s390-qeth-add-tx-napi-support-for-iqd-devices
@@ -0,0 +1,391 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:50 +0200
+Subject: s390/qeth: add TX NAPI support for IQD devices
+Git-commit: e53edf743d26b39dfd78af43ff97620a4ac13ffc
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+Due to their large MTU and potentially low utilization of TX buffers,
+IQD devices in particular require fast TX recycling. This makes them
+a prime candidate for a TX NAPI path in qeth.
+
+qeth_tx_poll() uses the recently introduced qdio_inspect_queue() helper
+to poll the TX queue for completed buffers. To avoid hogging the CPU for
+too long, we yield to the stack after completing an entire queue's worth
+of buffers.
+While IQD is expected to transfer its buffers synchronously (and thus
+doesn't support TX interrupts), a timer covers for the odd case where a
+TX buffer doesn't complete synchronously. Currently this timer should
+only ever fire for
+(1) the mcast queue,
+(2) the occasional race, where the NAPI poll code observes an update to
+ queue->used_buffers while the TX doorbell hasn't been issued yet.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/qdio.h | 1
+ drivers/s390/net/qeth_core.h | 26 ++++
+ drivers/s390/net/qeth_core_main.c | 202 ++++++++++++++++++++++++++++----------
+ drivers/s390/net/qeth_ethtool.c | 2
+ 4 files changed, 183 insertions(+), 48 deletions(-)
+
+--- a/arch/s390/include/asm/qdio.h
++++ b/arch/s390/include/asm/qdio.h
+@@ -16,6 +16,7 @@
+ #define QDIO_MAX_QUEUES_PER_IRQ 4
+ #define QDIO_MAX_BUFFERS_PER_Q 128
+ #define QDIO_MAX_BUFFERS_MASK (QDIO_MAX_BUFFERS_PER_Q - 1)
++#define QDIO_BUFNR(num) ((num) & QDIO_MAX_BUFFERS_MASK)
+ #define QDIO_MAX_ELEMENTS_PER_BUFFER 16
+ #define QDIO_SBAL_SIZE 256
+
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -22,6 +22,7 @@
+ #include <linux/hashtable.h>
+ #include <linux/ip.h>
+ #include <linux/refcount.h>
++#include <linux/timer.h>
+ #include <linux/wait.h>
+ #include <linux/workqueue.h>
+
+@@ -474,6 +475,8 @@ struct qeth_out_q_stats {
+ u64 tso_bytes;
+ u64 packing_mode_switch;
+ u64 stopped;
++ u64 completion_yield;
++ u64 completion_timer;
+
+ /* rtnl_link_stats64 */
+ u64 tx_packets;
+@@ -482,6 +485,8 @@ struct qeth_out_q_stats {
+ u64 tx_dropped;
+ };
+
++#define QETH_TX_TIMER_USECS 500
++
+ struct qeth_qdio_out_q {
+ struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
+ struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
+@@ -500,13 +505,34 @@ struct qeth_qdio_out_q {
+ atomic_t used_buffers;
+ /* indicates whether PCI flag must be set (or if one is outstanding) */
+ atomic_t set_pci_flags_count;
++ struct napi_struct napi;
++ struct timer_list timer;
+ };
+
++#define qeth_for_each_output_queue(card, q, i) \
++ for (i = 0; i < card->qdio.no_out_queues && \
++ (q = card->qdio.out_qs[i]); i++)
++
++#define qeth_napi_to_out_queue(n) container_of(n, struct qeth_qdio_out_q, napi)
++
++static inline void qeth_tx_arm_timer(struct qeth_qdio_out_q *queue)
++{
++ if (timer_pending(&queue->timer))
++ return;
++ mod_timer(&queue->timer, usecs_to_jiffies(QETH_TX_TIMER_USECS) +
++ jiffies);
++}
++
+ static inline bool qeth_out_queue_is_full(struct qeth_qdio_out_q *queue)
+ {
+ return atomic_read(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q;
+ }
+
++static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
++{
++ return atomic_read(&queue->used_buffers) == 0;
++}
++
+ struct qeth_qdio_info {
+ atomic_t state;
+ /* input */
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2282,6 +2282,14 @@ static struct qeth_qdio_out_q *qeth_allo
+ return q;
+ }
+
++static void qeth_tx_completion_timer(struct timer_list *timer)
++{
++ struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
++
++ napi_schedule(&queue->napi);
++ QETH_TXQ_STAT_INC(queue, completion_timer);
++}
++
+ static int qeth_alloc_qdio_queues(struct qeth_card *card)
+ {
+ int i, j;
+@@ -2303,17 +2311,22 @@ static int qeth_alloc_qdio_queues(struct
+
+ /* outbound */
+ for (i = 0; i < card->qdio.no_out_queues; ++i) {
+- card->qdio.out_qs[i] = qeth_alloc_output_queue();
+- if (!card->qdio.out_qs[i])
++ struct qeth_qdio_out_q *queue;
++
++ queue = qeth_alloc_output_queue();
++ if (!queue)
+ goto out_freeoutq;
+ QETH_CARD_TEXT_(card, 2, "outq %i", i);
+- QETH_CARD_HEX(card, 2, &card->qdio.out_qs[i], sizeof(void *));
+- card->qdio.out_qs[i]->card = card;
+- card->qdio.out_qs[i]->queue_no = i;
++ QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
++ card->qdio.out_qs[i] = queue;
++ queue->card = card;
++ queue->queue_no = i;
++ timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
++
+ /* give outbound qeth_qdio_buffers their qdio_buffers */
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
+- WARN_ON(card->qdio.out_qs[i]->bufs[j] != NULL);
+- if (qeth_init_qdio_out_buf(card->qdio.out_qs[i], j))
++ WARN_ON(queue->bufs[j]);
++ if (qeth_init_qdio_out_buf(queue, j))
+ goto out_freeoutqbufs;
+ }
+ }
+@@ -3225,6 +3238,7 @@ static int qeth_switch_to_nonpacking_if_
+ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
+ int count)
+ {
++ struct qeth_card *card = queue->card;
+ struct qeth_qdio_out_buffer *buf;
+ int rc;
+ int i;
+@@ -3273,6 +3287,11 @@ static void qeth_flush_buffers(struct qe
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
+ rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
+ queue->queue_no, index, count);
++
++ /* Fake the TX completion interrupt: */
++ if (IS_IQD(card))
++ napi_schedule(&queue->napi);
++
+ if (rc) {
+ /* ignore temporary SIGA errors without busy condition */
+ if (rc == -ENOBUFS)
+@@ -3451,48 +3470,12 @@ static void qeth_qdio_output_handler(str
+ int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ buffer = queue->bufs[bidx];
+ qeth_handle_send_error(card, buffer, qdio_error);
+-
+- if (queue->bufstates &&
+- (queue->bufstates[bidx].flags &
+- QDIO_OUTBUF_STATE_FLAG_PENDING) != 0) {
+- WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
+-
+- if (atomic_cmpxchg(&buffer->state,
+- QETH_QDIO_BUF_PRIMED,
+- QETH_QDIO_BUF_PENDING) ==
+- QETH_QDIO_BUF_PRIMED) {
+- qeth_notify_skbs(queue, buffer,
+- TX_NOTIFY_PENDING);
+- }
+- QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx);
+-
+- /* prepare the queue slot for re-use: */
+- qeth_scrub_qdio_buffer(buffer->buffer,
+- queue->max_elements);
+- if (qeth_init_qdio_out_buf(queue, bidx)) {
+- QETH_CARD_TEXT(card, 2, "outofbuf");
+- qeth_schedule_recovery(card);
+- }
+- } else {
+- if (card->options.cq == QETH_CQ_ENABLED) {
+- enum iucv_tx_notify n;
+-
+- n = qeth_compute_cq_notification(
+- buffer->buffer->element[15].sflags, 0);
+- qeth_notify_skbs(queue, buffer, n);
+- }
+-
+- qeth_clear_output_buffer(queue, buffer, qdio_error);
+- }
+- qeth_cleanup_handled_pending(queue, bidx, 0);
++ qeth_clear_output_buffer(queue, buffer, qdio_error);
+ }
++
+ atomic_sub(count, &queue->used_buffers);
+- /* check if we need to do something on this outbound queue */
+- if (!IS_IQD(card))
+- qeth_check_outbound_queue(queue);
++ qeth_check_outbound_queue(queue);
+
+- if (IS_IQD(card))
+- __queue = qeth_iqd_translate_txq(dev, __queue);
+ txq = netdev_get_tx_queue(dev, __queue);
+ /* xmit may have observed the full-condition, but not yet stopped the
+ * txq. In which case the code below won't trigger. So before returning,
+@@ -4757,7 +4740,7 @@ static int qeth_qdio_establish(struct qe
+ init_data.input_sbal_addr_array = in_sbal_ptrs;
+ init_data.output_sbal_addr_array = out_sbal_ptrs;
+ init_data.output_sbal_state_array = card->qdio.out_bufstates;
+- init_data.scan_threshold = IS_IQD(card) ? 1 : 32;
++ init_data.scan_threshold = IS_IQD(card) ? 0 : 32;
+
+ if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
+ QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
+@@ -5171,6 +5154,99 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(qeth_poll);
+
++static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
++ unsigned int bidx, bool error)
++{
++ struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
++ u8 sflags = buffer->buffer->element[15].sflags;
++ struct qeth_card *card = queue->card;
++
++ if (queue->bufstates && (queue->bufstates[bidx].flags &
++ QDIO_OUTBUF_STATE_FLAG_PENDING)) {
++ WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);
++
++ if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
++ QETH_QDIO_BUF_PENDING) ==
++ QETH_QDIO_BUF_PRIMED)
++ qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
++
++ QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
++
++ /* prepare the queue slot for re-use: */
++ qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
++ if (qeth_init_qdio_out_buf(queue, bidx)) {
++ QETH_CARD_TEXT(card, 2, "outofbuf");
++ qeth_schedule_recovery(card);
++ }
++
++ return;
++ }
++
++ if (card->options.cq == QETH_CQ_ENABLED)
++ qeth_notify_skbs(queue, buffer,
++ qeth_compute_cq_notification(sflags, 0));
++ qeth_clear_output_buffer(queue, buffer, error);
++}
++
++static int qeth_tx_poll(struct napi_struct *napi, int budget)
++{
++ struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
++ unsigned int queue_no = queue->queue_no;
++ struct qeth_card *card = queue->card;
++ struct net_device *dev = card->dev;
++ unsigned int work_done = 0;
++ struct netdev_queue *txq;
++
++ txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));
++
++ while (1) {
++ unsigned int start, error, i;
++ int completed;
++
++ if (qeth_out_queue_is_empty(queue)) {
++ napi_complete(napi);
++ return 0;
++ }
++
++ /* Give the CPU a breather: */
++ if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
++ QETH_TXQ_STAT_INC(queue, completion_yield);
++ if (napi_complete_done(napi, 0))
++ napi_schedule(napi);
++ return 0;
++ }
++
++ completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
++ &start, &error);
++ if (completed <= 0) {
++ /* Ensure we see TX completion for pending work: */
++ if (napi_complete_done(napi, 0))
++ qeth_tx_arm_timer(queue);
++ return 0;
++ }
++
++ for (i = start; i < start + completed; i++) {
++ unsigned int bidx = QDIO_BUFNR(i);
++
++ qeth_handle_send_error(card, queue->bufs[bidx], error);
++ qeth_iqd_tx_complete(queue, bidx, error);
++ qeth_cleanup_handled_pending(queue, bidx, false);
++ }
++
++ atomic_sub(completed, &queue->used_buffers);
++ work_done += completed;
++
++ /* xmit may have observed the full-condition, but not yet
++ * stopped the txq. In which case the code below won't trigger.
++ * So before returning, xmit will re-check the txq's fill level
++ * and wake it up if needed.
++ */
++ if (netif_tx_queue_stopped(txq) &&
++ !qeth_out_queue_is_full(queue))
++ netif_tx_wake_queue(txq);
++ }
++}
++
+ static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
+ {
+ if (!cmd->hdr.return_code)
+@@ -6117,6 +6193,17 @@ int qeth_open(struct net_device *dev)
+ napi_enable(&card->napi);
+ local_bh_disable();
+ napi_schedule(&card->napi);
++ if (IS_IQD(card)) {
++ struct qeth_qdio_out_q *queue;
++ unsigned int i;
++
++ qeth_for_each_output_queue(card, queue, i) {
++ netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
++ QETH_NAPI_WEIGHT);
++ napi_enable(&queue->napi);
++ napi_schedule(&queue->napi);
++ }
++ }
+ /* kick-start the NAPI softirq: */
+ local_bh_enable();
+ return 0;
+@@ -6128,7 +6215,26 @@ int qeth_stop(struct net_device *dev)
+ struct qeth_card *card = dev->ml_priv;
+
+ QETH_CARD_TEXT(card, 4, "qethstop");
+- netif_tx_disable(dev);
++ if (IS_IQD(card)) {
++ struct qeth_qdio_out_q *queue;
++ unsigned int i;
++
++ /* Quiesce the NAPI instances: */
++ qeth_for_each_output_queue(card, queue, i) {
++ napi_disable(&queue->napi);
++ del_timer_sync(&queue->timer);
++ }
++
++ /* Stop .ndo_start_xmit, might still access queue->napi. */
++ netif_tx_disable(dev);
++
++ /* Queues may get re-allocated, so remove the NAPIs here. */
++ qeth_for_each_output_queue(card, queue, i)
++ netif_napi_del(&queue->napi);
++ } else {
++ netif_tx_disable(dev);
++ }
++
+ napi_disable(&card->napi);
+ return 0;
+ }
+--- a/drivers/s390/net/qeth_ethtool.c
++++ b/drivers/s390/net/qeth_ethtool.c
+@@ -39,6 +39,8 @@ static const struct qeth_stats txq_stats
+ QETH_TXQ_STAT("TSO bytes", tso_bytes),
+ QETH_TXQ_STAT("Packing mode switches", packing_mode_switch),
+ QETH_TXQ_STAT("Queue stopped", stopped),
++ QETH_TXQ_STAT("Completion yield", completion_yield),
++ QETH_TXQ_STAT("Completion timer", completion_timer),
+ };
+
+ static const struct qeth_stats card_stats[] = {
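The qeth_tx_poll() added above follows the common TX NAPI shape: reap
completed buffers, re-arm via napi_complete_done() once the queue drains
(with a timer as the completion backstop, since IQD has no TX interrupt),
and yield after one ring's worth of work. A stripped-down sketch of the
pattern, assuming hypothetical foo_reap_tx()/foo_arm_tx_timer() helpers in
place of qdio_inspect_queue() and qeth_tx_arm_timer():

#include <linux/netdevice.h>

#define FOO_RING_SIZE 128

struct foo_txq {
	struct napi_struct napi;
	/* ... ring state ... */
};

static int foo_reap_tx(struct foo_txq *q);		/* hypothetical */
static void foo_arm_tx_timer(struct foo_txq *q);	/* hypothetical */

static int foo_tx_poll(struct napi_struct *napi, int budget)
{
	struct foo_txq *q = container_of(napi, struct foo_txq, napi);
	unsigned int done = 0;

	while (done < FOO_RING_SIZE) {
		int completed = foo_reap_tx(q);

		if (completed <= 0) {
			/* idle: stop polling, let the timer catch stragglers */
			if (napi_complete_done(napi, 0))
				foo_arm_tx_timer(q);
			return 0;
		}
		done += completed;
	}

	/* one ring's worth done: give the CPU a breather, then re-poll */
	if (napi_complete_done(napi, 0))
		napi_schedule(napi);
	return 0;
}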
diff --git a/patches.suse/s390-qeth-add-xmit_more-support-for-iqd-devices b/patches.suse/s390-qeth-add-xmit_more-support-for-iqd-devices
new file mode 100644
index 0000000000..b613838782
--- /dev/null
+++ b/patches.suse/s390-qeth-add-xmit_more-support-for-iqd-devices
@@ -0,0 +1,282 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:53 +0200
+Subject: s390/qeth: add xmit_more support for IQD devices
+Git-commit: 9549d70a2d71526b8dc41cc0b255219ba46e5bf7
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+IQD devices offer limited support for bulking: all frames in a TX buffer
+need to have the same target. qeth_iqd_may_bulk() implements this
+constraint, and allows us to defer the TX doorbell until
+(a) the buffer is full (since each buffer needs its own doorbell), or
+(b) the entire TX queue is full, or
+(c) we reached the BQL limit.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/net/qeth_core.h | 24 +++++++
+ drivers/s390/net/qeth_core_main.c | 128 +++++++++++++++++++++++++-------------
+ 2 files changed, 109 insertions(+), 43 deletions(-)
+
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -378,6 +378,28 @@ enum qeth_header_ids {
+ #define QETH_HDR_EXT_CSUM_TRANSP_REQ 0x20
+ #define QETH_HDR_EXT_UDP 0x40 /*bit off for TCP*/
+
++static inline bool qeth_l2_same_vlan(struct qeth_hdr_layer2 *h1,
++ struct qeth_hdr_layer2 *h2)
++{
++ return !((h1->flags[2] ^ h2->flags[2]) & QETH_LAYER2_FLAG_VLAN) &&
++ h1->vlan_id == h2->vlan_id;
++}
++
++static inline bool qeth_l3_iqd_same_vlan(struct qeth_hdr_layer3 *h1,
++ struct qeth_hdr_layer3 *h2)
++{
++ return !((h1->ext_flags ^ h2->ext_flags) & QETH_HDR_EXT_VLAN_FRAME) &&
++ h1->vlan_id == h2->vlan_id;
++}
++
++static inline bool qeth_l3_same_next_hop(struct qeth_hdr_layer3 *h1,
++ struct qeth_hdr_layer3 *h2)
++{
++ return !((h1->flags ^ h2->flags) & QETH_HDR_IPV6) &&
++ ipv6_addr_equal(&h1->next_hop.ipv6_addr,
++ &h2->next_hop.ipv6_addr);
++}
++
+ enum qeth_qdio_info_states {
+ QETH_QDIO_UNINITIALIZED,
+ QETH_QDIO_ALLOCATED,
+@@ -508,6 +530,8 @@ struct qeth_qdio_out_q {
+ atomic_t set_pci_flags_count;
+ struct napi_struct napi;
+ struct timer_list timer;
++ struct qeth_hdr *prev_hdr;
++ u8 bulk_start;
+ };
+
+ #define qeth_for_each_output_queue(card, q, i) \
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2669,6 +2669,8 @@ int qeth_init_qdio_queues(struct qeth_ca
+ queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
+ queue->next_buf_to_fill = 0;
+ queue->do_pack = 0;
++ queue->prev_hdr = NULL;
++ queue->bulk_start = 0;
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+@@ -3313,6 +3315,14 @@ static void qeth_flush_buffers(struct qe
+ }
+ }
+
++static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
++{
++ qeth_flush_buffers(queue, queue->bulk_start, 1);
++
++ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
++ queue->prev_hdr = NULL;
++}
++
+ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+ {
+ int index;
+@@ -3668,9 +3678,32 @@ check_layout:
+ return 0;
+ }
+
+-static void __qeth_fill_buffer(struct sk_buff *skb,
+- struct qeth_qdio_out_buffer *buf,
+- bool is_first_elem, unsigned int offset)
++static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
++ struct qeth_qdio_out_buffer *buffer,
++ struct sk_buff *curr_skb,
++ struct qeth_hdr *curr_hdr)
++{
++ struct qeth_hdr *prev_hdr = queue->prev_hdr;
++
++ if (!prev_hdr)
++ return true;
++
++ /* All packets must have the same target: */
++ if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
++ struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
++
++ return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
++ eth_hdr(curr_skb)->h_dest) &&
++ qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
++ }
++
++ return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
++ qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
++}
++
++static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
++ struct qeth_qdio_out_buffer *buf,
++ bool is_first_elem, unsigned int offset)
+ {
+ struct qdio_buffer *buffer = buf->buffer;
+ int element = buf->next_element_to_fill;
+@@ -3727,24 +3760,21 @@ static void __qeth_fill_buffer(struct sk
+ if (buffer->element[element - 1].eflags)
+ buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
+ buf->next_element_to_fill = element;
++ return element;
+ }
+
+ /**
+ * qeth_fill_buffer() - map skb into an output buffer
+- * @queue: QDIO queue to submit the buffer on
+ * @buf: buffer to transport the skb
+ * @skb: skb to map into the buffer
+ * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated
+ * from qeth_core_header_cache.
+ * @offset: when mapping the skb, start at skb->data + offset
+ * @hd_len: if > 0, build a dedicated header element of this size
+- * flush: Prepare the buffer to be flushed, regardless of its fill level.
+ */
+-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+- struct qeth_qdio_out_buffer *buf,
+- struct sk_buff *skb, struct qeth_hdr *hdr,
+- unsigned int offset, unsigned int hd_len,
+- bool flush)
++static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
++ struct sk_buff *skb, struct qeth_hdr *hdr,
++ unsigned int offset, unsigned int hd_len)
+ {
+ struct qdio_buffer *buffer = buf->buffer;
+ bool is_first_elem = true;
+@@ -3764,36 +3794,22 @@ static int qeth_fill_buffer(struct qeth_
+ buf->next_element_to_fill++;
+ }
+
+- __qeth_fill_buffer(skb, buf, is_first_elem, offset);
+-
+- if (!queue->do_pack) {
+- QETH_CARD_TEXT(queue->card, 6, "fillbfnp");
+- } else {
+- QETH_CARD_TEXT(queue->card, 6, "fillbfpa");
+-
+- QETH_TXQ_STAT_INC(queue, skbs_pack);
+- /* If the buffer still has free elements, keep using it. */
+- if (!flush &&
+- buf->next_element_to_fill < queue->max_elements)
+- return 0;
+- }
+-
+- /* flush out the buffer */
+- atomic_set(&buf->state, QETH_QDIO_BUF_PRIMED);
+- queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
+- QDIO_MAX_BUFFERS_PER_Q;
+- return 1;
++ return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
+ }
+
+-static int qeth_do_send_packet_fast(struct qeth_qdio_out_q *queue,
+- struct sk_buff *skb, struct qeth_hdr *hdr,
+- unsigned int offset, unsigned int hd_len)
++static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
++ struct sk_buff *skb, unsigned int elements,
++ struct qeth_hdr *hdr, unsigned int offset,
++ unsigned int hd_len)
+ {
+- int index = queue->next_buf_to_fill;
+- struct qeth_qdio_out_buffer *buffer = queue->bufs[index];
++ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+ unsigned int bytes = qdisc_pkt_len(skb);
++ unsigned int next_element;
+ struct netdev_queue *txq;
+ bool stopped = false;
++ bool flush;
++
++ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
+ /* Just a sanity check, the wake/stop logic should ensure that we always
+ * get a free buffer.
+@@ -3801,9 +3817,19 @@ static int qeth_do_send_packet_fast(stru
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+ return -EBUSY;
+
+- txq = netdev_get_tx_queue(queue->card->dev, skb_get_queue_mapping(skb));
++ if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
++ !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
++ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
++ qeth_flush_queue(queue);
++ buffer = queue->bufs[queue->bulk_start];
+
+- if (atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
++ /* Sanity-check again: */
++ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
++ return -EBUSY;
++ }
++
++ if (buffer->next_element_to_fill == 0 &&
++ atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
+ /* If a TX completion happens right _here_ and misses to wake
+ * the txq, then our re-check below will catch the race.
+ */
+@@ -3812,11 +3838,17 @@ static int qeth_do_send_packet_fast(stru
+ stopped = true;
+ }
+
+- qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len, stopped);
+- netdev_tx_sent_queue(txq, bytes);
++ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
+ buffer->bytes += bytes;
++ queue->prev_hdr = hdr;
+
+- qeth_flush_buffers(queue, index, 1);
++ flush = __netdev_tx_sent_queue(txq, bytes,
++ !stopped && netdev_xmit_more());
++
++ if (flush || next_element >= queue->max_elements) {
++ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
++ qeth_flush_queue(queue);
++ }
+
+ if (stopped && !qeth_out_queue_is_full(queue))
+ netif_tx_start_queue(txq);
+@@ -3829,6 +3861,7 @@ int qeth_do_send_packet(struct qeth_card
+ int elements_needed)
+ {
+ struct qeth_qdio_out_buffer *buffer;
++ unsigned int next_element;
+ struct netdev_queue *txq;
+ bool stopped = false;
+ int start_index;
+@@ -3891,8 +3924,17 @@ int qeth_do_send_packet(struct qeth_card
+ stopped = true;
+ }
+
+- flush_count += qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len,
+- stopped);
++ next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
++
++ if (queue->do_pack)
++ QETH_TXQ_STAT_INC(queue, skbs_pack);
++ if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
++ flush_count++;
++ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
++ queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
++ QDIO_MAX_BUFFERS_PER_Q;
++ }
++
+ if (flush_count)
+ qeth_flush_buffers(queue, start_index, flush_count);
+ else if (!atomic_read(&queue->set_pci_flags_count))
+@@ -3988,8 +4030,8 @@ int qeth_xmit(struct qeth_card *card, st
+ frame_len - proto_len, skb, proto_len);
+
+ if (IS_IQD(card)) {
+- rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+- hd_len);
++ rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
++ hd_len);
+ } else {
+ /* TODO: drop skb_orphan() once TX completion is fast enough */
+ skb_orphan(skb);
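The deferral above hinges on __netdev_tx_sent_queue(), which folds the BQL
byte accounting together with the flush decision: it returns true when the
doorbell must be rung now, i.e. when the stack announced no further frames
(netdev_xmit_more() is false) or the BQL limit was just crossed. A hedged
sketch of how a driver typically consumes that return value (the foo_*
names are made up):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct foo_txq;					/* driver-private, hypothetical */
static void foo_fill_desc(struct foo_txq *q, struct sk_buff *skb);
static void foo_ring_doorbell(struct foo_txq *q);

static netdev_tx_t foo_xmit_one(struct foo_txq *q, struct netdev_queue *txq,
				struct sk_buff *skb, bool stopped)
{
	foo_fill_desc(q, skb);

	/*
	 * Accounts skb->len for BQL and returns true when a flush is
	 * required.  If we just stopped the txq, flush unconditionally
	 * so no primed buffer is left waiting for a doorbell.
	 */
	if (__netdev_tx_sent_queue(txq, skb->len,
				   !stopped && netdev_xmit_more()))
		foo_ring_doorbell(q);

	return NETDEV_TX_OK;
}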
diff --git a/patches.suse/s390-qeth-collect-accurate-tx-statistics b/patches.suse/s390-qeth-collect-accurate-tx-statistics
new file mode 100644
index 0000000000..d4dd572e50
--- /dev/null
+++ b/patches.suse/s390-qeth-collect-accurate-tx-statistics
@@ -0,0 +1,265 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:49 +0200
+Subject: s390/qeth: collect accurate TX statistics
+Git-commit: eeac0e20a173dd9407e7092b3ddb45917249d68d
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+This consolidates the SW statistics code, and improves it to
+(1) account for the header overhead of each segment on a TSO skb,
+(2) count dangling packets as in-error (during e.g. shutdown), and
+(3) only count offloads when the skb was successfully transmitted.
+
+We also count each segment of a TSO skb as one packet - except for
+tx_dropped, to be consistent with dev->tx_dropped.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/net/qeth_core.h | 1
+ drivers/s390/net/qeth_core_main.c | 66 +++++++++++++++++++++++---------------
+ drivers/s390/net/qeth_l2_main.c | 12 ++----
+ drivers/s390/net/qeth_l3_main.c | 9 +----
+ 4 files changed, 49 insertions(+), 39 deletions(-)
+
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -30,6 +30,7 @@
+ #include <net/ipv6.h>
+ #include <net/if_inet6.h>
+ #include <net/addrconf.h>
++#include <net/sch_generic.h>
+ #include <net/tcp.h>
+
+ #include <asm/debug.h>
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -70,7 +70,7 @@ static void qeth_free_qdio_queues(struct
+ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification);
+-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
++static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error);
+ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+ static void qeth_close_dev_handler(struct work_struct *work)
+@@ -410,7 +410,7 @@ static void qeth_cleanup_handled_pending
+ /* release here to avoid interleaving between
+ outbound tasklet and inbound tasklet
+ regarding notifications and lifecycle */
+- qeth_release_skbs(c);
++ qeth_tx_complete_buf(c, forced_cleanup);
+
+ c = f->next_pending;
+ WARN_ON_ONCE(head->next_pending != f);
+@@ -1094,22 +1094,51 @@ static void qeth_notify_skbs(struct qeth
+ }
+ }
+
+-static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
++static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error)
+ {
++ struct qeth_qdio_out_q *queue = buf->q;
+ struct sk_buff *skb;
+
+ /* release may never happen from within CQ tasklet scope */
+ WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+- qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
++ qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
++
++ /* Empty buffer? */
++ if (buf->next_element_to_fill == 0)
++ return;
++
++ QETH_TXQ_STAT_INC(queue, bufs);
++ QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
++ while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
++ unsigned int bytes = qdisc_pkt_len(skb);
++ bool is_tso = skb_is_gso(skb);
++ unsigned int packets;
++
++ packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
++ if (error) {
++ QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
++ } else {
++ QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
++ QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
++ QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
++ if (skb_is_nonlinear(skb))
++ QETH_TXQ_STAT_INC(queue, skbs_sg);
++ if (is_tso) {
++ QETH_TXQ_STAT_INC(queue, skbs_tso);
++ QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
++ }
++ }
+
+- while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+ consume_skb(skb);
++ }
+ }
+
+ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+- struct qeth_qdio_out_buffer *buf)
++ struct qeth_qdio_out_buffer *buf,
++ bool error)
+ {
+ int i;
+
+@@ -1117,7 +1146,7 @@ static void qeth_clear_output_buffer(str
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ atomic_dec(&queue->set_pci_flags_count);
+
+- qeth_release_skbs(buf);
++ qeth_tx_complete_buf(buf, error);
+
+ for (i = 0; i < queue->max_elements; ++i) {
+ if (buf->buffer->element[i].addr && buf->is_header[i])
+@@ -1139,7 +1168,7 @@ static void qeth_drain_output_queue(stru
+ if (!q->bufs[j])
+ continue;
+ qeth_cleanup_handled_pending(q, j, 1);
+- qeth_clear_output_buffer(q, q->bufs[j]);
++ qeth_clear_output_buffer(q, q->bufs[j], true);
+ if (free) {
+ kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+ q->bufs[j] = NULL;
+@@ -3239,14 +3268,12 @@ static void qeth_flush_buffers(struct qe
+ }
+ }
+
+- QETH_TXQ_STAT_ADD(queue, bufs, count);
+ qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
+ if (atomic_read(&queue->set_pci_flags_count))
+ qdio_flags |= QDIO_FLAG_PCI_OUT;
+ rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
+ queue->queue_no, index, count);
+ if (rc) {
+- QETH_TXQ_STAT_ADD(queue, tx_errors, count);
+ /* ignore temporary SIGA errors without busy condition */
+ if (rc == -ENOBUFS)
+ return;
+@@ -3455,7 +3482,7 @@ static void qeth_qdio_output_handler(str
+ qeth_notify_skbs(queue, buffer, n);
+ }
+
+- qeth_clear_output_buffer(queue, buffer);
++ qeth_clear_output_buffer(queue, buffer, qdio_error);
+ }
+ qeth_cleanup_handled_pending(queue, bidx, 0);
+ }
+@@ -3941,7 +3968,6 @@ int qeth_xmit(struct qeth_card *card, st
+ unsigned int hd_len = 0;
+ unsigned int elements;
+ int push_len, rc;
+- bool is_sg;
+
+ if (is_tso) {
+ hw_hdr_len = sizeof(struct qeth_hdr_tso);
+@@ -3970,7 +3996,6 @@ int qeth_xmit(struct qeth_card *card, st
+ qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
+ frame_len - proto_len, skb, proto_len);
+
+- is_sg = skb_is_nonlinear(skb);
+ if (IS_IQD(card)) {
+ rc = qeth_do_send_packet_fast(queue, skb, hdr, data_offset,
+ hd_len);
+@@ -3981,18 +4006,9 @@ int qeth_xmit(struct qeth_card *card, st
+ hd_len, elements);
+ }
+
+- if (!rc) {
+- QETH_TXQ_STAT_ADD(queue, buf_elements, elements);
+- if (is_sg)
+- QETH_TXQ_STAT_INC(queue, skbs_sg);
+- if (is_tso) {
+- QETH_TXQ_STAT_INC(queue, skbs_tso);
+- QETH_TXQ_STAT_ADD(queue, tso_bytes, frame_len);
+- }
+- } else {
+- if (!push_len)
+- kmem_cache_free(qeth_core_header_cache, hdr);
+- }
++ if (rc && !push_len)
++ kmem_cache_free(qeth_core_header_cache, hdr);
++
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(qeth_xmit);
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -175,10 +175,8 @@ static void qeth_l2_fill_header(struct q
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_L2_TSO;
+ } else {
+ hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2;
+- if (skb->ip_summed == CHECKSUM_PARTIAL) {
++ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ qeth_tx_csum(skb, &hdr->hdr.l2.flags[1], ipv);
+- QETH_TXQ_STAT_INC(queue, skbs_csum);
+- }
+ }
+
+ /* set byte byte 3 to casting flags */
+@@ -588,9 +586,10 @@ static netdev_tx_t qeth_l2_hard_start_xm
+ struct qeth_card *card = dev->ml_priv;
+ u16 txq = skb_get_queue_mapping(skb);
+ struct qeth_qdio_out_q *queue;
+- int tx_bytes = skb->len;
+ int rc;
+
++ if (!skb_is_gso(skb))
++ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ if (IS_IQD(card))
+ txq = qeth_iqd_translate_txq(dev, txq);
+ queue = card->qdio.out_qs[txq];
+@@ -601,11 +600,8 @@ static netdev_tx_t qeth_l2_hard_start_xm
+ rc = qeth_xmit(card, skb, queue, qeth_get_ip_version(skb),
+ qeth_l2_fill_header);
+
+- if (!rc) {
+- QETH_TXQ_STAT_INC(queue, tx_packets);
+- QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
++ if (!rc)
+ return NETDEV_TX_OK;
+- }
+
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
+ kfree_skb(skb);
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -1967,7 +1967,6 @@ static void qeth_l3_fill_header(struct q
+ /* some HW requires combined L3+L4 csum offload: */
+ if (ipv == 4)
+ hdr->hdr.l3.ext_flags |= QETH_HDR_EXT_CSUM_HDR_REQ;
+- QETH_TXQ_STAT_INC(queue, skbs_csum);
+ }
+ }
+
+@@ -2054,9 +2053,10 @@ static netdev_tx_t qeth_l3_hard_start_xm
+ u16 txq = skb_get_queue_mapping(skb);
+ int ipv = qeth_get_ip_version(skb);
+ struct qeth_qdio_out_q *queue;
+- int tx_bytes = skb->len;
+ int rc;
+
++ if (!skb_is_gso(skb))
++ qdisc_skb_cb(skb)->pkt_len = skb->len;
+ if (IS_IQD(card)) {
+ queue = card->qdio.out_qs[qeth_iqd_translate_txq(dev, txq)];
+
+@@ -2079,11 +2079,8 @@ static netdev_tx_t qeth_l3_hard_start_xm
+ else
+ rc = qeth_xmit(card, skb, queue, ipv, qeth_l3_fill_header);
+
+- if (!rc) {
+- QETH_TXQ_STAT_INC(queue, tx_packets);
+- QETH_TXQ_STAT_ADD(queue, tx_bytes, tx_bytes);
++ if (!rc)
+ return NETDEV_TX_OK;
+- }
+
+ tx_drop:
+ QETH_TXQ_STAT_INC(queue, tx_dropped);
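The statistics rework above relies on two properties: qdisc_skb_cb()
storage stays valid while the skb sits on the TX queue (so the length
stashed before transmission can be read back at completion time), and for
TSO skbs skb_shinfo(skb)->gso_segs gives the number of frames actually put
on the wire. A small sketch of both halves under those assumptions (struct
foo_tx_stats and the function names are illustrative):

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct foo_tx_stats {
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_errors;
};

/* xmit side: make the frame length survive until TX completion */
static void foo_stash_pkt_len(struct sk_buff *skb)
{
	if (!skb_is_gso(skb))
		qdisc_skb_cb(skb)->pkt_len = skb->len;
}

/* completion side: count one packet per wire frame, segments for TSO */
static void foo_account_skb(struct foo_tx_stats *s, struct sk_buff *skb,
			    bool error)
{
	unsigned int packets = skb_is_gso(skb) ?
			       skb_shinfo(skb)->gso_segs : 1;

	if (error) {
		s->tx_errors += packets;
	} else {
		s->tx_packets += packets;
		s->tx_bytes += qdisc_pkt_len(skb);
	}
}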
diff --git a/patches.suse/s390-qeth-use-iqd-multi-write b/patches.suse/s390-qeth-use-iqd-multi-write
new file mode 100644
index 0000000000..379f0ced19
--- /dev/null
+++ b/patches.suse/s390-qeth-use-iqd-multi-write
@@ -0,0 +1,161 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Thu, 31 Oct 2019 13:42:15 +0100
+Subject: s390/qeth: use IQD Multi-Write
+Git-commit: 8b664cd127a1e3777e23c8aaa96ba52ef741bb55
+Patch-mainline: v5.5-rc1
+References: jsc#SLE-7795 LTC#179220
+
+For IQD devices with Multi-Write support, we can defer the queue-flush
+further and transmit multiple IO buffers with a single TX doorbell.
+The same-target restriction still applies.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/net/qeth_core.h | 9 ++++++
+ drivers/s390/net/qeth_core_main.c | 54 ++++++++++++++++++++++++++++++--------
+ 2 files changed, 53 insertions(+), 10 deletions(-)
+
+--- a/drivers/s390/net/qeth_core.h
++++ b/drivers/s390/net/qeth_core.h
+@@ -532,6 +532,8 @@ struct qeth_qdio_out_q {
+ struct timer_list timer;
+ struct qeth_hdr *prev_hdr;
+ u8 bulk_start;
++ u8 bulk_count;
++ u8 bulk_max;
+ };
+
+ #define qeth_for_each_output_queue(card, q, i) \
+@@ -880,6 +882,13 @@ static inline u16 qeth_iqd_translate_txq
+ return txq;
+ }
+
++static inline bool qeth_iqd_is_mcast_queue(struct qeth_card *card,
++ struct qeth_qdio_out_q *queue)
++{
++ return qeth_iqd_translate_txq(card->dev, queue->queue_no) ==
++ QETH_IQD_MCAST_TXQ;
++}
++
+ static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+ unsigned int elements)
+ {
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2632,6 +2632,18 @@ static int qeth_init_input_buffer(struct
+ return 0;
+ }
+
++static unsigned int qeth_tx_select_bulk_max(struct qeth_card *card,
++ struct qeth_qdio_out_q *queue)
++{
++ if (!IS_IQD(card) ||
++ qeth_iqd_is_mcast_queue(card, queue) ||
++ card->options.cq == QETH_CQ_ENABLED ||
++ qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd))
++ return 1;
++
++ return card->ssqd.mmwc ? card->ssqd.mmwc : 1;
++}
++
+ int qeth_init_qdio_queues(struct qeth_card *card)
+ {
+ unsigned int i;
+@@ -2671,6 +2683,8 @@ int qeth_init_qdio_queues(struct qeth_ca
+ queue->do_pack = 0;
+ queue->prev_hdr = NULL;
+ queue->bulk_start = 0;
++ queue->bulk_count = 0;
++ queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
+ atomic_set(&queue->used_buffers, 0);
+ atomic_set(&queue->set_pci_flags_count, 0);
+ atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+@@ -3317,10 +3331,11 @@ static void qeth_flush_buffers(struct qe
+
+ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
+ {
+- qeth_flush_buffers(queue, queue->bulk_start, 1);
++ qeth_flush_buffers(queue, queue->bulk_start, queue->bulk_count);
+
+- queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
++ queue->bulk_start = QDIO_BUFNR(queue->bulk_start + queue->bulk_count);
+ queue->prev_hdr = NULL;
++ queue->bulk_count = 0;
+ }
+
+ static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
+@@ -3679,10 +3694,10 @@ check_layout:
+ }
+
+ static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
+- struct qeth_qdio_out_buffer *buffer,
+ struct sk_buff *curr_skb,
+ struct qeth_hdr *curr_hdr)
+ {
++ struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+ struct qeth_hdr *prev_hdr = queue->prev_hdr;
+
+ if (!prev_hdr)
+@@ -3802,13 +3817,14 @@ static int __qeth_xmit(struct qeth_card
+ struct qeth_hdr *hdr, unsigned int offset,
+ unsigned int hd_len)
+ {
+- struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
+ unsigned int bytes = qdisc_pkt_len(skb);
++ struct qeth_qdio_out_buffer *buffer;
+ unsigned int next_element;
+ struct netdev_queue *txq;
+ bool stopped = false;
+ bool flush;
+
++ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start + queue->bulk_count)];
+ txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
+
+ /* Just a sanity check, the wake/stop logic should ensure that we always
+@@ -3817,11 +3833,23 @@ static int __qeth_xmit(struct qeth_card
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+ return -EBUSY;
+
+- if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
+- !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
+- atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+- qeth_flush_queue(queue);
+- buffer = queue->bufs[queue->bulk_start];
++ flush = !qeth_iqd_may_bulk(queue, skb, hdr);
++
++ if (flush ||
++ (buffer->next_element_to_fill + elements > queue->max_elements)) {
++ if (buffer->next_element_to_fill > 0) {
++ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
++ queue->bulk_count++;
++ }
++
++ if (queue->bulk_count >= queue->bulk_max)
++ flush = true;
++
++ if (flush)
++ qeth_flush_queue(queue);
++
++ buffer = queue->bufs[QDIO_BUFNR(queue->bulk_start +
++ queue->bulk_count)];
+
+ /* Sanity-check again: */
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
+@@ -3847,7 +3875,13 @@ static int __qeth_xmit(struct qeth_card
+
+ if (flush || next_element >= queue->max_elements) {
+ atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
+- qeth_flush_queue(queue);
++ queue->bulk_count++;
++
++ if (queue->bulk_count >= queue->bulk_max)
++ flush = true;
++
++ if (flush)
++ qeth_flush_queue(queue);
+ }
+
+ if (stopped && !qeth_out_queue_is_full(queue))
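The bulk_start/bulk_count bookkeeping above is plain ring arithmetic:
QDIO_BUFNR() masks an index into the 128-entry ring, the next buffer to
fill always sits at bulk_start + bulk_count, and a single doorbell flushes
the whole [bulk_start, bulk_start + bulk_count) window. A minimal
illustration (the QDIO_* macros match the patch, struct foo_q and the
helpers are hypothetical):

#include <linux/types.h>

#define QDIO_MAX_BUFFERS_PER_Q	128
#define QDIO_MAX_BUFFERS_MASK	(QDIO_MAX_BUFFERS_PER_Q - 1)
#define QDIO_BUFNR(num)		((num) & QDIO_MAX_BUFFERS_MASK)

struct foo_q {
	u8 bulk_start;	/* first not-yet-flushed buffer */
	u8 bulk_count;	/* primed buffers awaiting the doorbell */
	u8 bulk_max;	/* mmwc-derived cap, always >= 1 */
};

/* index of the buffer a new frame would be filled into */
static unsigned int foo_fill_index(struct foo_q *q)
{
	return QDIO_BUFNR(q->bulk_start + q->bulk_count);
}

/* one doorbell for all pending buffers, then reset the window */
static void foo_flush(struct foo_q *q)
{
	/* do_QDIO(..., q->bulk_start, q->bulk_count) would go here */
	q->bulk_start = QDIO_BUFNR(q->bulk_start + q->bulk_count);
	q->bulk_count = 0;
}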
diff --git a/patches.suse/s390-qeth-when-in-tx-napi-mode-use-napi_consume_skb b/patches.suse/s390-qeth-when-in-tx-napi-mode-use-napi_consume_skb
new file mode 100644
index 0000000000..f5e7cb3ad1
--- /dev/null
+++ b/patches.suse/s390-qeth-when-in-tx-napi-mode-use-napi_consume_skb
@@ -0,0 +1,117 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Fri, 23 Aug 2019 11:48:51 +0200
+Subject: s390/qeth: when in TX NAPI mode, use napi_consume_skb()
+Git-commit: 85e537d8f1b6b1201ced628b124b3d08436f5a04
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7795 LTC#179220
+
+This allows the stack to bulk-free our TX-completed skbs.
+
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/net/qeth_core_main.c | 24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -70,7 +70,8 @@ static void qeth_free_qdio_queues(struct
+ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+ enum iucv_tx_notify notification);
+-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error);
++static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
++ int budget);
+ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
+
+ static void qeth_close_dev_handler(struct work_struct *work)
+@@ -410,7 +411,7 @@ static void qeth_cleanup_handled_pending
+ /* release here to avoid interleaving between
+ outbound tasklet and inbound tasklet
+ regarding notifications and lifecycle */
+- qeth_tx_complete_buf(c, forced_cleanup);
++ qeth_tx_complete_buf(c, forced_cleanup, 0);
+
+ c = f->next_pending;
+ WARN_ON_ONCE(head->next_pending != f);
+@@ -1094,7 +1095,8 @@ static void qeth_notify_skbs(struct qeth
+ }
+ }
+
+-static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error)
++static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
++ int budget)
+ {
+ struct qeth_qdio_out_q *queue = buf->q;
+ struct sk_buff *skb;
+@@ -1132,13 +1134,13 @@ static void qeth_tx_complete_buf(struct
+ }
+ }
+
+- consume_skb(skb);
++ napi_consume_skb(skb, budget);
+ }
+ }
+
+ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+ struct qeth_qdio_out_buffer *buf,
+- bool error)
++ bool error, int budget)
+ {
+ int i;
+
+@@ -1146,7 +1148,7 @@ static void qeth_clear_output_buffer(str
+ if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
+ atomic_dec(&queue->set_pci_flags_count);
+
+- qeth_tx_complete_buf(buf, error);
++ qeth_tx_complete_buf(buf, error, budget);
+
+ for (i = 0; i < queue->max_elements; ++i) {
+ if (buf->buffer->element[i].addr && buf->is_header[i])
+@@ -1168,7 +1170,7 @@ static void qeth_drain_output_queue(stru
+ if (!q->bufs[j])
+ continue;
+ qeth_cleanup_handled_pending(q, j, 1);
+- qeth_clear_output_buffer(q, q->bufs[j], true);
++ qeth_clear_output_buffer(q, q->bufs[j], true, 0);
+ if (free) {
+ kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
+ q->bufs[j] = NULL;
+@@ -3470,7 +3472,7 @@ static void qeth_qdio_output_handler(str
+ int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
+ buffer = queue->bufs[bidx];
+ qeth_handle_send_error(card, buffer, qdio_error);
+- qeth_clear_output_buffer(queue, buffer, qdio_error);
++ qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
+ }
+
+ atomic_sub(count, &queue->used_buffers);
+@@ -5155,7 +5157,7 @@ out:
+ EXPORT_SYMBOL_GPL(qeth_poll);
+
+ static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
+- unsigned int bidx, bool error)
++ unsigned int bidx, bool error, int budget)
+ {
+ struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
+ u8 sflags = buffer->buffer->element[15].sflags;
+@@ -5185,7 +5187,7 @@ static void qeth_iqd_tx_complete(struct
+ if (card->options.cq == QETH_CQ_ENABLED)
+ qeth_notify_skbs(queue, buffer,
+ qeth_compute_cq_notification(sflags, 0));
+- qeth_clear_output_buffer(queue, buffer, error);
++ qeth_clear_output_buffer(queue, buffer, error, budget);
+ }
+
+ static int qeth_tx_poll(struct napi_struct *napi, int budget)
+@@ -5229,7 +5231,7 @@ static int qeth_tx_poll(struct napi_stru
+ unsigned int bidx = QDIO_BUFNR(i);
+
+ qeth_handle_send_error(card, queue->bufs[bidx], error);
+- qeth_iqd_tx_complete(queue, bidx, error);
++ qeth_iqd_tx_complete(queue, bidx, error, budget);
+ qeth_cleanup_handled_pending(queue, bidx, false);
+ }
+
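napi_consume_skb() only batches frees onto the per-CPU NAPI cache when
called from softirq context with a non-zero budget; with budget 0 it is
expected to degrade to an immediate, any-context free, which is why the
drain and output-handler paths above pass 0. A short illustration of that
convention (function names are made up):

#include <linux/skbuff.h>

/* NAPI poll path: pass the poll budget so the stack may batch frees */
static void foo_free_tx_skb_napi(struct sk_buff *skb, int budget)
{
	napi_consume_skb(skb, budget);
}

/* teardown/error path: budget 0 requests an immediate, context-safe free */
static void foo_free_tx_skb_drain(struct sk_buff *skb)
{
	napi_consume_skb(skb, 0);
}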
diff --git a/patches.suse/s390-zcrypt-add-low-level-functions-for-cca-aes-cipher-keys b/patches.suse/s390-zcrypt-add-low-level-functions-for-cca-aes-cipher-keys
new file mode 100644
index 0000000000..889a43fe84
--- /dev/null
+++ b/patches.suse/s390-zcrypt-add-low-level-functions-for-cca-aes-cipher-keys
@@ -0,0 +1,1028 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Wed, 3 Jul 2019 13:16:51 +0200
+Subject: s390/zcrypt: Add low level functions for CCA AES cipher keys
+Git-commit: 4bc123b18ce6ae6c42c69d0456b5acbd2f7bc8bd
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+This patch adds low level functions, structs and defines to support
+CCA AES cipher keys:
+- struct cipherkeytoken can be used for an inside view of the CCA AES
+ cipher key token blob.
+- function cca_cipher2protkey() derives a CPACF protected key from a
+  CCA AES cipher key.
+- function cca_gencipherkey() generates a CCA AES cipher key with a
+  random value.
+- function cca_findcard2() constructs a list of apqns based on input
+  constraints like min hardware type, mkvp values.
+- cca_check_secaescipherkey() does a check on the given CCA AES cipher
+  key blob.
+- cca_clr2cipherkey() generates a CCA AES cipher key from a given
+ clear key value.
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/zcrypt_ccamisc.c | 800 ++++++++++++++++++++++++++++++++++-
+ drivers/s390/crypto/zcrypt_ccamisc.h | 107 ++++
+ 2 files changed, 903 insertions(+), 4 deletions(-)
+
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -13,6 +13,7 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/random.h>
+ #include <asm/zcrypt.h>
+ #include <asm/pkey.h>
+
+@@ -45,13 +46,12 @@ static LIST_HEAD(cca_info_list);
+ static DEFINE_SPINLOCK(cca_info_list_lock);
+
+ /*
+- * Simple check if the token is a valid CCA secure AES key
++ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+ * also checked. Returns 0 on success or errno value on failure.
+ */
+ int cca_check_secaeskeytoken(debug_info_t *dbg, int dbflvl,
+ const u8 *token, int keybitsize)
+-
+ {
+ struct secaeskeytoken *t = (struct secaeskeytoken *) token;
+
+@@ -83,6 +83,96 @@ int cca_check_secaeskeytoken(debug_info_
+ EXPORT_SYMBOL(cca_check_secaeskeytoken);
+
+ /*
++ * Simple check if the token is a valid CCA secure AES cipher key
++ * token. If keybitsize is given, the bitsize of the key is
++ * also checked. If checkcpacfexport is enabled, the key is also
++ * checked for the export flag to allow CPACF export.
++ * Returns 0 on success or errno value on failure.
++ */
++int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
++ const u8 *token, int keybitsize,
++ int checkcpacfexport)
++{
++ struct cipherkeytoken *t = (struct cipherkeytoken *) token;
++ bool keybitsizeok = true;
++
++#define DBF(...) debug_sprintf_event(dbg, dbflvl, ##__VA_ARGS__)
++
++ if (t->type != TOKTYPE_CCA_INTERNAL) {
++ if (dbg)
++ DBF("%s token check failed, type 0x%02x != 0x%02x\n",
++ __func__, (int) t->type, TOKTYPE_CCA_INTERNAL);
++ return -EINVAL;
++ }
++ if (t->version != TOKVER_CCA_VLSC) {
++ if (dbg)
++ DBF("%s token check failed, version 0x%02x != 0x%02x\n",
++ __func__, (int) t->version, TOKVER_CCA_VLSC);
++ return -EINVAL;
++ }
++ if (t->algtype != 0x02) {
++ if (dbg)
++ DBF("%s token check failed, algtype 0x%02x != 0x02\n",
++ __func__, (int) t->algtype);
++ return -EINVAL;
++ }
++ if (t->keytype != 0x0001) {
++ if (dbg)
++ DBF("%s token check failed, keytype 0x%04x != 0x0001\n",
++ __func__, (int) t->keytype);
++ return -EINVAL;
++ }
++ if (t->plfver != 0x00 && t->plfver != 0x01) {
++ if (dbg)
++ DBF("%s token check failed, unknown plfver 0x%02x\n",
++ __func__, (int) t->plfver);
++ return -EINVAL;
++ }
++ if (t->wpllen != 512 && t->wpllen != 576 && t->wpllen != 640) {
++ if (dbg)
++ DBF("%s token check failed, unknown wpllen %d\n",
++ __func__, (int) t->wpllen);
++ return -EINVAL;
++ }
++ if (keybitsize > 0) {
++ switch (keybitsize) {
++ case 128:
++ if (t->wpllen != (t->plfver ? 640 : 512))
++ keybitsizeok = false;
++ break;
++ case 192:
++ if (t->wpllen != (t->plfver ? 640 : 576))
++ keybitsizeok = false;
++ break;
++ case 256:
++ if (t->wpllen != 640)
++ keybitsizeok = false;
++ break;
++ default:
++ keybitsizeok = false;
++ break;
++ }
++ if (!keybitsizeok) {
++ if (dbg)
++ DBF("%s token check failed, bitsize %d\n",
++ __func__, keybitsize);
++ return -EINVAL;
++ }
++ }
++ if (checkcpacfexport && !(t->kmf1 & KMF1_XPRT_CPAC)) {
++ if (dbg)
++ DBF("%s token check failed, XPRT_CPAC bit is 0\n",
++ __func__);
++ return -EINVAL;
++ }
++
++#undef DBF
++
++ return 0;
++}
++EXPORT_SYMBOL(cca_check_secaescipherkey);
++
++/*
+ * Allocate consecutive memory for request CPRB, request param
+ * block, reply CPRB and reply param block and fill in values
+ * for the common fields. Returns 0 on success or errno value
+@@ -441,7 +531,8 @@ int cca_clr2seckey(u16 cardnr, u16 domai
+ }
+
+ /* copy the generated secure key token */
+- memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
++ if (seckey)
++ memcpy(seckey, prepparm->lv3.keyblock.tok, SECKEYBLOBSIZE);
+
+ out:
+ free_cprbmem(mem, PARMBSIZE, 1);
+@@ -595,6 +686,623 @@ out:
+ EXPORT_SYMBOL(cca_sec2protkey);
+
+ /*
++ * AES cipher key skeleton created with CSNBKTB2 with these flags:
++ * INTERNAL, NO-KEY, AES, CIPHER, ANY-MODE, NOEX-SYM, NOEXAASY,
++ * NOEXUASY, XPRTCPAC, NOEX-RAW, NOEX-DES, NOEX-AES, NOEX-RSA
++ * used by cca_gencipherkey() and cca_clr2cipherkey().
++ */
++static const u8 aes_cipher_key_skeleton[] = {
++ 0x01, 0x00, 0x00, 0x38, 0x05, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00,
++ 0x00, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x00, 0x02, 0x00, 0x01, 0x02, 0xc0, 0x00, 0xff,
++ 0x00, 0x03, 0x08, 0xc8, 0x00, 0x00, 0x00, 0x00 };
++#define SIZEOF_SKELETON (sizeof(aes_cipher_key_skeleton))
++
++/*
++ * Generate (random) CCA AES CIPHER secure key.
++ */
++int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
++ u8 *keybuf, size_t *keybufsize)
++{
++ int rc;
++ u8 *mem;
++ struct CPRBX *preqcblk, *prepcblk;
++ struct ica_xcRB xcrb;
++ struct gkreqparm {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ char rule_array[2*8];
++ struct {
++ u16 len;
++ u8 key_type_1[8];
++ u8 key_type_2[8];
++ u16 clear_key_bit_len;
++ u16 key_name_1_len;
++ u16 key_name_2_len;
++ u16 user_data_1_len;
++ u16 user_data_2_len;
++ u8 key_name_1[0];
++ u8 key_name_2[0];
++ u8 user_data_1[0];
++ u8 user_data_2[0];
++ } vud;
++ struct {
++ u16 len;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 kek_id_1[0];
++ } tlv1;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 kek_id_2[0];
++ } tlv2;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 gen_key_id_1[SIZEOF_SKELETON];
++ } tlv3;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 gen_key_id_1_label[0];
++ } tlv4;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 gen_key_id_2[0];
++ } tlv5;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 gen_key_id_2_label[0];
++ } tlv6;
++ } kb;
++ } __packed * preqparm;
++ struct gkrepparm {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ struct {
++ u16 len;
++ } vud;
++ struct {
++ u16 len;
++ struct {
++ u16 len;
++ u16 flag;
++ u8 gen_key[0]; /* 120-136 bytes */
++ } tlv1;
++ } kb;
++ } __packed * prepparm;
++ struct cipherkeytoken *t;
++
++ /* get already prepared memory for 2 cprbs with param block each */
++ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
++ if (rc)
++ return rc;
++
++ /* fill request cprb struct */
++ preqcblk->domain = domain;
++ preqcblk->req_parml = sizeof(struct gkreqparm);
++
++ /* prepare request param block with GK request */
++ preqparm = (struct gkreqparm *) preqcblk->req_parmb;
++ memcpy(preqparm->subfunc_code, "GK", 2);
++ preqparm->rule_array_len = sizeof(uint16_t) + 2 * 8;
++ memcpy(preqparm->rule_array, "AES OP ", 2*8);
++
++ /* prepare vud block */
++ preqparm->vud.len = sizeof(preqparm->vud);
++ switch (keybitsize) {
++ case 128:
++ case 192:
++ case 256:
++ break;
++ default:
++ DEBUG_ERR(
++ "%s unknown/unsupported keybitsize %d\n",
++ __func__, keybitsize);
++ rc = -EINVAL;
++ goto out;
++ }
++ preqparm->vud.clear_key_bit_len = keybitsize;
++ memcpy(preqparm->vud.key_type_1, "TOKEN ", 8);
++ memset(preqparm->vud.key_type_2, ' ', sizeof(preqparm->vud.key_type_2));
++
++ /* prepare kb block */
++ preqparm->kb.len = sizeof(preqparm->kb);
++ preqparm->kb.tlv1.len = sizeof(preqparm->kb.tlv1);
++ preqparm->kb.tlv1.flag = 0x0030;
++ preqparm->kb.tlv2.len = sizeof(preqparm->kb.tlv2);
++ preqparm->kb.tlv2.flag = 0x0030;
++ preqparm->kb.tlv3.len = sizeof(preqparm->kb.tlv3);
++ preqparm->kb.tlv3.flag = 0x0030;
++ memcpy(preqparm->kb.tlv3.gen_key_id_1,
++ aes_cipher_key_skeleton, SIZEOF_SKELETON);
++ preqparm->kb.tlv4.len = sizeof(preqparm->kb.tlv4);
++ preqparm->kb.tlv4.flag = 0x0030;
++ preqparm->kb.tlv5.len = sizeof(preqparm->kb.tlv5);
++ preqparm->kb.tlv5.flag = 0x0030;
++ preqparm->kb.tlv6.len = sizeof(preqparm->kb.tlv6);
++ preqparm->kb.tlv6.flag = 0x0030;
++
++ /* patch the skeleton key token export flags inside the kb block */
++ if (keygenflags) {
++ t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
++ t->kmf1 |= (u16) (keygenflags & 0x0000FFFF);
++ }
++
++ /* prepare xcrb struct */
++ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
++
++ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
++ rc = _zcrypt_send_cprb(&xcrb);
++ if (rc) {
++ DEBUG_ERR(
++ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
++ __func__, (int) cardnr, (int) domain, rc);
++ goto out;
++ }
++
++ /* check response returncode and reasoncode */
++ if (prepcblk->ccp_rtcode != 0) {
++ DEBUG_ERR(
++ "%s cipher key generate failure, card response %d/%d\n",
++ __func__,
++ (int) prepcblk->ccp_rtcode,
++ (int) prepcblk->ccp_rscode);
++ rc = -EIO;
++ goto out;
++ }
++
++ /* process response cprb param block */
++ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
++ prepparm = (struct gkrepparm *) prepcblk->rpl_parmb;
++
++ /* do some plausibility checks on the key block */
++ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
++ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
++ DEBUG_ERR("%s reply with invalid or unknown key block\n",
++ __func__);
++ rc = -EIO;
++ goto out;
++ }
++
++ /* and some checks on the generated key */
++ rc = cca_check_secaescipherkey(zcrypt_dbf_info, DBF_ERR,
++ prepparm->kb.tlv1.gen_key,
++ keybitsize, 1);
++ if (rc) {
++ rc = -EIO;
++ goto out;
++ }
++
++ /* copy the generated vlsc key token */
++ t = (struct cipherkeytoken *) prepparm->kb.tlv1.gen_key;
++ if (keybuf) {
++ if (*keybufsize >= t->len)
++ memcpy(keybuf, t, t->len);
++ else
++ rc = -EINVAL;
++ }
++ *keybufsize = t->len;
++
++out:
++ free_cprbmem(mem, PARMBSIZE, 0);
++ return rc;
++}
++EXPORT_SYMBOL(cca_gencipherkey);
++
++/*
++ * Helper function, does the CSNBKPI2 CPRB.
++ */
++static int _ip_cprb_helper(u16 cardnr, u16 domain,
++ const char *rule_array_1,
++ const char *rule_array_2,
++ const char *rule_array_3,
++ const u8 *clr_key_value,
++ int clr_key_bit_size,
++ u8 *key_token,
++ int *key_token_size)
++{
++ int rc, n;
++ u8 *mem;
++ struct CPRBX *preqcblk, *prepcblk;
++ struct ica_xcRB xcrb;
++ struct rule_array_block {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ char rule_array[0];
++ } __packed * preq_ra_block;
++ struct vud_block {
++ u16 len;
++ struct {
++ u16 len;
++ u16 flag; /* 0x0064 */
++ u16 clr_key_bit_len;
++ } tlv1;
++ struct {
++ u16 len;
++ u16 flag; /* 0x0063 */
++ u8 clr_key[0]; /* clear key value bytes */
++ } tlv2;
++ } __packed * preq_vud_block;
++ struct key_block {
++ u16 len;
++ struct {
++ u16 len;
++ u16 flag; /* 0x0030 */
++ u8 key_token[0]; /* key skeleton */
++ } tlv1;
++ } __packed * preq_key_block;
++ struct iprepparm {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ struct {
++ u16 len;
++ } vud;
++ struct {
++ u16 len;
++ struct {
++ u16 len;
++ u16 flag; /* 0x0030 */
++ u8 key_token[0]; /* key token */
++ } tlv1;
++ } kb;
++ } __packed * prepparm;
++ struct cipherkeytoken *t;
++ int complete = strncmp(rule_array_2, "COMPLETE", 8) ? 0 : 1;
++
++ /* get already prepared memory for 2 cprbs with param block each */
++ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
++ if (rc)
++ return rc;
++
++ /* fill request cprb struct */
++ preqcblk->domain = domain;
++ preqcblk->req_parml = 0;
++
++ /* prepare request param block with IP request */
++ preq_ra_block = (struct rule_array_block *) preqcblk->req_parmb;
++ memcpy(preq_ra_block->subfunc_code, "IP", 2);
++ preq_ra_block->rule_array_len = sizeof(uint16_t) + 2 * 8;
++ memcpy(preq_ra_block->rule_array, rule_array_1, 8);
++ memcpy(preq_ra_block->rule_array + 8, rule_array_2, 8);
++ preqcblk->req_parml = sizeof(struct rule_array_block) + 2 * 8;
++ if (rule_array_3) {
++ preq_ra_block->rule_array_len += 8;
++ memcpy(preq_ra_block->rule_array + 16, rule_array_3, 8);
++ preqcblk->req_parml += 8;
++ }
++
++ /* prepare vud block */
++ preq_vud_block = (struct vud_block *)
++ (preqcblk->req_parmb + preqcblk->req_parml);
++ n = complete ? 0 : (clr_key_bit_size + 7) / 8;
++ preq_vud_block->len = sizeof(struct vud_block) + n;
++ preq_vud_block->tlv1.len = sizeof(preq_vud_block->tlv1);
++ preq_vud_block->tlv1.flag = 0x0064;
++ preq_vud_block->tlv1.clr_key_bit_len = complete ? 0 : clr_key_bit_size;
++ preq_vud_block->tlv2.len = sizeof(preq_vud_block->tlv2) + n;
++ preq_vud_block->tlv2.flag = 0x0063;
++ if (!complete)
++ memcpy(preq_vud_block->tlv2.clr_key, clr_key_value, n);
++ preqcblk->req_parml += preq_vud_block->len;
++
++ /* prepare key block */
++ preq_key_block = (struct key_block *)
++ (preqcblk->req_parmb + preqcblk->req_parml);
++ n = *key_token_size;
++ preq_key_block->len = sizeof(struct key_block) + n;
++ preq_key_block->tlv1.len = sizeof(preq_key_block->tlv1) + n;
++ preq_key_block->tlv1.flag = 0x0030;
++ memcpy(preq_key_block->tlv1.key_token, key_token, *key_token_size);
++ preqcblk->req_parml += preq_key_block->len;
++
++ /* prepare xcrb struct */
++ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
++
++ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
++ rc = _zcrypt_send_cprb(&xcrb);
++ if (rc) {
++ DEBUG_ERR(
++ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
++ __func__, (int) cardnr, (int) domain, rc);
++ goto out;
++ }
++
++ /* check response returncode and reasoncode */
++ if (prepcblk->ccp_rtcode != 0) {
++ DEBUG_ERR(
++ "%s CSNBKPI2 failure, card response %d/%d\n",
++ __func__,
++ (int) prepcblk->ccp_rtcode,
++ (int) prepcblk->ccp_rscode);
++ rc = -EIO;
++ goto out;
++ }
++
++ /* process response cprb param block */
++ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
++ prepparm = (struct iprepparm *) prepcblk->rpl_parmb;
++
++ /* do some plausibility checks on the key block */
++ if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
++ prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
++ DEBUG_ERR("%s reply with invalid or unknown key block\n",
++ __func__);
++ rc = -EIO;
++ goto out;
++ }
++
++ /* do not check the key here, it may be incomplete */
++
++ /* copy the vlsc key token back */
++ t = (struct cipherkeytoken *) prepparm->kb.tlv1.key_token;
++ memcpy(key_token, t, t->len);
++ *key_token_size = t->len;
++
++out:
++ free_cprbmem(mem, PARMBSIZE, 0);
++ return rc;
++}
++
++/*
++ * Build CCA AES CIPHER secure key with a given clear key value.
++ */
++int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
++ const u8 *clrkey, u8 *keybuf, size_t *keybufsize)
++{
++ int rc;
++ u8 *token;
++ int tokensize;
++ u8 exorbuf[32];
++ struct cipherkeytoken *t;
++
++ /* fill exorbuf with random data */
++ get_random_bytes(exorbuf, sizeof(exorbuf));
++
++ /* allocate space for the key token to build */
++ token = kmalloc(MAXCCAVLSCTOKENSIZE, GFP_KERNEL);
++ if (!token)
++ return -ENOMEM;
++
++ /* prepare the token with the key skeleton */
++ tokensize = SIZEOF_SKELETON;
++ memcpy(token, aes_cipher_key_skeleton, tokensize);
++
++ /* patch the skeleton key token export flags */
++ if (keygenflags) {
++ t = (struct cipherkeytoken *) token;
++ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
++ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
++ }
++
++ /*
++ * Do the key import with the clear key value in 4 steps:
++ * 1/4 FIRST import with only random data
++ * 2/4 EXOR the clear key
++ * 3/4 EXOR the very same random data again
++ * 4/4 COMPLETE the secure cipher key import
++ */
++ rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
++ exorbuf, keybitsize, token, &tokensize);
++ if (rc) {
++ DEBUG_ERR(
++ "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
++ __func__, rc);
++ goto out;
++ }
++ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
++ clrkey, keybitsize, token, &tokensize);
++ if (rc) {
++ DEBUG_ERR(
++ "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
++ __func__, rc);
++ goto out;
++ }
++ rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
++ exorbuf, keybitsize, token, &tokensize);
++ if (rc) {
++ DEBUG_ERR(
++ "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
++ __func__, rc);
++ goto out;
++ }
++ rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
++ NULL, keybitsize, token, &tokensize);
++ if (rc) {
++ DEBUG_ERR(
++ "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
++ __func__, rc);
++ goto out;
++ }
++
++ /* copy the generated key token */
++ if (keybuf) {
++ if (tokensize > *keybufsize)
++ rc = -EINVAL;
++ else
++ memcpy(keybuf, token, tokensize);
++ }
++ *keybufsize = tokensize;
++
++out:
++ kfree(token);
++ return rc;
++}
++EXPORT_SYMBOL(cca_clr2cipherkey);
++
++/*
++ * Derive protected key from CCA AES cipher secure key.
++ */
++int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
++ u8 *protkey, u32 *protkeylen, u32 *protkeytype)
++{
++ int rc;
++ u8 *mem;
++ struct CPRBX *preqcblk, *prepcblk;
++ struct ica_xcRB xcrb;
++ struct aureqparm {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ u8 rule_array[8];
++ struct {
++ u16 len;
++ u16 tk_blob_len;
++ u16 tk_blob_tag;
++ u8 tk_blob[66];
++ } vud;
++ struct {
++ u16 len;
++ u16 cca_key_token_len;
++ u16 cca_key_token_flags;
++ u8 cca_key_token[0]; // 64 or more
++ } kb;
++ } __packed * preqparm;
++ struct aurepparm {
++ u8 subfunc_code[2];
++ u16 rule_array_len;
++ struct {
++ u16 len;
++ u16 sublen;
++ u16 tag;
++ struct cpacfkeyblock {
++ u8 version; /* version of this struct */
++ u8 flags[2];
++ u8 algo;
++ u8 form;
++ u8 pad1[3];
++ u16 keylen;
++ u8 key[64]; /* the key (keylen bytes) */
++ u16 keyattrlen;
++ u8 keyattr[32];
++ u8 pad2[1];
++ u8 vptype;
++ u8 vp[32]; /* verification pattern */
++ } ckb;
++ } vud;
++ struct {
++ u16 len;
++ } kb;
++ } __packed * prepparm;
++ int keytoklen = ((struct cipherkeytoken *)ckey)->len;
++
++ /* get already prepared memory for 2 cprbs with param block each */
++ rc = alloc_and_prep_cprbmem(PARMBSIZE, &mem, &preqcblk, &prepcblk);
++ if (rc)
++ return rc;
++
++ /* fill request cprb struct */
++ preqcblk->domain = domain;
++
++ /* fill request cprb param block with AU request */
++ preqparm = (struct aureqparm *) preqcblk->req_parmb;
++ memcpy(preqparm->subfunc_code, "AU", 2);
++ preqparm->rule_array_len =
++ sizeof(preqparm->rule_array_len)
++ + sizeof(preqparm->rule_array);
++ memcpy(preqparm->rule_array, "EXPT-SK ", 8);
++ /* vud, tk blob */
++ preqparm->vud.len = sizeof(preqparm->vud);
++ preqparm->vud.tk_blob_len = sizeof(preqparm->vud.tk_blob)
++ + 2 * sizeof(uint16_t);
++ preqparm->vud.tk_blob_tag = 0x00C2;
++ /* kb, cca token */
++ preqparm->kb.len = keytoklen + 3 * sizeof(uint16_t);
++ preqparm->kb.cca_key_token_len = keytoklen + 2 * sizeof(uint16_t);
++ memcpy(preqparm->kb.cca_key_token, ckey, keytoklen);
++ /* now fill length of param block into cprb */
++ preqcblk->req_parml = sizeof(struct aureqparm) + keytoklen;
++
++ /* fill xcrb struct */
++ prep_xcrb(&xcrb, cardnr, preqcblk, prepcblk);
++
++ /* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
++ rc = _zcrypt_send_cprb(&xcrb);
++ if (rc) {
++ DEBUG_ERR(
++ "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
++ __func__, (int) cardnr, (int) domain, rc);
++ goto out;
++ }
++
++ /* check response returncode and reasoncode */
++ if (prepcblk->ccp_rtcode != 0) {
++ DEBUG_ERR(
++ "%s unwrap secure key failure, card response %d/%d\n",
++ __func__,
++ (int) prepcblk->ccp_rtcode,
++ (int) prepcblk->ccp_rscode);
++ rc = -EIO;
++ goto out;
++ }
++ if (prepcblk->ccp_rscode != 0) {
++ DEBUG_WARN(
++ "%s unwrap secure key warning, card response %d/%d\n",
++ __func__,
++ (int) prepcblk->ccp_rtcode,
++ (int) prepcblk->ccp_rscode);
++ }
++
++ /* process response cprb param block */
++ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX);
++ prepparm = (struct aurepparm *) prepcblk->rpl_parmb;
++
++ /* check the returned keyblock */
++ if (prepparm->vud.ckb.version != 0x01) {
++ DEBUG_ERR(
++ "%s reply param keyblock version mismatch 0x%02x != 0x01\n",
++ __func__, (int) prepparm->vud.ckb.version);
++ rc = -EIO;
++ goto out;
++ }
++ if (prepparm->vud.ckb.algo != 0x02) {
++ DEBUG_ERR(
++ "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
++ __func__, (int) prepparm->vud.ckb.algo);
++ rc = -EIO;
++ goto out;
++ }
++
++ /* copy the translated protected key */
++ switch (prepparm->vud.ckb.keylen) {
++ case 16+32:
++ /* AES 128 protected key */
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_128;
++ break;
++ case 24+32:
++ /* AES 192 protected key */
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_192;
++ break;
++ case 32+32:
++ /* AES 256 protected key */
++ if (protkeytype)
++ *protkeytype = PKEY_KEYTYPE_AES_256;
++ break;
++ default:
++ DEBUG_ERR("%s unknown/unsupported keylen %d\n",
++ __func__, prepparm->vud.ckb.keylen);
++ rc = -EIO;
++ goto out;
++ }
++ memcpy(protkey, prepparm->vud.ckb.key, prepparm->vud.ckb.keylen);
++ if (protkeylen)
++ *protkeylen = prepparm->vud.ckb.keylen;
++
++out:
++ free_cprbmem(mem, PARMBSIZE, 0);
++ return rc;
++}
++EXPORT_SYMBOL(cca_cipher2protkey);
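The keylen switch reflects the protected key layout in the cpacfkeyblock: the effective AES key is followed by a 32-byte wrapping-key verification pattern, hence the 16+32, 24+32 and 32+32 cases. A hedged mapping helper along those lines; the helper name and the 0 sentinel are inventions for this sketch, and the PKEY_KEYTYPE_* constants are assumed to be the usual pkey uapi values:

/* Sketch only: derive the AES key type from the cpacfkeyblock keylen,
 * i.e. the clear AES key length plus a 32-byte wrapping-key
 * verification pattern. Returns 0 for unknown lengths.
 */
static u32 cpacf_keylen_to_keytype(u16 keylen)
{
	switch (keylen - 32) {		/* strip the verification pattern */
	case 16:
		return PKEY_KEYTYPE_AES_128;
	case 24:
		return PKEY_KEYTYPE_AES_192;
	case 32:
		return PKEY_KEYTYPE_AES_256;
	default:
		return 0;
	}
}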
++
++/*
+ * query cryptographic facility from CCA adapter
+ */
+ int cca_query_crypto_facility(u16 cardnr, u16 domain,
+@@ -954,6 +1662,92 @@ int cca_findcard(const u8 *key, u16 *pca
+ }
+ EXPORT_SYMBOL(cca_findcard);
+
++int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
++ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify)
++{
++ struct zcrypt_device_status_ext *device_status;
++ int i, n, card, dom, curmatch, oldmatch, rc = 0;
++ struct cca_info ci;
++
++ *apqns = NULL;
++ *nr_apqns = 0;
++
++ /* fetch status of all crypto cards */
++ device_status = kmalloc_array(MAX_ZDEV_ENTRIES_EXT,
++ sizeof(struct zcrypt_device_status_ext),
++ GFP_KERNEL);
++ if (!device_status)
++ return -ENOMEM;
++ zcrypt_device_status_mask_ext(device_status);
++
++	/* loop twice: first count eligible apqns, then store them */
++ while (1) {
++ n = 0;
++ /* walk through all the crypto cards */
++ for (i = 0; i < MAX_ZDEV_ENTRIES_EXT; i++) {
++ card = AP_QID_CARD(device_status[i].qid);
++ dom = AP_QID_QUEUE(device_status[i].qid);
++ /* check online state */
++ if (!device_status[i].online)
++ continue;
++ /* check for cca functions */
++ if (!(device_status[i].functions & 0x04))
++ continue;
++ /* check cardnr */
++ if (cardnr != 0xFFFF && card != cardnr)
++ continue;
++ /* check domain */
++ if (domain != 0xFFFF && dom != domain)
++ continue;
++ /* get cca info on this apqn */
++ if (cca_get_info(card, dom, &ci, verify))
++ continue;
++ /* current master key needs to be valid */
++ if (ci.cur_mk_state != '2')
++ continue;
++ /* check min hardware type */
++ if (minhwtype > 0 && minhwtype > ci.hwtype)
++ continue;
++ if (cur_mkvp || old_mkvp) {
++ /* check mkvps */
++ curmatch = oldmatch = 0;
++ if (cur_mkvp && cur_mkvp == ci.cur_mkvp)
++ curmatch = 1;
++ if (old_mkvp && ci.old_mk_state == '2' &&
++ old_mkvp == ci.old_mkvp)
++ oldmatch = 1;
++ if ((cur_mkvp || old_mkvp) &&
++ (curmatch + oldmatch < 1))
++ continue;
++ }
++			/* apqn passed all filtering criteria */
++ if (*apqns && n < *nr_apqns)
++ (*apqns)[n] = (((u16)card) << 16) | ((u16) dom);
++ n++;
++ }
++ /* loop 2nd time: array has been filled */
++ if (*apqns)
++ break;
++ /* loop 1st time: have # of eligible apqns in n */
++ if (!n) {
++ rc = -ENODEV; /* no eligible apqns found */
++ break;
++ }
++ *nr_apqns = n;
++ /* allocate array to store n apqns into */
++ *apqns = kmalloc_array(n, sizeof(u32), GFP_KERNEL);
++ if (!*apqns) {
++ rc = -ENOMEM;
++ break;
++ }
++ verify = 0;
++ }
++
++ kfree(device_status);
++ return rc;
++}
++EXPORT_SYMBOL(cca_findcard2);
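cca_findcard2 is a textbook count-then-fill loop: the first pass walks the device status array and only counts eligible APQNs, the result array is then allocated at exactly that size, and the second pass stores the packed card/domain values (with verify dropped so the cached cca_info is reused). On success the caller owns the kmalloc'ed array. A hedged usage sketch (kernel context; AP_DEVICE_TYPE_CEX6 and the mkvp variable are assumptions of the example):

/* Illustrative caller: find all online CCA APQNs of at least CEX6
 * hardware whose current master key matches mkvp, then release the
 * list. Error handling trimmed for brevity.
 */
u32 *apqns, nr_apqns, i;
int rc;

rc = cca_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
		   AP_DEVICE_TYPE_CEX6, mkvp, 0, 0);
if (rc == 0) {
	for (i = 0; i < nr_apqns; i++)
		pr_debug("apqn[%u]: card %u domain %u\n", i,
			 apqns[i] >> 16, apqns[i] & 0xFFFF);
	kfree(apqns);	/* the caller owns the array */
}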
++
+ void __exit zcrypt_ccamisc_exit(void)
+ {
+ mkvp_cache_free();
+--- a/drivers/s390/crypto/zcrypt_ccamisc.h
++++ b/drivers/s390/crypto/zcrypt_ccamisc.h
+@@ -22,11 +22,16 @@
+
+ /* For TOKTYPE_CCA_INTERNAL: */
+ #define TOKVER_CCA_AES 0x04 /* CCA AES key token */
++#define TOKVER_CCA_VLSC 0x05 /* var length sym cipher key token */
++
++/* Max size of a cca variable length cipher key token */
++#define MAXCCAVLSCTOKENSIZE 725
+
+ /* header part of a CCA key token */
+ struct keytoken_header {
+ u8 type; /* one of the TOKTYPE values */
+- u8 res0[3];
++ u8 res0[1];
++ u16 len; /* vlsc token: total length in bytes */
+ u8 version; /* one of the TOKVER values */
+ u8 res1[3];
+ } __packed;
+@@ -47,6 +52,56 @@ struct secaeskeytoken {
+ u8 tvv[4]; /* token validation value */
+ } __packed;
+
++/* inside view of a variable length symmetric cipher AES key token */
++struct cipherkeytoken {
++ u8 type; /* 0x01 for internal key token */
++ u8 res0[1];
++ u16 len; /* total key token length in bytes */
++ u8 version; /* should be 0x05 */
++ u8 res1[3];
++ u8 kms; /* key material state, 0x03 means wrapped with MK */
++ u8 kvpt; /* key verification pattern type, should be 0x01 */
++ u64 mkvp0; /* master key verification pattern, lo part */
++ u64 mkvp1; /* master key verification pattern, hi part (unused) */
++ u8 eskwm; /* encrypted section key wrapping method */
++	u8  hashalg;	/* hash algorithm used for the wrapping key */
++	u8  plfver;	/* payload format version */
++ u8 res2[1];
++ u8 adsver; /* associated data section version */
++ u8 res3[1];
++ u16 adslen; /* associated data section length */
++ u8 kllen; /* optional key label length */
++ u8 ieaslen; /* optional extended associated data length */
++ u8 uadlen; /* optional user definable associated data length */
++ u8 res4[1];
++ u16 wpllen; /* wrapped payload length in bits: */
++ /* plfver 0x00 0x01 */
++ /* AES-128 512 640 */
++ /* AES-192 576 640 */
++ /* AES-256 640 640 */
++ u8 res5[1];
++ u8 algtype; /* 0x02 for AES cipher */
++ u16 keytype; /* 0x0001 for 'cipher' */
++ u8 kufc; /* key usage field count */
++ u16 kuf1; /* key usage field 1 */
++ u16 kuf2; /* key usage field 2 */
++ u8 kmfc; /* key management field count */
++ u16 kmf1; /* key management field 1 */
++ u16 kmf2; /* key management field 2 */
++ u16 kmf3; /* key management field 3 */
++ u8 vdata[0]; /* variable part data follows */
++} __packed;
++
++/* Some defines for the CCA AES cipherkeytoken kmf1 field */
++#define KMF1_XPRT_SYM 0x8000
++#define KMF1_XPRT_UASY 0x4000
++#define KMF1_XPRT_AASY 0x2000
++#define KMF1_XPRT_RAW 0x1000
++#define KMF1_XPRT_CPAC 0x0800
++#define KMF1_XPRT_DES 0x0080
++#define KMF1_XPRT_AES 0x0040
++#define KMF1_XPRT_RSA 0x0008
++
+ /*
+ * Simple check if the token is a valid CCA secure AES data key
+ * token. If keybitsize is given, the bitsize of the key is
+@@ -56,6 +111,17 @@ int cca_check_secaeskeytoken(debug_info_
+ const u8 *token, int keybitsize);
+
+ /*
++ * Simple check if the token is a valid CCA secure AES cipher key
++ * token. If keybitsize is given, the bitsize of the key is
++ * also checked. If checkcpacfexport is enabled, the key is also
++ * checked for the export flag to allow CPACF export.
++ * Returns 0 on success or errno value on failure.
++ */
++int cca_check_secaescipherkey(debug_info_t *dbg, int dbflvl,
++ const u8 *token, int keybitsize,
++ int checkcpacfexport);
++
++/*
+ * Generate (random) CCA AES DATA secure key.
+ */
+ int cca_genseckey(u16 cardnr, u16 domain, u32 keytype, u8 *seckey);
+@@ -75,6 +141,24 @@ int cca_sec2protkey(u16 cardnr, u16 doma
+ u32 *protkeytype);
+
+ /*
++ * Generate (random) CCA AES CIPHER secure key.
++ */
++int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
++ u8 *keybuf, size_t *keybufsize);
++
++/*
++ * Derive protected key from CCA AES cipher secure key.
++ */
++int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
++ u8 *protkey, u32 *protkeylen, u32 *protkeytype);
++
++/*
++ * Build CCA AES CIPHER secure key with a given clear key value.
++ */
++int cca_clr2cipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
++ const u8 *clrkey, u8 *keybuf, size_t *keybufsize);
++
++/*
+ * Query cryptographic facility from CCA adapter
+ */
+ int cca_query_crypto_facility(u16 cardnr, u16 domain,
+@@ -90,6 +174,27 @@ int cca_query_crypto_facility(u16 cardnr
+ */
+ int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+
++/*
++ * Build a list of cca apqns meeting the following constraints:
++ * - apqn is online and is in fact a CCA apqn
++ * - if cardnr is not FFFF only apqns with this cardnr
++ * - if domain is not FFFF only apqns with this domainnr
++ * - if minhwtype > 0 only apqns with hwtype >= minhwtype
++ * - if cur_mkvp != 0 only apqns where cur_mkvp == mkvp
++ * - if old_mkvp != 0 only apqns where old_mkvp == mkvp
++ * - if verify is enabled and a cur_mkvp and/or old_mkvp
++ *   value is given, then refetch the cca_info and make sure the
++ *   apqn's current cur_mkvp and old_mkvp values are used.
++ * The array of apqn entries is allocated with kmalloc and returned in *apqns;
++ * the number of apqns stored into the list is returned in *nr_apqns. One apqn
++ * entry is simply a 32 bit value with 16 bit cardnr and 16 bit domain nr and
++ * may be cast to struct pkey_apqn. The return value is either 0 for success
++ * or a negative errno value. If no apqn meeting the criteria is found,
++ * -ENODEV is returned.
++ */
++int cca_findcard2(u32 **apqns, u32 *nr_apqns, u16 cardnr, u16 domain,
++ int minhwtype, u64 cur_mkvp, u64 old_mkvp, int verify);
++
+ /* struct to hold info for each CCA queue */
+ struct cca_info {
+ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
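The reworked keytoken_header above carves a 16-bit len field out of the former res0 bytes, so a variable length token can be size-checked before its version is dispatched on. A minimal plausibility check in the spirit of cca_check_secaescipherkey, built only from the definitions above (a sketch: the helper name and buflen parameter are inventions, and the real kernel check additionally validates key bit size and export flags):

/* Sketch: sanity-check a variable length cipher key token against the
 * header layout above before trusting its len field.
 */
static int vlsc_token_plausible(const u8 *token, unsigned int buflen)
{
	const struct keytoken_header *hdr =
		(const struct keytoken_header *) token;

	if (buflen < sizeof(struct keytoken_header))
		return -EINVAL;
	if (hdr->type != TOKTYPE_CCA_INTERNAL)
		return -EINVAL;
	if (hdr->version != TOKVER_CCA_VLSC)
		return -EINVAL;
	if (hdr->len < sizeof(struct cipherkeytoken) ||
	    hdr->len > MAXCCAVLSCTOKENSIZE || hdr->len > buflen)
		return -EINVAL;
	return 0;
}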
diff --git a/patches.suse/s390-zcrypt-extend-cca_findcard-function-and-helper b/patches.suse/s390-zcrypt-extend-cca_findcard-function-and-helper
new file mode 100644
index 0000000000..82491fc759
--- /dev/null
+++ b/patches.suse/s390-zcrypt-extend-cca_findcard-function-and-helper
@@ -0,0 +1,240 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Tue, 18 Jun 2019 15:53:12 +0200
+Subject: s390/zcrypt: extend cca_findcard function and helper
+Git-commit: 4da57a2fea064f662c29e77da043baebb8d6cdc8
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Rework and extend the cca_findcard function to prepare it for other
+types of secure key blobs. Split the function and extract an internal
+function which has no awareness of key blobs any more. Improve this
+function and the surrounding helper code so that a minimal crypto card
+hardware level can be checked for (background: the newer AES cipher
+keys need to match the master key verification pattern and require a
+CEX6 or higher crypto card).
+
+No API change, neither to the in-kernel API nor to the ioctl interface.
+
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/zcrypt_api.c | 28 ++++++++++++++
+ drivers/s390/crypto/zcrypt_api.h | 7 +--
+ drivers/s390/crypto/zcrypt_ccamisc.c | 69 +++++++++++++++++++++++++----------
+ drivers/s390/crypto/zcrypt_ccamisc.h | 3 +
+ 4 files changed, 83 insertions(+), 24 deletions(-)
+
+--- a/drivers/s390/crypto/zcrypt_api.c
++++ b/drivers/s390/crypto/zcrypt_api.c
+@@ -1161,6 +1161,34 @@ void zcrypt_device_status_mask_ext(struc
+ }
+ EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
+
++int zcrypt_device_status_ext(int card, int queue,
++ struct zcrypt_device_status_ext *devstat)
++{
++ struct zcrypt_card *zc;
++ struct zcrypt_queue *zq;
++
++ memset(devstat, 0, sizeof(*devstat));
++
++ spin_lock(&zcrypt_list_lock);
++ for_each_zcrypt_card(zc) {
++ for_each_zcrypt_queue(zq, zc) {
++ if (card == AP_QID_CARD(zq->queue->qid) &&
++ queue == AP_QID_QUEUE(zq->queue->qid)) {
++ devstat->hwtype = zc->card->ap_dev.device_type;
++ devstat->functions = zc->card->functions >> 26;
++ devstat->qid = zq->queue->qid;
++ devstat->online = zq->online ? 0x01 : 0x00;
++ spin_unlock(&zcrypt_list_lock);
++ return 0;
++ }
++ }
++ }
++ spin_unlock(&zcrypt_list_lock);
++
++ return -ENODEV;
++}
++EXPORT_SYMBOL(zcrypt_device_status_ext);
++
+ static void zcrypt_status_mask(char status[], size_t max_adapters)
+ {
+ struct zcrypt_card *zc;
+--- a/drivers/s390/crypto/zcrypt_api.h
++++ b/drivers/s390/crypto/zcrypt_api.h
+@@ -121,9 +121,6 @@ void zcrypt_card_get(struct zcrypt_card
+ int zcrypt_card_put(struct zcrypt_card *);
+ int zcrypt_card_register(struct zcrypt_card *);
+ void zcrypt_card_unregister(struct zcrypt_card *);
+-struct zcrypt_card *zcrypt_card_get_best(unsigned int *,
+- unsigned int, unsigned int);
+-void zcrypt_card_put_best(struct zcrypt_card *, unsigned int);
+
+ struct zcrypt_queue *zcrypt_queue_alloc(size_t);
+ void zcrypt_queue_free(struct zcrypt_queue *);
+@@ -132,8 +129,6 @@ int zcrypt_queue_put(struct zcrypt_queue
+ int zcrypt_queue_register(struct zcrypt_queue *);
+ void zcrypt_queue_unregister(struct zcrypt_queue *);
+ void zcrypt_queue_force_online(struct zcrypt_queue *, int);
+-struct zcrypt_queue *zcrypt_queue_get_best(unsigned int, unsigned int);
+-void zcrypt_queue_put_best(struct zcrypt_queue *, unsigned int);
+
+ int zcrypt_rng_device_add(void);
+ void zcrypt_rng_device_remove(void);
+@@ -145,5 +140,7 @@ int zcrypt_api_init(void);
+ void zcrypt_api_exit(void);
+ long zcrypt_send_cprb(struct ica_xcRB *xcRB);
+ void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus);
++int zcrypt_device_status_ext(int card, int queue,
++ struct zcrypt_device_status_ext *devstatus);
+
+ #endif /* _ZCRYPT_API_H_ */
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -779,7 +779,17 @@ static int fetch_cca_info(u16 cardnr, u1
+ int rc, found = 0;
+ size_t rlen, vlen;
+ u8 *rarray, *varray, *pg;
++ struct zcrypt_device_status_ext devstat;
+
++ memset(ci, 0, sizeof(*ci));
++
++ /* get first info from zcrypt device driver about this apqn */
++ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
++ if (rc)
++ return rc;
++ ci->hwtype = devstat.hwtype;
++
++ /* prep page for rule array and var array use */
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg)
+ return -ENOMEM;
+@@ -787,10 +797,10 @@ static int fetch_cca_info(u16 cardnr, u1
+ varray = pg + PAGE_SIZE/2;
+ rlen = vlen = PAGE_SIZE/2;
+
++ /* QF for this card/domain */
+ rc = cca_query_crypto_facility(cardnr, domain, "STATICSA",
+ rarray, &rlen, varray, &vlen);
+ if (rc == 0 && rlen >= 10*8 && vlen >= 204) {
+- memset(ci, 0, sizeof(*ci));
+ memcpy(ci->serial, rarray, 8);
+ ci->new_mk_state = (char) rarray[7*8];
+ ci->cur_mk_state = (char) rarray[8*8];
+@@ -828,23 +838,19 @@ int cca_get_info(u16 card, u16 dom, stru
+ EXPORT_SYMBOL(cca_get_info);
+
+ /*
+- * Search for a matching crypto card based on the Master Key
+- * Verification Pattern provided inside a secure key.
+- * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+- * 1 if OLD MKVP matches.
++ * Search for a matching crypto card based on the
++ * Master Key Verification Pattern given.
+ */
+-int cca_findcard(const u8 *seckey, u16 *pcardnr, u16 *pdomain, int verify)
++static int findcard(u64 mkvp, u16 *pcardnr, u16 *pdomain,
++ int verify, int minhwtype)
+ {
+- const struct secaeskeytoken *t = (const struct secaeskeytoken *) seckey;
+ struct zcrypt_device_status_ext *device_status;
+ u16 card, dom;
+ struct cca_info ci;
+ int i, rc, oi = -1;
+
+- /* some simple checks of the given secure key token */
+- if (t->type != TOKTYPE_CCA_INTERNAL ||
+- t->version != TOKVER_CCA_AES ||
+- t->mkvp == 0)
++ /* mkvp must not be zero, minhwtype needs to be >= 0 */
++ if (mkvp == 0 || minhwtype < 0)
+ return -EINVAL;
+
+ /* fetch status of all crypto cards */
+@@ -863,15 +869,17 @@ int cca_findcard(const u8 *seckey, u16 *
+ device_status[i].functions & 0x04) {
+ /* enabled CCA card, check current mkvp from cache */
+ if (cca_info_cache_fetch(card, dom, &ci) == 0 &&
++ ci.hwtype >= minhwtype &&
+ ci.cur_mk_state == '2' &&
+- ci.cur_mkvp == t->mkvp) {
++ ci.cur_mkvp == mkvp) {
+ if (!verify)
+ break;
+ /* verify: refresh card info */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+- if (ci.cur_mk_state == '2' &&
+- ci.cur_mkvp == t->mkvp)
++ if (ci.hwtype >= minhwtype &&
++ ci.cur_mk_state == '2' &&
++ ci.cur_mkvp == mkvp)
+ break;
+ }
+ }
+@@ -892,11 +900,13 @@ int cca_findcard(const u8 *seckey, u16 *
+ /* fresh fetch mkvp from adapter */
+ if (fetch_cca_info(card, dom, &ci) == 0) {
+ cca_info_cache_update(card, dom, &ci);
+- if (ci.cur_mk_state == '2' &&
+- ci.cur_mkvp == t->mkvp)
++ if (ci.hwtype >= minhwtype &&
++ ci.cur_mk_state == '2' &&
++ ci.cur_mkvp == mkvp)
+ break;
+- if (ci.old_mk_state == '2' &&
+- ci.old_mkvp == t->mkvp &&
++ if (ci.hwtype >= minhwtype &&
++ ci.old_mk_state == '2' &&
++ ci.old_mkvp == mkvp &&
+ oi < 0)
+ oi = i;
+ }
+@@ -919,6 +929,29 @@ int cca_findcard(const u8 *seckey, u16 *
+ kfree(device_status);
+ return rc;
+ }
++
++/*
++ * Search for a matching crypto card based on the Master Key
++ * Verification Pattern provided inside a secure key token.
++ */
++int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify)
++{
++ u64 mkvp;
++ const struct keytoken_header *hdr = (struct keytoken_header *) key;
++
++ if (hdr->type != TOKTYPE_CCA_INTERNAL)
++ return -EINVAL;
++
++ switch (hdr->version) {
++ case TOKVER_CCA_AES:
++ mkvp = ((struct secaeskeytoken *)key)->mkvp;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return findcard(mkvp, pcardnr, pdomain, verify, 0);
++}
+ EXPORT_SYMBOL(cca_findcard);
+
+ void __exit zcrypt_ccamisc_exit(void)
+--- a/drivers/s390/crypto/zcrypt_ccamisc.h
++++ b/drivers/s390/crypto/zcrypt_ccamisc.h
+@@ -88,10 +88,11 @@ int cca_query_crypto_facility(u16 cardnr
+ * Returns < 0 on failure, 0 if CURRENT MKVP matches and
+ * 1 if OLD MKVP matches.
+ */
+-int cca_findcard(const u8 *seckey, u16 *pcardnr, u16 *pdomain, int verify);
++int cca_findcard(const u8 *key, u16 *pcardnr, u16 *pdomain, int verify);
+
+ /* struct to hold info for each CCA queue */
+ struct cca_info {
++ int hwtype; /* one of the defined AP_DEVICE_TYPE_* */
+ char new_mk_state; /* '1' empty, '2' partially full, '3' full */
+ char cur_mk_state; /* '1' invalid, '2' valid */
+ char old_mk_state; /* '1' invalid, '2' valid */
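With findcard() made blob-agnostic, supporting a further token version reduces to one more switch case that extracts the mkvp and names a minimal hardware level. A sketch of how the variable length cipher key tokens from the companion patch could slot in (illustrative only; the function name, field choices and the AP_DEVICE_TYPE_CEX6 constant are assumptions here, not a quote of the final kernel code):

/* Sketch: extending the cca_findcard dispatch for VLSC tokens. Cipher
 * keys must match the current master key and need a CEX6 or higher,
 * hence the minhwtype argument of findcard().
 */
static int cca_findcard_sketch(const u8 *key, u16 *pcardnr, u16 *pdomain,
			       int verify)
{
	u64 mkvp;
	int minhwtype = 0;
	const struct keytoken_header *hdr = (struct keytoken_header *) key;

	if (hdr->type != TOKTYPE_CCA_INTERNAL)
		return -EINVAL;

	switch (hdr->version) {
	case TOKVER_CCA_AES:
		mkvp = ((struct secaeskeytoken *)key)->mkvp;
		break;
	case TOKVER_CCA_VLSC:
		mkvp = ((struct cipherkeytoken *)key)->mkvp0;
		minhwtype = AP_DEVICE_TYPE_CEX6;  /* cipher keys need CEX6+ */
		break;
	default:
		return -EINVAL;
	}
	return findcard(mkvp, pcardnr, pdomain, verify, minhwtype);
}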
diff --git a/patches.suse/s390-zcrypt-fix-wrong-handling-of-cca-cipher-keygenflags b/patches.suse/s390-zcrypt-fix-wrong-handling-of-cca-cipher-keygenflags
new file mode 100644
index 0000000000..b2171beec6
--- /dev/null
+++ b/patches.suse/s390-zcrypt-fix-wrong-handling-of-cca-cipher-keygenflags
@@ -0,0 +1,37 @@
+From: Harald Freudenberger <freude@linux.ibm.com>
+Date: Thu, 29 Aug 2019 15:16:35 +0200
+Subject: s390/zcrypt: fix wrong handling of cca cipher keygenflags
+Git-commit: deffa48fb014f06f7cf8c4b3ea3c96160be3c854
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-7533 LTC#178844
+
+Tests showed that the keygenflags parameter is not handled
+correctly within the zcrypt ccamisc generate cca cipher key
+code. Similar code is used for cca cipher key value import,
+where the flags are handled correctly. For unknown reasons
+these lines were not updated for the generate function, so
+this patch introduces the two corrected lines there.
+
+This only affects pkey when CCA cipher keys are used together
+with additional key generation flags.
+
+Fixes: 4bc123b18ce6 ("s390/zcrypt: Add low level functions for CCA AES cipher keys")
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/zcrypt_ccamisc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/crypto/zcrypt_ccamisc.c
++++ b/drivers/s390/crypto/zcrypt_ccamisc.c
+@@ -838,7 +838,8 @@ int cca_gencipherkey(u16 cardnr, u16 dom
+ /* patch the skeleton key token export flags inside the kb block */
+ if (keygenflags) {
+ t = (struct cipherkeytoken *) preqparm->kb.tlv3.gen_key_id_1;
+- t->kmf1 |= (u16) (keygenflags & 0x0000FFFF);
++ t->kmf1 |= (u16) (keygenflags & 0x0000FF00);
++ t->kmf1 &= (u16) ~(keygenflags & 0x000000FF);
+ }
+
+ /* prepare xcrb struct */
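The corrected lines give keygenflags a split encoding: bits in the high byte of the low halfword are OR-ed into kmf1, while bits in the low byte are cleared from it, matching the KMF1_XPRT_* values introduced earlier in this series (KMF1_XPRT_CPAC is 0x0800, KMF1_XPRT_AES is 0x0040). A stand-alone sketch of the arithmetic; the skeleton default value is made up for the example:

#include <stdio.h>
#include <stdint.h>

#define KMF1_XPRT_CPAC 0x0800	/* high byte bit: OR-ed in on request */
#define KMF1_XPRT_AES  0x0040	/* low byte bit: cleared on request */

int main(void)
{
	uint16_t kmf1 = 0x0040;	/* illustrative skeleton default */
	uint32_t keygenflags = KMF1_XPRT_CPAC | KMF1_XPRT_AES;

	kmf1 |= (uint16_t)(keygenflags & 0x0000FF00);	/* sets 0x0800 */
	kmf1 &= (uint16_t)~(keygenflags & 0x000000FF);	/* clears 0x0040 */

	printf("kmf1 = 0x%04x\n", kmf1);	/* prints kmf1 = 0x0800 */
	return 0;
}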
diff --git a/series.conf b/series.conf
index b16f1b8020..9c3cf34688 100644
--- a/series.conf
+++ b/series.conf
@@ -246,8 +246,15 @@
patches.suse/s390-zcrypt-new-sysfs-attributes-serialnr-and-mkvps
patches.suse/s390-kasan-provide-uninstrumented-__strlen.patch
patches.suse/s390-process-avoid-potential-reading-of-freed-stack.patch
+ patches.suse/s390-pkey-pkey-cleanup-narrow-in-kernel-api-fix-some-variable-types
+ patches.suse/s390-zcrypt-extend-cca_findcard-function-and-helper
+ patches.suse/s390-zcrypt-add-low-level-functions-for-cca-aes-cipher-keys
+ patches.suse/s390-pkey-add-cca-aes-cipher-key-support
+ patches.suse/s390-paes-prepare-paes-functions-for-large-key-blobs
+ patches.suse/s390-zcrypt-fix-wrong-handling-of-cca-cipher-keygenflags
patches.suse/s390-sclp-Fix-bit-checked-for-has_sipl.patch
patches.suse/s390-crypto-xts-aes-s390-fix-extra-run-time-crypto-s.patch
+ patches.suse/s390-crypto-support-for-sha3-via-cpacf-msa6
patches.suse/s390-pci-fix-MSI-message-data.patch
patches.suse/docs-cgroup-v1-blkio-controller.rst-remove-a-CFQ-lef.patch
patches.suse/docs-ipmb-place-it-at-driver-api-and-convert-to-ReST.patch
@@ -1079,6 +1086,13 @@
patches.suse/net-hns3-Fix-Wunused-const-variable-warning.patch
patches.suse/net-core-skmsg-Delete-an-unnecessary-check-before-th.patch
patches.suse/bnxt_en-Fix-allocation-of-zero-statistics-block-size.patch
+ patches.suse/s390-qdio-enable-drivers-to-poll-for-output-completions
+ patches.suse/s390-qdio-let-drivers-opt-out-from-output-queue-scanning
+ patches.suse/s390-qeth-collect-accurate-tx-statistics
+ patches.suse/s390-qeth-add-tx-napi-support-for-iqd-devices
+ patches.suse/s390-qeth-when-in-tx-napi-mode-use-napi_consume_skb
+ patches.suse/s390-qeth-add-bql-support-for-iqd-devices
+ patches.suse/s390-qeth-add-xmit_more-support-for-iqd-devices
patches.suse/ice-Allow-egress-control-packets-from-PF_VSI.patch
patches.suse/ice-Account-for-all-states-of-FW-DCBx-and-LLDP.patch
patches.suse/ice-Don-t-call-synchronize_irq-for-VF-s-from-the-hos.patch
@@ -2334,6 +2348,7 @@
patches.suse/lib-lzo-lzo1x_compress.c-fix-alignment-bug-in-lzo-rl.patch
patches.suse/IB-hfi1-remove-unlikely-from-IS_ERR-condition.patch
patches.suse/xen-pci-reserve-MCFG-areas-earlier.patch
+ patches.suse/s390-pkey-add-sysfs-attributes-to-emit-aes-cipher-key-blobs
patches.suse/s390-zcrypt-cex7s-exploitation-support
patches.suse/s390-topology-avoid-firing-events-before-kobjs-are-c.patch
patches.suse/s390-cio-avoid-calling-strlen-on-null-pointer.patch
@@ -3826,6 +3841,8 @@
patches.suse/0004-mm-refresh-ZONE_DMA-and-ZONE_DMA32-comments-in-enum-.patch
patches.suse/arm64-mm-fix-unused-variable-warning-in-zone_sizes_init.patch
patches.suse/arm64-mm-reserve-cma-and-crashkernel-in-zone_dma32.patch
+ patches.suse/s390-pkey-fix-memory-leak-within-copy_apqns_from_user
+ patches.suse/s390-crypto-fix-unsigned-variable-compared-with-zero
patches.suse/0001-crypto-algif_skcipher-Use-chunksize-instead-of-block.patch
patches.suse/0001-crypto-af_alg-cast-ki_complete-ternary-op-to-int.patch
patches.suse/crypto-user-fix-memory-leak-in-crypto_report.patch
@@ -3835,6 +3852,8 @@
patches.suse/net-bcmgenet-Generate-a-random-MAC-if-none-is-valid.patch
patches.suse/net-bcmgenet-Add-a-shutdown-callback.patch
patches.suse/Bluetooth-hci_bcm-Fix-RTS-handling-during-startup.patch
+ patches.suse/s390-qdio-implement-iqd-multi-write
+ patches.suse/s390-qeth-use-iqd-multi-write
patches.suse/rsi-release-skb-if-rsi_prepare_beacon-fails.patch
patches.suse/rtlwifi-prevent-memory-leak-in-rtl_usb_probe.patch
patches.suse/libertas-fix-a-potential-NULL-pointer-dereference.patch
diff --git a/supported.conf b/supported.conf
index 4b057f9a85..85201ebdf8 100644
--- a/supported.conf
+++ b/supported.conf
@@ -78,6 +78,8 @@
arch/s390/crypto/prng
arch/s390/crypto/sha1_s390
arch/s390/crypto/sha256_s390
+ arch/s390/crypto/sha3_256_s390
+ arch/s390/crypto/sha3_512_s390
arch/s390/crypto/sha512_s390
arch/s390/crypto/sha_common
arch/s390/kvm/kvm