Home Home > GIT Browse > SLE15-SP1-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKernel Build Daemon <kbuild@suse.de>2019-07-20 07:05:31 +0200
committerKernel Build Daemon <kbuild@suse.de>2019-07-20 07:05:31 +0200
commitf0d27f10fd67ffaa262e91047133c0b430b960ad (patch)
treec2d06c0060ffd0f3eb964b99fdf28af95ef2176d
parenta84f98f35251cd555e13eb7130bff053f48b4c0a (diff)
parent8e52c21df5c4e5ce6da5091d3d3b63313a3c926d (diff)
Merge branch 'SLE15-SP1' into SLE15-SP1-AZURESLE15-SP1-AZURE
-rw-r--r--config/ppc64le/default1
-rw-r--r--config/s390x/default2
-rw-r--r--config/s390x/zfcpdump3
-rw-r--r--config/x86_64/default1
-rw-r--r--patches.arch/s390-dma-provide-proper-ARCH_ZONE_DMA_BITS30
-rw-r--r--patches.arch/s390-uv-introduce-guest-side-ultravisor-code2
-rw-r--r--patches.drivers/ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch100
-rw-r--r--patches.drivers/ALSA-hda-hdmi-Fix-i915-reverse-port-pin-mapping.patch72
-rw-r--r--patches.drivers/ALSA-hda-hdmi-Remove-duplicated-define.patch34
-rw-r--r--patches.drivers/ALSA-hda-realtek-Fixed-Headphone-Mic-can-t-record-on.patch40
-rw-r--r--patches.drivers/ALSA-hda-realtek-apply-ALC891-headset-fixup-to-one-D.patch37
-rw-r--r--patches.drivers/ALSA-seq-Break-too-long-mutex-context-in-the-write-l.patch76
-rw-r--r--patches.drivers/clk-qcom-Fix-Wunused-const-variable.patch98
-rw-r--r--patches.drivers/clk-rockchip-Don-t-yell-about-bad-mmc-phases-when-ge.patch52
-rw-r--r--patches.drivers/clk-tegra210-fix-PLLU-and-PLLU_OUT1.patch72
-rw-r--r--patches.drivers/dmaengine-hsu-Revert-set-HSU_CH_MTSR-to-memory-width.patch51
-rw-r--r--patches.drivers/mei-bus-need-to-unlink-client-before-freeing.patch79
-rw-r--r--patches.drivers/mei-me-add-denverton-innovation-engine-device-IDs.patch45
-rw-r--r--patches.drivers/mei-me-add-gemini-lake-devices-id.patch42
-rw-r--r--patches.drm/drm-vc4-fix-fb-references-in-async-update.patch41
-rw-r--r--patches.fixes/crypto-talitos-fix-max-key-size-for-sha384-and-sha51.patch42
-rw-r--r--patches.fixes/x86-asm-memcpy_mcsafe-Define-copy_to_iter_mcsafe.patch2
-rw-r--r--patches.fixes/x86-asm-memcpy_mcsafe-Fix-copy_to_user_mcsafe-except.patch2
-rw-r--r--patches.kabi/hda-relaxed_resume-flag-kabi-fix.patch28
-rw-r--r--patches.kabi/iommu-helper-mark-iommu_is_span_boundary-as-inline25
-rw-r--r--patches.kabi/s390-airq-use-dma-memory-for-adapter-interrupts50
-rw-r--r--patches.kabi/s390-cio-add-basic-protected-virtualization-support61
-rw-r--r--patches.kabi/s390-mm-force-swiotlb-for-protected-virtualization27
-rw-r--r--patches.suse/arm64-don-t-override-dma_max_pfn34
-rw-r--r--patches.suse/dma-direct-add-support-for-allocation-from-zone_dma-and-zone_dma3250
-rw-r--r--patches.suse/dma-direct-don-t-retry-allocation-for-no-op-gfp_dma34
-rw-r--r--patches.suse/dma-direct-retry-allocations-using-gfp_dma-for-small-masks56
-rw-r--r--patches.suse/dma-mapping-move-dma_mark_clean-to-dma-direct-h139
-rw-r--r--patches.suse/dma-mapping-move-swiotlb-arch-helpers-to-a-new-header697
-rw-r--r--patches.suse/dma-mapping-take-dma_pfn_offset-into-account-in-dma_max_pfn28
-rw-r--r--patches.suse/iommu-helper-mark-iommu_is_span_boundary-as-inline221
-rw-r--r--patches.suse/mips-fix-an-off-by-one-in-dma_capable27
-rw-r--r--patches.suse/net-af_iucv-build-proper-skbs-for-hipertransport89
-rw-r--r--patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport59
-rw-r--r--patches.suse/pkey-indicate-old-mkvp-only-if-old-and-current-mkvp-are-different32
-rw-r--r--patches.suse/s390-add-alignment-hints-to-vector-load-and-store48
-rw-r--r--patches.suse/s390-airq-use-dma-memory-for-adapter-interrupts168
-rw-r--r--patches.suse/s390-cio-add-basic-protected-virtualization-support721
-rw-r--r--patches.suse/s390-cio-introduce-dma-pools-to-cio242
-rw-r--r--patches.suse/s390-cpu_mf-add-store-cpu-counter-multiple-instruction-support80
-rw-r--r--patches.suse/s390-cpu_mf-move-struct-cpu_cf_events-and-per-cpu-variable-to-header-file58
-rw-r--r--patches.suse/s390-cpu_mf-replace-stcctm5-with-the-stcctm-function80
-rw-r--r--patches.suse/s390-cpum_cf-add-ctr_stcctm-function47
-rw-r--r--patches.suse/s390-cpum_cf-add-minimal-in-kernel-interface-for-counter-measurements87
-rw-r--r--patches.suse/s390-cpum_cf-add-support-for-cpu-mf-svn-6164
-rw-r--r--patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_alert-to-obtain-measurement-alerts80
-rw-r--r--patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_avail-function71
-rw-r--r--patches.suse/s390-cpum_cf-move-counter-set-controls-to-a-new-header-file164
-rw-r--r--patches.suse/s390-cpum_cf-prepare-for-in-kernel-counter-measurements101
-rw-r--r--patches.suse/s390-cpum_cf-rename-per-cpu-counter-facility-structure-and-variables186
-rw-r--r--patches.suse/s390-cpum_cf_diag-add-support-for-cpu-mf-svn-648
-rw-r--r--patches.suse/s390-cpum_cf_diag-add-support-for-s390-counter-facility-diagnostic-trace798
-rw-r--r--patches.suse/s390-cpumf-add-extended-counter-set-definitions-for-model-8561-and-856230
-rw-r--r--patches.suse/s390-cpumf-fix-warning-from-check_processor_id91
-rw-r--r--patches.suse/s390-mm-force-swiotlb-for-protected-virtualization240
-rw-r--r--patches.suse/s390-remove-the-unused-dma_capable-helper27
-rw-r--r--patches.suse/s390-report-new-cpu-capabilities66
-rw-r--r--patches.suse/virtio-s390-add-indirection-to-indicators-access125
-rw-r--r--patches.suse/virtio-s390-dma-support-for-virtio-ccw99
-rw-r--r--patches.suse/virtio-s390-make-airq-summary-indicators-dma116
-rw-r--r--patches.suse/virtio-s390-use-cacheline-aligned-airq-bit-vectors33
-rw-r--r--patches.suse/virtio-s390-use-dma-memory-for-ccw-i-o-and-classic-notifiers510
-rw-r--r--patches.suse/virtio-s390-use-vring_create_virtqueue114
-rw-r--r--series.conf64
69 files changed, 7235 insertions, 4 deletions
diff --git a/config/ppc64le/default b/config/ppc64le/default
index 66d0a875ae..45405a485a 100644
--- a/config/ppc64le/default
+++ b/config/ppc64le/default
@@ -333,6 +333,7 @@ CONFIG_COMPAT_OLD_SIGACTION=y
# CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT is not set
# CONFIG_ARCH_HAS_STRICT_KERNEL_RWX is not set
# CONFIG_ARCH_HAS_STRICT_MODULE_RWX is not set
+CONFIG_ARCH_HAS_PHYS_TO_DMA=y
# CONFIG_REFCOUNT_FULL is not set
#
diff --git a/config/s390x/default b/config/s390x/default
index 498b72fc3c..7089543efb 100644
--- a/config/s390x/default
+++ b/config/s390x/default
@@ -2,6 +2,7 @@
# Automatically generated file; DO NOT EDIT.
# Linux/s390 4.12.14 Kernel Configuration
#
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
CONFIG_CPU_BIG_ENDIAN=y
@@ -21,6 +22,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_NO_IOPORT_MAP=y
# CONFIG_PCI_QUIRKS is not set
CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_SWIOTLB=y
CONFIG_S390=y
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_PGTABLE_LEVELS=4
diff --git a/config/s390x/zfcpdump b/config/s390x/zfcpdump
index 7d8e6b3185..374d1d6616 100644
--- a/config/s390x/zfcpdump
+++ b/config/s390x/zfcpdump
@@ -2,6 +2,7 @@
# Automatically generated file; DO NOT EDIT.
# Linux/s390 4.12.14 Kernel Configuration
#
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
CONFIG_MMU=y
CONFIG_ZONE_DMA=y
CONFIG_CPU_BIG_ENDIAN=y
@@ -21,6 +22,7 @@ CONFIG_AUDIT_ARCH=y
CONFIG_NO_IOPORT_MAP=y
# CONFIG_PCI_QUIRKS is not set
CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_SWIOTLB=y
CONFIG_S390=y
CONFIG_SCHED_OMIT_FRAME_POINTER=y
CONFIG_PGTABLE_LEVELS=4
@@ -1225,6 +1227,7 @@ CONFIG_DECOMPRESS_LZMA=y
CONFIG_DECOMPRESS_XZ=y
CONFIG_DECOMPRESS_LZO=y
CONFIG_DECOMPRESS_LZ4=y
+CONFIG_GENERIC_ALLOCATOR=y
CONFIG_HAS_DMA=y
# CONFIG_SGL_ALLOC is not set
CONFIG_DMA_NOOP_OPS=y
diff --git a/config/x86_64/default b/config/x86_64/default
index f505ac56d3..e719942934 100644
--- a/config/x86_64/default
+++ b/config/x86_64/default
@@ -344,6 +344,7 @@ CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
CONFIG_STRICT_MODULE_RWX=y
+CONFIG_ARCH_HAS_PHYS_TO_DMA=y
# CONFIG_REFCOUNT_FULL is not set
#
diff --git a/patches.arch/s390-dma-provide-proper-ARCH_ZONE_DMA_BITS b/patches.arch/s390-dma-provide-proper-ARCH_ZONE_DMA_BITS
new file mode 100644
index 0000000000..d10b01b525
--- /dev/null
+++ b/patches.arch/s390-dma-provide-proper-ARCH_ZONE_DMA_BITS
@@ -0,0 +1,30 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Thu, 18 Jul 2019 19:21:20 +0200
+Subject: s390/dma: provide proper ARCH_ZONE_DMA_BITS value
+Patch-mainline: not yet, sent 2019-07-18
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+On s390 ZONE_DMA is up to 2G, i.e. ARCH_ZONE_DMA_BITS should be 31 bits.
+The current value is 24 and makes __dma_direct_alloc_pages() take a
+wrong turn first (but __dma_direct_alloc_pages() recovers then).
+
+Let's correct ARCH_ZONE_DMA_BITS value and avoid wrong turns.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reported-by: Petr Tesarik <ptesarik@suse.cz>
+Fixes: c61e9637340e ("dma-direct: add support for allocation from ZONE_DMA and ZONE_DMA32")
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/dma.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/include/asm/dma.h
++++ b/arch/s390/include/asm/dma.h
+@@ -9,6 +9,7 @@
+ * by the 31 bit heritage.
+ */
+ #define MAX_DMA_ADDRESS 0x80000000
++#define ARCH_ZONE_DMA_BITS 31
+
+ #ifdef CONFIG_PCI
+ extern int isa_dma_bridge_buggy;
diff --git a/patches.arch/s390-uv-introduce-guest-side-ultravisor-code b/patches.arch/s390-uv-introduce-guest-side-ultravisor-code
index 6d5999a087..69eea72986 100644
--- a/patches.arch/s390-uv-introduce-guest-side-ultravisor-code
+++ b/patches.arch/s390-uv-introduce-guest-side-ultravisor-code
@@ -197,7 +197,7 @@ Acked-by: Petr Tesarik <ptesarik@suse.com>
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_PERF_EVENTS) += perf_event.
- obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
obj-$(CONFIG_TRACEPOINTS) += trace.o
+obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
diff --git a/patches.drivers/ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch b/patches.drivers/ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch
new file mode 100644
index 0000000000..5e4031af3c
--- /dev/null
+++ b/patches.drivers/ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch
@@ -0,0 +1,100 @@
+From 4914da2fb0c89205790503f20dfdde854f3afdd8 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 16 Jul 2019 08:56:51 +0200
+Subject: [PATCH] ALSA: hda - Don't resume forcibly i915 HDMI/DP codec
+Git-commit: 4914da2fb0c89205790503f20dfdde854f3afdd8
+Patch-mainline: v5.3-rc1
+References: bsc#1111666
+
+We apply the codec resume forcibly at system resume callback for
+updating and syncing the jack detection state that may have changed
+during sleeping. This is, however, superfluous for the codec like
+Intel HDMI/DP, where the jack detection is managed via the audio
+component notification; i.e. the jack state change shall be reported
+sooner or later from the graphics side at mode change.
+
+This patch changes the codec resume callback to avoid the forcible
+resume conditionally with a new flag, codec->relaxed_resume, for
+reducing the resume time. The flag is set in the codec probe.
+
+Although this doesn't fix the entire bug mentioned in the bugzilla
+entry below, it's still a good optimization and some improvements are
+seen.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=201901
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ include/sound/hda_codec.h | 2 ++
+ sound/pci/hda/hda_codec.c | 8 ++++++--
+ sound/pci/hda/patch_hdmi.c | 6 +++++-
+ 3 files changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
+index 8f46ff3449d5..871993696c5f 100644
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -252,6 +252,8 @@ struct hda_codec {
+ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
+ unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
++ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
++
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
+ unsigned long power_off_acct;
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 5346631df1ec..e30e86ca6b72 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2941,15 +2941,19 @@ static int hda_codec_runtime_resume(struct device *dev)
+ #ifdef CONFIG_PM_SLEEP
+ static int hda_codec_force_resume(struct device *dev)
+ {
++ struct hda_codec *codec = dev_to_hda_codec(dev);
++ bool forced_resume = !codec->relaxed_resume;
+ int ret;
+
+ /* The get/put pair below enforces the runtime resume even if the
+ * device hasn't been used at suspend time. This trick is needed to
+ * update the jack state change during the sleep.
+ */
+- pm_runtime_get_noresume(dev);
++ if (forced_resume)
++ pm_runtime_get_noresume(dev);
+ ret = pm_runtime_force_resume(dev);
+- pm_runtime_put(dev);
++ if (forced_resume)
++ pm_runtime_put(dev);
+ return ret;
+ }
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 0b2a26e2c5f1..bea7b0961080 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2292,8 +2292,10 @@ static void generic_hdmi_free(struct hda_codec *codec)
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx, pcm_idx;
+
+- if (codec_has_acomp(codec))
++ if (codec_has_acomp(codec)) {
+ snd_hdac_acomp_register_notifier(&codec->bus->core, NULL);
++ codec->relaxed_resume = 0;
++ }
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+@@ -2579,6 +2581,8 @@ static void register_i915_notifier(struct hda_codec *codec)
+ spec->drm_audio_ops.pin_eld_notify = intel_pin_eld_notify;
+ snd_hdac_acomp_register_notifier(&codec->bus->core,
+ &spec->drm_audio_ops);
++ /* no need for forcible resume for jack check thanks to notifier */
++ codec->relaxed_resume = 1;
+ }
+
+ /* setup_stream ops override for HSW+ */
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-hdmi-Fix-i915-reverse-port-pin-mapping.patch b/patches.drivers/ALSA-hda-hdmi-Fix-i915-reverse-port-pin-mapping.patch
new file mode 100644
index 0000000000..54a94cf485
--- /dev/null
+++ b/patches.drivers/ALSA-hda-hdmi-Fix-i915-reverse-port-pin-mapping.patch
@@ -0,0 +1,72 @@
+From 3140aafb22edeab0cc41f15f53b12a118c0ac215 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 15 Jul 2019 23:14:53 +0200
+Subject: [PATCH] ALSA: hda/hdmi - Fix i915 reverse port/pin mapping
+Git-commit: 3140aafb22edeab0cc41f15f53b12a118c0ac215
+Patch-mainline: v5.3-rc1
+References: bsc#1111666
+
+The recent fix for Icelake HDMI codec introduced the mapping from pin
+NID to the i915 gfx port number. However, it forgot the reverse
+mapping from the port number to the pin NID that is used in the ELD
+notifier callback. As a result, it's processed to a wrong widget and
+gives a warning like
+ snd_hda_codec_hdmi hdaudioC0D2: HDMI: pin nid 5 not registered
+
+This patch corrects it with a proper reverse mapping function.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=204133
+Fixes: b0d8bc50b9f2 ("ALSA: hda: hdmi - add Icelake support")
+Reviewed-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_hdmi.c | 24 +++++++++++++++++++-----
+ 1 file changed, 19 insertions(+), 5 deletions(-)
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 1e6c489bca15..0b2a26e2c5f1 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2524,18 +2524,32 @@ static int intel_pin2port(void *audio_ptr, int pin_nid)
+ return -1;
+ }
+
++static int intel_port2pin(struct hda_codec *codec, int port)
++{
++ struct hdmi_spec *spec = codec->spec;
++
++ if (!spec->port_num) {
++ /* we assume only from port-B to port-D */
++ if (port < 1 || port > 3)
++ return 0;
++ /* intel port is 1-based */
++ return port + intel_base_nid(codec) - 1;
++ }
++
++ if (port < 1 || port > spec->port_num)
++ return 0;
++ return spec->port_map[port - 1];
++}
++
+ static void intel_pin_eld_notify(void *audio_ptr, int port, int pipe)
+ {
+ struct hda_codec *codec = audio_ptr;
+ int pin_nid;
+ int dev_id = pipe;
+
+- /* we assume only from port-B to port-D */
+- if (port < 1 || port > 3)
++ pin_nid = intel_port2pin(codec, port);
++ if (!pin_nid)
+ return;
+-
+- pin_nid = port + intel_base_nid(codec) - 1; /* intel port is 1-based */
+-
+ /* skip notification during system suspend (but not in runtime PM);
+ * the state will be updated at resume
+ */
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-hdmi-Remove-duplicated-define.patch b/patches.drivers/ALSA-hda-hdmi-Remove-duplicated-define.patch
new file mode 100644
index 0000000000..21dbd0d2ac
--- /dev/null
+++ b/patches.drivers/ALSA-hda-hdmi-Remove-duplicated-define.patch
@@ -0,0 +1,34 @@
+From eb4177116bf568a413c544eca3f4446cb4064be9 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 15 Jul 2019 23:12:13 +0200
+Subject: [PATCH] ALSA: hda/hdmi - Remove duplicated define
+Git-commit: eb4177116bf568a413c544eca3f4446cb4064be9
+Patch-mainline: v5.3-rc1
+References: bsc#1111666
+
+INTEL_GET_VENDOR_VERB is defined twice identically.
+Let's remove a superfluous line.
+
+Fixes: b0d8bc50b9f2 ("ALSA: hda: hdmi - add Icelake support")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_hdmi.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 40323d91f9e4..1e6c489bca15 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2416,7 +2416,6 @@ static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
+ snd_hda_override_conn_list(codec, nid, spec->num_cvts, spec->cvt_nids);
+ }
+
+-#define INTEL_GET_VENDOR_VERB 0xf81
+ #define INTEL_GET_VENDOR_VERB 0xf81
+ #define INTEL_SET_VENDOR_VERB 0x781
+ #define INTEL_EN_DP12 0x02 /* enable DP 1.2 features */
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-realtek-Fixed-Headphone-Mic-can-t-record-on.patch b/patches.drivers/ALSA-hda-realtek-Fixed-Headphone-Mic-can-t-record-on.patch
new file mode 100644
index 0000000000..b87e2efc55
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-Fixed-Headphone-Mic-can-t-record-on.patch
@@ -0,0 +1,40 @@
+From fbc571290d9f7bfe089c50f4ac4028dd98ebfe98 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Mon, 15 Jul 2019 10:41:50 +0800
+Subject: [PATCH] ALSA: hda/realtek - Fixed Headphone Mic can't record on Dell platform
+Git-commit: fbc571290d9f7bfe089c50f4ac4028dd98ebfe98
+Patch-mainline: v5.3-rc1
+References: bsc#1051510
+
+It assigned to wrong model. So, The headphone Mic can't work.
+
+Fixes: 3f640970a414 ("ALSA: hda - Fix headset mic detection problem for several Dell laptops")
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f24a757f8239..1c84c12b39b3 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7657,9 +7657,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
+ {0x21, 0x03211020}),
+- SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
+ {0x14, 0x90170110},
+ {0x21, 0x04211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
++ {0x14, 0x90170110},
++ {0x21, 0x04211030}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS,
+ {0x17, 0x21014020},
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-hda-realtek-apply-ALC891-headset-fixup-to-one-D.patch b/patches.drivers/ALSA-hda-realtek-apply-ALC891-headset-fixup-to-one-D.patch
new file mode 100644
index 0000000000..30187d84d0
--- /dev/null
+++ b/patches.drivers/ALSA-hda-realtek-apply-ALC891-headset-fixup-to-one-D.patch
@@ -0,0 +1,37 @@
+From 4b4e0e32e4b09274dbc9d173016c1a026f44608c Mon Sep 17 00:00:00 2001
+From: Hui Wang <hui.wang@canonical.com>
+Date: Tue, 16 Jul 2019 15:21:34 +0800
+Subject: [PATCH] ALSA: hda/realtek: apply ALC891 headset fixup to one Dell machine
+Git-commit: 4b4e0e32e4b09274dbc9d173016c1a026f44608c
+Patch-mainline: v5.3-rc1
+References: bsc#1051510
+
+Without this patch, the headset-mic and headphone-mic don't work.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Hui Wang <hui.wang@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/pci/hda/patch_realtek.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 1c84c12b39b3..de224cbea7a0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -8803,6 +8803,11 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
+ {0x18, 0x01a19030},
+ {0x1a, 0x01813040},
+ {0x21, 0x01014020}),
++ SND_HDA_PIN_QUIRK(0x10ec0867, 0x1028, "Dell", ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
++ {0x16, 0x01813030},
++ {0x17, 0x02211010},
++ {0x18, 0x01a19040},
++ {0x21, 0x01014020}),
+ SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
+ {0x14, 0x01014010},
+ {0x18, 0x01a19020},
+--
+2.16.4
+
diff --git a/patches.drivers/ALSA-seq-Break-too-long-mutex-context-in-the-write-l.patch b/patches.drivers/ALSA-seq-Break-too-long-mutex-context-in-the-write-l.patch
new file mode 100644
index 0000000000..123538a072
--- /dev/null
+++ b/patches.drivers/ALSA-seq-Break-too-long-mutex-context-in-the-write-l.patch
@@ -0,0 +1,76 @@
+From ede34f397ddb063b145b9e7d79c6026f819ded13 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 15 Jul 2019 22:50:27 +0200
+Subject: [PATCH] ALSA: seq: Break too long mutex context in the write loop
+Git-commit: ede34f397ddb063b145b9e7d79c6026f819ded13
+Patch-mainline: v5.3-rc1
+References: bsc#1051510
+
+The fix for the racy writes and ioctls to sequencer widened the
+application of client->ioctl_mutex to the whole write loop. Although
+it does unlock/relock for the lengthy operation like the event dup,
+the loop keeps the ioctl_mutex for the whole time in other
+situations. This may take quite long time if the user-space would
+give a huge buffer, and this is a likely cause of some weird behavior
+spotted by syzcaller fuzzer.
+
+This patch puts a simple workaround, just adding a mutex break in the
+loop when a large number of events have been processed. This
+shouldn't hit any performance drop because the threshold is set high
+enough for usual operations.
+
+Fixes: 7bd800915677 ("ALSA: seq: More protection for concurrent write and ioctl races")
+Reported-by: syzbot+97aae04ce27e39cbfca9@syzkaller.appspotmail.com
+Reported-by: syzbot+4c595632b98bb8ffcc66@syzkaller.appspotmail.com
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ sound/core/seq/seq_clientmgr.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index a60e7a17f0b8..7737b2670064 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1021,7 +1021,7 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ {
+ struct snd_seq_client *client = file->private_data;
+ int written = 0, len;
+- int err;
++ int err, handled;
+ struct snd_seq_event event;
+
+ if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
+@@ -1034,6 +1034,8 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ if (!client->accept_output || client->pool == NULL)
+ return -ENXIO;
+
++ repeat:
++ handled = 0;
+ /* allocate the pool now if the pool is not allocated yet */
+ mutex_lock(&client->ioctl_mutex);
+ if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
+@@ -1093,12 +1095,19 @@ static ssize_t snd_seq_write(struct file *file, const char __user *buf,
+ 0, 0, &client->ioctl_mutex);
+ if (err < 0)
+ break;
++ handled++;
+
+ __skip_event:
+ /* Update pointers and counts */
+ count -= len;
+ buf += len;
+ written += len;
++
++ /* let's have a coffee break if too many events are queued */
++ if (++handled >= 200) {
++ mutex_unlock(&client->ioctl_mutex);
++ goto repeat;
++ }
+ }
+
+ out:
+--
+2.16.4
+
diff --git a/patches.drivers/clk-qcom-Fix-Wunused-const-variable.patch b/patches.drivers/clk-qcom-Fix-Wunused-const-variable.patch
new file mode 100644
index 0000000000..3ce29afdd8
--- /dev/null
+++ b/patches.drivers/clk-qcom-Fix-Wunused-const-variable.patch
@@ -0,0 +1,98 @@
+From da642427bd7710ec4f4140f693f59aa8521a358c Mon Sep 17 00:00:00 2001
+From: Nathan Huckleberry <nhuck@google.com>
+Date: Tue, 11 Jun 2019 14:11:34 -0700
+Subject: [PATCH] clk: qcom: Fix -Wunused-const-variable
+Git-commit: da642427bd7710ec4f4140f693f59aa8521a358c
+Patch-mainline: 5.3-rc1
+References: bsc#1051510
+
+Clang produces the following warning
+
+drivers/clk/qcom/gcc-msm8996.c:133:32: warning: unused variable
+'gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map' [-Wunused-const-variable]
+static const struct
+parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] =
+{ ^drivers/clk/qcom/gcc-msm8996.c:141:27: warning: unused variable
+'gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div' [-Wunused-const-variable] static
+const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = { ^
+drivers/clk/qcom/gcc-msm8996.c:187:32: warning: unused variable
+'gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map'
+[-Wunused-const-variable] static const struct parent_map
+gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = { ^
+drivers/clk/qcom/gcc-msm8996.c:197:27: warning: unused variable
+'gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div'
+[-Wunused-const-variable] static const char * const
+gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = {
+
+It looks like these were never used.
+
+Fixes: b1e010c0730a ("clk: qcom: Add MSM8996 Global Clock Control (GCC) driver")
+Cc: clang-built-linux@googlegroups.com
+Link: https://github.com/ClangBuiltLinux/linux/issues/518
+Suggested-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Nathan Huckleberry <nhuck@google.com>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/clk/qcom/gcc-msm8996.c | 36 ------------------------------------
+ 1 file changed, 36 deletions(-)
+
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index 4632b9272b7f..292d7214a226 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -138,22 +138,6 @@ static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ "gpll0_early_div"
+ };
+
+-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] = {
+- { P_XO, 0 },
+- { P_GPLL0, 1 },
+- { P_GPLL2, 2 },
+- { P_GPLL3, 3 },
+- { P_GPLL0_EARLY_DIV, 6 }
+-};
+-
+-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = {
+- "xo",
+- "gpll0",
+- "gpll2",
+- "gpll3",
+- "gpll0_early_div"
+-};
+-
+ static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+@@ -192,26 +176,6 @@ static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early
+ "gpll0_early_div"
+ };
+
+-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = {
+- { P_XO, 0 },
+- { P_GPLL0, 1 },
+- { P_GPLL2, 2 },
+- { P_GPLL3, 3 },
+- { P_GPLL1, 4 },
+- { P_GPLL4, 5 },
+- { P_GPLL0_EARLY_DIV, 6 }
+-};
+-
+-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = {
+- "xo",
+- "gpll0",
+- "gpll2",
+- "gpll3",
+- "gpll1",
+- "gpll4",
+- "gpll0_early_div"
+-};
+-
+ static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+--
+2.16.4
+
diff --git a/patches.drivers/clk-rockchip-Don-t-yell-about-bad-mmc-phases-when-ge.patch b/patches.drivers/clk-rockchip-Don-t-yell-about-bad-mmc-phases-when-ge.patch
new file mode 100644
index 0000000000..49795603d8
--- /dev/null
+++ b/patches.drivers/clk-rockchip-Don-t-yell-about-bad-mmc-phases-when-ge.patch
@@ -0,0 +1,52 @@
+From 6943b839721ad4a31ad2bacf6e71b21f2dfe3134 Mon Sep 17 00:00:00 2001
+From: Douglas Anderson <dianders@chromium.org>
+Date: Fri, 3 May 2019 14:22:08 -0700
+Subject: [PATCH] clk: rockchip: Don't yell about bad mmc phases when getting
+Git-commit: 6943b839721ad4a31ad2bacf6e71b21f2dfe3134
+Patch-mainline: 5.3-rc1
+References: bsc#1051510
+
+At boot time, my rk3288-veyron devices yell with 8 lines that look
+like this:
+ [ 0.000000] rockchip_mmc_get_phase: invalid clk rate
+
+This is because the clock framework at clk_register() time tries to
+get the phase but we don't have a parent yet.
+
+While the errors appear to be harmless they are still ugly and, in
+general, we don't want yells like this in the log unless they are
+important.
+
+There's no real reason to be yelling here. We can still return
+-EINVAL to indicate that the phase makes no sense without a parent.
+If someone really tries to do tuning and the clock is reported as 0
+then we'll see the yells in rockchip_mmc_set_phase().
+
+Fixes: 4bf59902b500 ("clk: rockchip: Prevent calculating mmc phase if clock rate is zero")
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/clk/rockchip/clk-mmc-phase.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
+index 07526f64dbfd..17662217d1bb 100644
+--- a/drivers/clk/rockchip/clk-mmc-phase.c
++++ b/drivers/clk/rockchip/clk-mmc-phase.c
+@@ -61,10 +61,8 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw)
+ u32 delay_num = 0;
+
+ /* See the comment for rockchip_mmc_set_phase below */
+- if (!rate) {
+- pr_err("%s: invalid clk rate\n", __func__);
++ if (!rate)
+ return -EINVAL;
+- }
+
+ raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+--
+2.16.4
+
diff --git a/patches.drivers/clk-tegra210-fix-PLLU-and-PLLU_OUT1.patch b/patches.drivers/clk-tegra210-fix-PLLU-and-PLLU_OUT1.patch
new file mode 100644
index 0000000000..69ac925158
--- /dev/null
+++ b/patches.drivers/clk-tegra210-fix-PLLU-and-PLLU_OUT1.patch
@@ -0,0 +1,72 @@
+From 0d34dfbf3023cf119b83f6470692c0b10c832495 Mon Sep 17 00:00:00 2001
+From: JC Kuo <jckuo@nvidia.com>
+Date: Wed, 12 Jun 2019 11:14:34 +0800
+Subject: [PATCH] clk: tegra210: fix PLLU and PLLU_OUT1
+Git-commit: 0d34dfbf3023cf119b83f6470692c0b10c832495
+Patch-mainline: 5.3-rc1
+References: bsc#1051510
+
+Full-speed and low-speed USB devices do not work with Tegra210
+platforms because of incorrect PLLU/PLLU_OUT1 clock settings.
+
+When full-speed device is connected:
+[ 14.059886] usb 1-3: new full-speed USB device number 2 using tegra-xusb
+[ 14.196295] usb 1-3: device descriptor read/64, error -71
+[ 14.436311] usb 1-3: device descriptor read/64, error -71
+[ 14.675749] usb 1-3: new full-speed USB device number 3 using tegra-xusb
+[ 14.812335] usb 1-3: device descriptor read/64, error -71
+[ 15.052316] usb 1-3: device descriptor read/64, error -71
+[ 15.164799] usb usb1-port3: attempt power cycle
+
+When low-speed device is connected:
+[ 37.610949] usb usb1-port3: Cannot enable. Maybe the USB cable is bad?
+[ 38.557376] usb usb1-port3: Cannot enable. Maybe the USB cable is bad?
+[ 38.564977] usb usb1-port3: attempt power cycle
+
+This commit fixes the issue by:
+ 1. initializing PLLU_OUT1 before initializing XUSB_FS_SRC clock
+ because PLLU_OUT1 is parent of XUSB_FS_SRC.
+ 2. changing PLLU post-divider to /2 (DIVP=1) according to Technical
+ Reference Manual.
+
+Fixes: e745f992cf4b ("clk: tegra: Rework pll_u")
+Signed-off-by: JC Kuo <jckuo@nvidia.com>
+Acked-by: Peter De Schrijver <pdeschrijver@nvidia.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/clk/tegra/clk-tegra210.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/clk/tegra/clk-tegra210.c
++++ b/drivers/clk/tegra/clk-tegra210.c
+@@ -2054,9 +2054,9 @@ static struct div_nmp pllu_nmp = {
+ };
+
+ static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
+- { 12000000, 480000000, 40, 1, 0, 0 },
+- { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
+- { 38400000, 480000000, 25, 2, 0, 0 },
++ { 12000000, 480000000, 40, 1, 1, 0 },
++ { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
++ { 38400000, 480000000, 25, 2, 1, 0 },
+ { 0, 0, 0, 0, 0, 0 },
+ };
+
+@@ -2979,6 +2979,7 @@ static struct tegra_clk_init_table init_
+ { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
+ { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
+ { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
++ { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
+ { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
+ { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
+ { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
+@@ -3004,7 +3005,6 @@ static struct tegra_clk_init_table init_
+ { TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
+ { TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
+ { TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
+- { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
+ { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
+ /* This MUST be the last entry. */
+ { TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
diff --git a/patches.drivers/dmaengine-hsu-Revert-set-HSU_CH_MTSR-to-memory-width.patch b/patches.drivers/dmaengine-hsu-Revert-set-HSU_CH_MTSR-to-memory-width.patch
new file mode 100644
index 0000000000..eabef6274a
--- /dev/null
+++ b/patches.drivers/dmaengine-hsu-Revert-set-HSU_CH_MTSR-to-memory-width.patch
@@ -0,0 +1,51 @@
+From c24a5c735f87d0549060de31367c095e8810b895 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Thu, 13 Jun 2019 16:32:32 +0300
+Subject: [PATCH] dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width"
+Git-commit: c24a5c735f87d0549060de31367c095e8810b895
+Patch-mainline: 5.3-rc1
+References: bsc#1051510
+
+The commit
+
+ 080edf75d337 ("dmaengine: hsu: set HSU_CH_MTSR to memory width")
+
+has been mistakenly submitted. The further investigations show that
+the original code does better job since the memory side transfer size
+has never been configured by DMA users.
+
+As per latest revision of documentation: "Channel minimum transfer size
+(CHnMTSR)... For IOSF UART, maximum value that can be programmed is 64 and
+minimum value that can be programmed is 1."
+
+This reverts commit 080edf75d337d35faa6fc3df99342b10d2848d16.
+
+Fixes: 080edf75d337 ("dmaengine: hsu: set HSU_CH_MTSR to memory width")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/dma/hsu/hsu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index e06f20272fd7..dfabc64c2ab0 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
+
+ if (hsuc->direction == DMA_MEM_TO_DEV) {
+ bsr = config->dst_maxburst;
+- mtsr = config->src_addr_width;
++ mtsr = config->dst_addr_width;
+ } else if (hsuc->direction == DMA_DEV_TO_MEM) {
+ bsr = config->src_maxburst;
+- mtsr = config->dst_addr_width;
++ mtsr = config->src_addr_width;
+ }
+
+ hsu_chan_disable(hsuc);
+--
+2.16.4
+
diff --git a/patches.drivers/mei-bus-need-to-unlink-client-before-freeing.patch b/patches.drivers/mei-bus-need-to-unlink-client-before-freeing.patch
new file mode 100644
index 0000000000..dcaf538c99
--- /dev/null
+++ b/patches.drivers/mei-bus-need-to-unlink-client-before-freeing.patch
@@ -0,0 +1,79 @@
+From 34f1166afd67f9f48a08c52f36180048908506a4 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Mon, 27 Aug 2018 22:40:16 +0300
+Subject: [PATCH] mei: bus: need to unlink client before freeing
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: 34f1166afd67f9f48a08c52f36180048908506a4
+Patch-mainline: v4.19-rc4
+References: bsc#1051510
+
+In case a client fails to connect in mei_cldev_enable(), the
+caller won't call the mei_cldev_disable leaving the client
+in a linked stated. Upon driver unload the client structure
+will be freed in mei_cl_bus_dev_release(), leaving a stale pointer
+on a fail_list. This will eventually end up in crash
+during power down flow in mei_cl_set_disonnected().
+
+Rip: mei_cl_set_disconnected+0x5/0x260[mei]
+Call trace:
+mei_cl_all_disconnect+0x22/0x30
+mei_reset+0x194/0x250
+__synchronize_hardirq+0x43/0x50
+_cond_resched+0x15/0x30
+mei_me_intr_clear+0x20/0x100
+mei_stop+0x76/0xb0
+mei_me_shutdown+0x3f/0x80
+pci_device_shutdown+0x34/0x60
+kernel_restart+0x0e/0x30
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=200455
+Fixes: 'c110cdb17148 ("mei: bus: make a client pointer always available")'
+Cc: <stable@vger.kernel.org> 4.10+
+Tested-by: Georg Müller <georgmueller@gmx.net>
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/misc/mei/bus.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/misc/mei/bus.c
++++ b/drivers/misc/mei/bus.c
+@@ -465,17 +465,15 @@ int mei_cldev_enable(struct mei_cl_devic
+
+ cl = cldev->cl;
+
++ mutex_lock(&bus->device_lock);
+ if (cl->state == MEI_FILE_UNINITIALIZED) {
+- mutex_lock(&bus->device_lock);
+ ret = mei_cl_link(cl);
+- mutex_unlock(&bus->device_lock);
+ if (ret)
+- return ret;
++ goto out;
+ /* update pointers */
+ cl->cldev = cldev;
+ }
+
+- mutex_lock(&bus->device_lock);
+ if (mei_cl_is_connected(cl)) {
+ ret = 0;
+ goto out;
+@@ -841,12 +839,13 @@ static void mei_cl_bus_dev_release(struc
+
+ mei_me_cl_put(cldev->me_cl);
+ mei_dev_bus_put(cldev->bus);
++ mei_cl_unlink(cldev->cl);
+ kfree(cldev->cl);
+ kfree(cldev);
+ }
+
+ static struct device_type mei_cl_device_type = {
+- .release = mei_cl_bus_dev_release,
++ .release = mei_cl_bus_dev_release,
+ };
+
+ /**
diff --git a/patches.drivers/mei-me-add-denverton-innovation-engine-device-IDs.patch b/patches.drivers/mei-me-add-denverton-innovation-engine-device-IDs.patch
new file mode 100644
index 0000000000..d6a9450207
--- /dev/null
+++ b/patches.drivers/mei-me-add-denverton-innovation-engine-device-IDs.patch
@@ -0,0 +1,45 @@
+From f7ee8ead151f9d0b8dac6ab6c3ff49bbe809c564 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Sun, 13 Jan 2019 14:24:48 +0200
+Subject: [PATCH] mei: me: add denverton innovation engine device IDs
+Git-commit: f7ee8ead151f9d0b8dac6ab6c3ff49bbe809c564
+Patch-mainline: v5.0-rc4
+References: bsc#1051510
+
+Add the Denverton innovation engine (IE) device ids.
+The IE is an ME-like device which provides HW security
+offloading.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/misc/mei/hw-me-regs.h | 2 ++
+ drivers/misc/mei/pci-me.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -127,6 +127,8 @@
+ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
++#define MEI_DEV_ID_DNV_IE 0x19E5 /* Denverton IE */
++
+ #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
+
+ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -92,6 +92,8 @@ static const struct pci_device_id mei_me
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_DNV_IE, mei_me_pch8_cfg)},
++
+ {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, mei_me_pch8_cfg)},
+
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
diff --git a/patches.drivers/mei-me-add-gemini-lake-devices-id.patch b/patches.drivers/mei-me-add-gemini-lake-devices-id.patch
new file mode 100644
index 0000000000..26bfea1fe7
--- /dev/null
+++ b/patches.drivers/mei-me-add-gemini-lake-devices-id.patch
@@ -0,0 +1,42 @@
+From 688cb67839e852740d22cf763e5eafb27d5a6e53 Mon Sep 17 00:00:00 2001
+From: Tomas Winkler <tomas.winkler@intel.com>
+Date: Sun, 24 Sep 2017 11:35:34 +0300
+Subject: [PATCH] mei: me: add gemini lake devices id
+Git-commit: 688cb67839e852740d22cf763e5eafb27d5a6e53
+Patch-mainline: v4.14-rc5
+References: bsc#1051510
+
+Add Gemini Lake (GLK) device id.
+
+Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/misc/mei/hw-me-regs.h | 2 ++
+ drivers/misc/mei/pci-me.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+--- a/drivers/misc/mei/hw-me-regs.h
++++ b/drivers/misc/mei/hw-me-regs.h
+@@ -127,6 +127,8 @@
+ #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+ #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
++#define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */
++
+ #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
+ #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+
+--- a/drivers/misc/mei/pci-me.c
++++ b/drivers/misc/mei/pci-me.c
+@@ -92,6 +92,8 @@ static const struct pci_device_id mei_me
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
++ {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, mei_me_pch8_cfg)},
++
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
+
diff --git a/patches.drm/drm-vc4-fix-fb-references-in-async-update.patch b/patches.drm/drm-vc4-fix-fb-references-in-async-update.patch
new file mode 100644
index 0000000000..f8d2bc62a1
--- /dev/null
+++ b/patches.drm/drm-vc4-fix-fb-references-in-async-update.patch
@@ -0,0 +1,41 @@
+From c16b85559dcfb5a348cc085a7b4c75ed49b05e2c Mon Sep 17 00:00:00 2001
+From: Helen Koike <helen.koike@collabora.com>
+Date: Mon, 3 Jun 2019 13:56:09 -0300
+Subject: [PATCH] drm/vc4: fix fb references in async update
+Git-commit: c16b85559dcfb5a348cc085a7b4c75ed49b05e2c
+Patch-mainline: v5.2-rc4
+References: bsc#1141312
+
+[ adjusted the context to match with SLE15-SP1 -- tiwai ]
+
+Async update callbacks are expected to set the old_fb in the new_state
+so prepare/cleanup framebuffers are balanced.
+
+Calling drm_atomic_set_fb_for_plane() (which gets a reference of the new
+fb and put the old fb) is not required, as it's taken care by
+drm_mode_cursor_universal() when calling drm_atomic_helper_update_plane().
+
+Cc: <stable@vger.kernel.org> # v4.19+
+Fixes: 539c320bfa97 ("drm/vc4: update cursors asynchronously through atomic")
+Suggested-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Helen Koike <helen.koike@collabora.com>
+Reviewed-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190603165610.24614-5-helen.koike@collabora.com
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/gpu/drm/vc4/vc4_plane.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/vc4/vc4_plane.c
++++ b/drivers/gpu/drm/vc4/vc4_plane.c
+@@ -815,7 +815,7 @@ static void vc4_plane_atomic_async_updat
+
+ if (plane->state->fb != state->fb) {
+ vc4_plane_async_set_fb(plane, state->fb);
+- drm_atomic_set_fb_for_plane(plane->state, state->fb);
++ swap(plane->state->fb, state->fb);
+ }
+
+ /* Set the cursor's position on the screen. This is the
diff --git a/patches.fixes/crypto-talitos-fix-max-key-size-for-sha384-and-sha51.patch b/patches.fixes/crypto-talitos-fix-max-key-size-for-sha384-and-sha51.patch
new file mode 100644
index 0000000000..194d4bbf1d
--- /dev/null
+++ b/patches.fixes/crypto-talitos-fix-max-key-size-for-sha384-and-sha51.patch
@@ -0,0 +1,42 @@
+From 192125ed5ce62afba24312d8e7a0314577565b4a Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Wed, 12 Jun 2019 05:49:50 +0000
+Subject: [PATCH] crypto: talitos - fix max key size for sha384 and sha512
+Mime-version: 1.0
+Content-type: text/plain; charset=UTF-8
+Content-transfer-encoding: 8bit
+Git-commit: 192125ed5ce62afba24312d8e7a0314577565b4a
+Patch-mainline: 5.3-rc1
+References: bsc#1051510
+
+Below commit came with a typo in the CONFIG_ symbol, leading
+to a permanently reduced max key size regarless of the driver
+capabilities.
+
+Reported-by: Horia Geantă <horia.geanta@nxp.com>
+Fixes: b8fbdc2bc4e7 ("crypto: talitos - reduce max key size for SEC1")
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/crypto/talitos.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 32a7e747dc5f..c865f5d5eaba 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -823,7 +823,7 @@ static void talitos_unregister_rng(struct device *dev)
+ * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
+ */
+ #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
+-#ifdef CONFIG_CRYPTO_DEV_TALITOS_SEC2
++#ifdef CONFIG_CRYPTO_DEV_TALITOS2
+ #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
+ #else
+ #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE)
+--
+2.16.4
+
diff --git a/patches.fixes/x86-asm-memcpy_mcsafe-Define-copy_to_iter_mcsafe.patch b/patches.fixes/x86-asm-memcpy_mcsafe-Define-copy_to_iter_mcsafe.patch
index ffdaacaa77..b2dc6e37cc 100644
--- a/patches.fixes/x86-asm-memcpy_mcsafe-Define-copy_to_iter_mcsafe.patch
+++ b/patches.fixes/x86-asm-memcpy_mcsafe-Define-copy_to_iter_mcsafe.patch
@@ -37,7 +37,7 @@ Acked-by: Jan Kara <jack@suse.cz>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,6 +54,7 @@ config X86
- select ARCH_HAS_KCOV if X86_64
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
+ select ARCH_HAS_UACCESS_MCSAFE if X86_64
diff --git a/patches.fixes/x86-asm-memcpy_mcsafe-Fix-copy_to_user_mcsafe-except.patch b/patches.fixes/x86-asm-memcpy_mcsafe-Fix-copy_to_user_mcsafe-except.patch
index c0676aecc8..72a2408a0b 100644
--- a/patches.fixes/x86-asm-memcpy_mcsafe-Fix-copy_to_user_mcsafe-except.patch
+++ b/patches.fixes/x86-asm-memcpy_mcsafe-Fix-copy_to_user_mcsafe-except.patch
@@ -61,7 +61,7 @@ Acked-by: Jan Kara <jack@suse.cz>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -54,7 +54,7 @@ config X86
- select ARCH_HAS_KCOV if X86_64
+ select ARCH_HAS_PHYS_TO_DMA
select ARCH_HAS_PMEM_API if X86_64
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
- select ARCH_HAS_UACCESS_MCSAFE if X86_64
diff --git a/patches.kabi/hda-relaxed_resume-flag-kabi-fix.patch b/patches.kabi/hda-relaxed_resume-flag-kabi-fix.patch
new file mode 100644
index 0000000000..0ff011bc2a
--- /dev/null
+++ b/patches.kabi/hda-relaxed_resume-flag-kabi-fix.patch
@@ -0,0 +1,28 @@
+From: Takashi Iwai <tiwai@suse.de>
+Subject: kABI fix for hda_codec.relaxed_resume flag
+Patch-mainline: Never, kABI workaround
+References: bsc#1111666
+
+The recent fix
+ ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch
+introduced a new bit flag to hda_codec object, and we need to wrap it
+with the usual ifdef for kABI compatibility.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ include/sound/hda_codec.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/sound/hda_codec.h
++++ b/include/sound/hda_codec.h
+@@ -262,7 +262,9 @@ struct hda_codec {
+ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
+ unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
++#ifndef __GENKSYMS__
+ unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
++#endif
+
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
diff --git a/patches.kabi/iommu-helper-mark-iommu_is_span_boundary-as-inline b/patches.kabi/iommu-helper-mark-iommu_is_span_boundary-as-inline
new file mode 100644
index 0000000000..af6b6b2f3a
--- /dev/null
+++ b/patches.kabi/iommu-helper-mark-iommu_is_span_boundary-as-inline
@@ -0,0 +1,25 @@
+From: Petr Tesarik <ptesarik@suse.com>
+Subject: kABI: Fix lost iommu-helper symbols on arm64
+Patch-mainline: never, kabi
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Since IOMMU_HELPER symbols are already part of kABI, they cannot be
+removed after release.
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/arm64/Kconfig | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -230,6 +230,9 @@ config SMP
+ config SWIOTLB
+ def_bool y
+
++config IOMMU_HELPER
++ def_bool SWIOTLB
++
+ config KERNEL_MODE_NEON
+ def_bool y
+
diff --git a/patches.kabi/s390-airq-use-dma-memory-for-adapter-interrupts b/patches.kabi/s390-airq-use-dma-memory-for-adapter-interrupts
new file mode 100644
index 0000000000..993a99d7b7
--- /dev/null
+++ b/patches.kabi/s390-airq-use-dma-memory-for-adapter-interrupts
@@ -0,0 +1,50 @@
+From: Petr Tesarik <ptesarik@suse.com>
+Subject: kABI: mask changes made by use of DMA memory for adapter interrupts
+Patch-mainline: never, kabi
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Upstream commit b50623e5db802e41736f3305cb54c03bc7f0e30a added a new
+field to struct iv_irq. This struct is never embedded and should not be
+allocated outside of airq_iv_create(), so it is safe to add masked
+fields at the end of the struct.
+
+The same upstream commit also changes some symvers by including
+linux/dma-mapping.h from arch/s390/include/asm/airq.h. This provides
+full definition of a few dma-related structs that were previously
+incomplete.
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/airq.h | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/airq.h
++++ b/arch/s390/include/asm/airq.h
+@@ -10,7 +10,9 @@
+ #define _ASM_S390_AIRQ_H
+
+ #include <linux/bit_spinlock.h>
++#ifndef __GENKSYMS__
+ #include <linux/dma-mapping.h>
++#endif
+
+ struct airq_struct {
+ struct hlist_node list; /* Handler queueing. */
+@@ -29,7 +31,6 @@ void unregister_adapter_interrupt(struct
+ /* Adapter interrupt bit vector */
+ struct airq_iv {
+ unsigned long *vector; /* Adapter interrupt bit vector */
+- dma_addr_t vector_dma; /* Adapter interrupt bit vector dma */
+ unsigned long *avail; /* Allocation bit mask for the bit vector */
+ unsigned long *bitlock; /* Lock bit mask for the bit vector */
+ unsigned long *ptr; /* Pointer associated with each bit */
+@@ -38,6 +39,9 @@ struct airq_iv {
+ unsigned long end; /* Number of highest allocated bit + 1 */
+ unsigned long flags; /* Allocation flags */
+ spinlock_t lock; /* Lock to protect alloc & free */
++#ifndef __GENKSYMS__
++ dma_addr_t vector_dma; /* Adapter interrupt bit vector dma */
++#endif
+ };
+
+ #define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
diff --git a/patches.kabi/s390-cio-add-basic-protected-virtualization-support b/patches.kabi/s390-cio-add-basic-protected-virtualization-support
new file mode 100644
index 0000000000..ac320f68e2
--- /dev/null
+++ b/patches.kabi/s390-cio-add-basic-protected-virtualization-support
@@ -0,0 +1,61 @@
+From: Petr Tesarik <ptesarik@suse.com>
+Subject: kABI: mask changes made by basic protected virtualization support
+Patch-mainline: never, kabi
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Upstream commit 37db8985b2116c89a3cbaf87083a02f83afaba5b redesigns some
+fields in struct ccw_device_private. This is a private struct and should
+never be used by third-party code, so simply revert the changes for
+genksyms.
+
+The same upstream commit also changes some symvers by including
+linux/dma-mapping.h from drivers/s390/cio/device.c. This provides full
+definition of a few dma-related structs that were previously incomplete.
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/cio/device.c | 2 ++
+ drivers/s390/cio/io_sch.h | 10 ++++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -25,7 +25,9 @@
+ #include <linux/timer.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/sched/signal.h>
++#ifndef __GENKSYMS__
+ #include <linux/dma-mapping.h>
++#endif
+
+ #include <asm/ccwdev.h>
+ #include <asm/cio.h>
+--- a/drivers/s390/cio/io_sch.h
++++ b/drivers/s390/cio/io_sch.h
+@@ -165,7 +165,15 @@ struct ccw_device_private {
+ } __attribute__((packed)) flags;
+ unsigned long intparm; /* user interruption parameter */
+ struct qdio_irq *qdio_data;
++#ifdef __GENKSYMS__
++ struct irb irb; /* device status */
++#endif
+ int async_kill_io_rc;
++#ifdef __GENKSYMS__
++ struct senseid senseid; /* SenseID info */
++ struct pgid pgid[8]; /* path group IDs per chpid*/
++ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
++#endif
+ struct work_struct todo_work;
+ enum cdev_todo todo;
+ wait_queue_head_t wait_q;
+@@ -174,8 +182,10 @@ struct ccw_device_private {
+ struct list_head cmb_list; /* list of measured devices */
+ u64 cmb_start_time; /* clock value of cmb reset */
+ void *cmb_wait; /* deferred cmb enable/disable */
++#ifndef __GENKSYMS__
+ struct gen_pool *dma_pool;
+ struct ccw_device_dma_area *dma_area;
++#endif
+ enum interruption_class int_class;
+ };
+
diff --git a/patches.kabi/s390-mm-force-swiotlb-for-protected-virtualization b/patches.kabi/s390-mm-force-swiotlb-for-protected-virtualization
new file mode 100644
index 0000000000..3ac87306cb
--- /dev/null
+++ b/patches.kabi/s390-mm-force-swiotlb-for-protected-virtualization
@@ -0,0 +1,27 @@
+From: Petr Tesarik <ptesarik@suse.com>
+Subject: kABI: mask changes made by swiotlb for protected virtualization
+Patch-mainline: never, kabi
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Upstream commit 64e1f0c531d1072cd97939bf0d8df42b26713543 changes some
+symvers by including linux/dma-mapping.h from drivers/s390/cio/css.c.
+This provides full definition of a few dma-related structs that were
+previously incomplete.
+
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/cio/css.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -22,7 +22,9 @@
+ #include <linux/suspend.h>
+ #include <linux/proc_fs.h>
+ #include <linux/genalloc.h>
++#ifndef __GENKSYMS__
+ #include <linux/dma-mapping.h>
++#endif
+ #include <asm/isc.h>
+ #include <asm/crw.h>
+
diff --git a/patches.suse/arm64-don-t-override-dma_max_pfn b/patches.suse/arm64-don-t-override-dma_max_pfn
new file mode 100644
index 0000000000..98493e680a
--- /dev/null
+++ b/patches.suse/arm64-don-t-override-dma_max_pfn
@@ -0,0 +1,34 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 9 Jan 2018 22:11:10 +0100
+Subject: arm64: don't override dma_max_pfn
+Git-commit: 298f0027d5452c171dc9e7caccbd00e36056aaa3
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+The generic version now takes dma_pfn_offset into account, so there is no
+more need for an architecture override.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/arm64/include/asm/dma-mapping.h | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -79,14 +79,5 @@ static inline void dma_mark_clean(void *
+ {
+ }
+
+-/* Override for dma_max_pfn() */
+-static inline unsigned long dma_max_pfn(struct device *dev)
+-{
+- dma_addr_t dma_max = (dma_addr_t)*dev->dma_mask;
+-
+- return (ulong)dma_to_phys(dev, dma_max) >> PAGE_SHIFT;
+-}
+-#define dma_max_pfn(dev) dma_max_pfn(dev)
+-
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_DMA_MAPPING_H */
diff --git a/patches.suse/dma-direct-add-support-for-allocation-from-zone_dma-and-zone_dma32 b/patches.suse/dma-direct-add-support-for-allocation-from-zone_dma-and-zone_dma32
new file mode 100644
index 0000000000..d5b31b04f9
--- /dev/null
+++ b/patches.suse/dma-direct-add-support-for-allocation-from-zone_dma-and-zone_dma32
@@ -0,0 +1,50 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 9 Jan 2018 23:39:03 +0100
+Subject: dma-direct: add support for allocation from ZONE_DMA and ZONE_DMA32
+Git-commit: c61e9637340e1d0672c6f0c0b11aa81d7262c49d
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+This allows to dip into zones for lower memory if they are available.
+If one of the zones is not available the corresponding GFP_* flag
+will evaluate to 0 so they won't change anything. We provide an
+arch tunable for those architectures that do not use GFP_DMA for
+the lowest 24-bits, given that there are a few.
+
+Roughly based on the x86 code.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ lib/dma-noop.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/lib/dma-noop.c
++++ b/lib/dma-noop.c
+@@ -8,12 +8,26 @@
+ #include <linux/dma-direct.h>
+ #include <linux/scatterlist.h>
+
++/*
++ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
++ * some use it for entirely different regions:
++ */
++#ifndef ARCH_ZONE_DMA_BITS
++#define ARCH_ZONE_DMA_BITS 24
++#endif
++
+ static void *dma_noop_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+ {
+ void *ret;
+
++ /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
++ if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
++ gfp |= GFP_DMA;
++ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
++ gfp |= GFP_DMA32;
++
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+ if (ret)
+ *dma_handle = virt_to_phys(ret);
diff --git a/patches.suse/dma-direct-don-t-retry-allocation-for-no-op-gfp_dma b/patches.suse/dma-direct-don-t-retry-allocation-for-no-op-gfp_dma
new file mode 100644
index 0000000000..0367bebf3e
--- /dev/null
+++ b/patches.suse/dma-direct-don-t-retry-allocation-for-no-op-gfp_dma
@@ -0,0 +1,34 @@
+From: Takashi Iwai <tiwai@suse.de>
+Date: Sun, 15 Apr 2018 11:08:07 +0200
+Subject: dma-direct: don't retry allocation for no-op GFP_DMA
+Git-commit: 504a918e6714b551b7b39940dbab32610fafa1fe
+Patch-mainline: v4.17-rc3
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+When an allocation with lower dma_coherent mask fails, dma_direct_alloc()
+retries the allocation with GFP_DMA. But, this is useless for
+architectures that hav no ZONE_DMA.
+
+Fix it by adding the check of CONFIG_ZONE_DMA before retrying the
+allocation.
+
+Fixes: 95f183916d4b ("dma-direct: retry allocations using GFP_DMA for small masks")
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ lib/dma-noop.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/lib/dma-noop.c
++++ b/lib/dma-noop.c
+@@ -25,7 +25,8 @@ again:
+ free_pages((unsigned long)ret, get_order(size));
+ ret = NULL;
+
+- if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
++ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
++ dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+ !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
diff --git a/patches.suse/dma-direct-retry-allocations-using-gfp_dma-for-small-masks b/patches.suse/dma-direct-retry-allocations-using-gfp_dma-for-small-masks
new file mode 100644
index 0000000000..42ff797a41
--- /dev/null
+++ b/patches.suse/dma-direct-retry-allocations-using-gfp_dma-for-small-masks
@@ -0,0 +1,56 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 9 Jan 2018 23:40:57 +0100
+Subject: dma-direct: retry allocations using GFP_DMA for small masks
+Git-commit: 95f183916d4b0bc1943684948ecdd2469f1aa978
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+If an attempt to allocate memory succeeded, but isn't inside the
+supported DMA mask, retry the allocation with GFP_DMA set as a
+last resort.
+
+Based on the x86 code, but an off by one error in what is now
+dma_coherent_ok has been fixed vs the x86 code.
+
+[ ptesarik: This is in fact a rewrite for dma-noop, taking only the
+ idea from the original commit. ]
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ lib/dma-noop.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/lib/dma-noop.c
++++ b/lib/dma-noop.c
+@@ -16,6 +16,11 @@
+ #define ARCH_ZONE_DMA_BITS 24
+ #endif
+
++static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
++{
++ return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
++}
++
+ static void *dma_noop_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ unsigned long attrs)
+@@ -28,7 +33,18 @@ static void *dma_noop_alloc(struct devic
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+
++again:
+ ret = (void *)__get_free_pages(gfp, get_order(size));
++ if (ret && !dma_coherent_ok(dev, virt_to_phys(ret), size)) {
++ free_pages((unsigned long)ret, get_order(size));
++ ret = NULL;
++
++ if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
++ !(gfp & GFP_DMA)) {
++ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
++ goto again;
++ }
++ }
+ if (ret)
+ *dma_handle = virt_to_phys(ret);
+ return ret;
diff --git a/patches.suse/dma-mapping-move-dma_mark_clean-to-dma-direct-h b/patches.suse/dma-mapping-move-dma_mark_clean-to-dma-direct-h
new file mode 100644
index 0000000000..3e3d31fce8
--- /dev/null
+++ b/patches.suse/dma-mapping-move-dma_mark_clean-to-dma-direct-h
@@ -0,0 +1,139 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 9 Jan 2018 22:11:31 +0100
+Subject: dma-mapping: move dma_mark_clean to dma-direct.h
+Git-commit: b49efd76248242169f28ffd20ada05064d01ed9f
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+And unlike the other helpers we don't require a <asm/dma-direct.h> as
+this helper is a special case for ia64 only, and this keeps it as
+simple as possible.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/arm/include/asm/dma-mapping.h | 2 --
+ arch/arm64/include/asm/dma-mapping.h | 4 ----
+ arch/ia64/Kconfig | 1 +
+ arch/ia64/include/asm/dma.h | 2 --
+ arch/mips/include/asm/dma-mapping.h | 2 --
+ arch/powerpc/include/asm/swiotlb.h | 2 --
+ arch/tile/include/asm/dma-mapping.h | 2 --
+ arch/unicore32/include/asm/dma-mapping.h | 2 --
+ arch/x86/include/asm/swiotlb.h | 2 --
+ include/linux/dma-direct.h | 9 +++++++++
+ 10 files changed, 10 insertions(+), 18 deletions(-)
+
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -112,8 +112,6 @@ static inline bool is_device_dma_coheren
+ return dev->archdata.dma_coherent;
+ }
+
+-static inline void dma_mark_clean(void *addr, size_t size) { }
+-
+ /**
+ * arm_dma_alloc - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -53,9 +53,5 @@ static inline bool is_device_dma_coheren
+ return dev->archdata.dma_coherent;
+ }
+
+-static inline void dma_mark_clean(void *addr, size_t size)
+-{
+-}
+-
+ #endif /* __KERNEL__ */
+ #endif /* __ASM_DMA_MAPPING_H */
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -32,6 +32,7 @@ config IA64
+ select HAVE_MEMBLOCK
+ select HAVE_MEMBLOCK_NODE_MAP
+ select HAVE_VIRT_CPU_ACCOUNTING
++ select ARCH_HAS_DMA_MARK_CLEAN
+ select ARCH_HAS_SG_CHAIN
+ select VIRT_TO_BUS
+ select ARCH_DISCARD_MEMBLOCK
+--- a/arch/ia64/include/asm/dma.h
++++ b/arch/ia64/include/asm/dma.h
+@@ -19,6 +19,4 @@ extern unsigned long MAX_DMA_ADDRESS;
+
+ #define free_dma(x)
+
+-void dma_mark_clean(void *addr, size_t size);
+-
+ #endif /* _ASM_IA64_DMA_H */
+--- a/arch/mips/include/asm/dma-mapping.h
++++ b/arch/mips/include/asm/dma-mapping.h
+@@ -16,8 +16,6 @@ static inline const struct dma_map_ops *
+ return mips_dma_map_ops;
+ }
+
+-static inline void dma_mark_clean(void *addr, size_t size) {}
+-
+ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+--- a/arch/powerpc/include/asm/swiotlb.h
++++ b/arch/powerpc/include/asm/swiotlb.h
+@@ -15,8 +15,6 @@
+
+ extern const struct dma_map_ops swiotlb_dma_ops;
+
+-static inline void dma_mark_clean(void *addr, size_t size) {}
+-
+ extern unsigned int ppc_swiotlb_enable;
+ int __init swiotlb_setup_bus_notifier(void);
+
+--- a/arch/tile/include/asm/dma-mapping.h
++++ b/arch/tile/include/asm/dma-mapping.h
+@@ -44,8 +44,6 @@ static inline void set_dma_offset(struct
+ dev->archdata.dma_offset = off;
+ }
+
+-static inline void dma_mark_clean(void *addr, size_t size) {}
+-
+ #define HAVE_ARCH_DMA_SET_MASK 1
+ int dma_set_mask(struct device *dev, u64 mask);
+
+--- a/arch/unicore32/include/asm/dma-mapping.h
++++ b/arch/unicore32/include/asm/dma-mapping.h
+@@ -28,8 +28,6 @@ static inline const struct dma_map_ops *
+ return &swiotlb_dma_map_ops;
+ }
+
+-static inline void dma_mark_clean(void *addr, size_t size) {}
+-
+ static inline void dma_cache_sync(struct device *dev, void *vaddr,
+ size_t size, enum dma_data_direction direction)
+ {
+--- a/arch/x86/include/asm/swiotlb.h
++++ b/arch/x86/include/asm/swiotlb.h
+@@ -27,8 +27,6 @@ static inline void pci_swiotlb_late_init
+ }
+ #endif
+
+-static inline void dma_mark_clean(void *addr, size_t size) {}
+-
+ extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ unsigned long attrs);
+--- a/include/linux/dma-direct.h
++++ b/include/linux/dma-direct.h
+@@ -29,4 +29,13 @@ static inline bool dma_capable(struct de
+ return addr + size - 1 <= *dev->dma_mask;
+ }
+ #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
++
++#ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN
++void dma_mark_clean(void *addr, size_t size);
++#else
++static inline void dma_mark_clean(void *addr, size_t size)
++{
++}
++#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
++
+ #endif /* _LINUX_DMA_DIRECT_H */
diff --git a/patches.suse/dma-mapping-move-swiotlb-arch-helpers-to-a-new-header b/patches.suse/dma-mapping-move-swiotlb-arch-helpers-to-a-new-header
new file mode 100644
index 0000000000..dfec3865c6
--- /dev/null
+++ b/patches.suse/dma-mapping-move-swiotlb-arch-helpers-to-a-new-header
@@ -0,0 +1,697 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 10 Jan 2018 16:21:13 +0100
+Subject: dma-mapping: move swiotlb arch helpers to a new header
+Git-commit: ea8c64ace86647260ec4255f483e5844d62af2df
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+phys_to_dma, dma_to_phys and dma_capable are helpers published by
+architecture code for use of swiotlb and xen-swiotlb only. Drivers are
+not supposed to use these directly, but use the DMA API instead.
+
+Move these to a new asm/dma-direct.h helper, included by a
+linux/dma-direct.h wrapper that provides the default linear mapping
+unless the architecture wants to override it.
+
+In the MIPS case the existing dma-coherent.h is reused for now as
+untangling it will take a bit of work.
+
+[ ptesarik: Also include the new header from lib/dma-noop.c ]
+[ ptesarik: Removed the drivers/crypto/marvell/cesa.c hunk,
+ because SLE15-SP1 already contains upstream commit
+ 37d728f76c41ab819a9fd31d701de55102559484 ]
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/Kconfig | 4 +
+ arch/arm/Kconfig | 1
+ arch/arm/include/asm/dma-direct.h | 36 +++++++++++++++
+ arch/arm/include/asm/dma-mapping.h | 31 ------------
+ arch/arm64/include/asm/dma-mapping.h | 22 ---------
+ arch/arm64/mm/dma-mapping.c | 2
+ arch/ia64/include/asm/dma-mapping.h | 18 -------
+ arch/mips/Kconfig | 2
+ arch/mips/include/asm/dma-direct.h | 1
+ arch/mips/include/asm/dma-mapping.h | 8 ---
+ arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h | 8 +++
+ arch/mips/include/asm/mach-generic/dma-coherence.h | 12 -----
+ arch/mips/include/asm/mach-loongson64/dma-coherence.h | 8 +++
+ arch/powerpc/Kconfig | 1
+ arch/powerpc/include/asm/dma-direct.h | 29 ++++++++++++
+ arch/powerpc/include/asm/dma-mapping.h | 25 ----------
+ arch/tile/include/asm/dma-mapping.h | 18 -------
+ arch/unicore32/include/asm/dma-mapping.h | 18 -------
+ arch/x86/Kconfig | 1
+ arch/x86/include/asm/dma-direct.h | 30 ++++++++++++
+ arch/x86/include/asm/dma-mapping.h | 26 ----------
+ arch/x86/kernel/amd_gart_64.c | 1
+ arch/x86/kernel/pci-dma.c | 2
+ arch/x86/kernel/pci-nommu.c | 2
+ arch/x86/kernel/pci-swiotlb.c | 2
+ arch/x86/mm/mem_encrypt.c | 2
+ arch/x86/pci/sta2x11-fixup.c | 1
+ arch/xtensa/include/asm/dma-mapping.h | 10 ----
+ drivers/mtd/nand/qcom_nandc.c | 1
+ drivers/xen/swiotlb-xen.c | 2
+ include/linux/dma-direct.h | 32 +++++++++++++
+ lib/dma-noop.c | 2
+ lib/swiotlb.c | 2
+ 33 files changed, 164 insertions(+), 196 deletions(-)
+
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -864,6 +864,10 @@ config STRICT_MODULE_RWX
+ and non-text memory will be made non-executable. This provides
+ protection against certain security exploits (e.g. writing to text)
+
++# select if the architecture provides an asm/dma-direct.h header
++config ARCH_HAS_PHYS_TO_DMA
++ bool
++
+ config REFCOUNT_FULL
+ bool "Perform full reference count validation at the expense of speed"
+ help
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -6,6 +6,7 @@ config ARM
+ select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_SET_MEMORY
++ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
+ select ARCH_HAS_STRICT_MODULE_RWX if MMU
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+--- /dev/null
++++ b/arch/arm/include/asm/dma-direct.h
+@@ -0,0 +1,36 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef ASM_ARM_DMA_DIRECT_H
++#define ASM_ARM_DMA_DIRECT_H 1
++
++static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
++{
++ unsigned int offset = paddr & ~PAGE_MASK;
++ return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
++}
++
++static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
++{
++ unsigned int offset = dev_addr & ~PAGE_MASK;
++ return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
++}
++
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++ u64 limit, mask;
++
++ if (!dev->dma_mask)
++ return 0;
++
++ mask = *dev->dma_mask;
++
++ limit = (mask + 1) & ~mask;
++ if (limit && size > limit)
++ return 0;
++
++ if ((addr | (addr + size - 1)) & ~mask)
++ return 0;
++
++ return 1;
++}
++
++#endif /* ASM_ARM_DMA_DIRECT_H */
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -112,37 +112,6 @@ static inline bool is_device_dma_coheren
+ return dev->archdata.dma_coherent;
+ }
+
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- unsigned int offset = paddr & ~PAGE_MASK;
+- return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+-{
+- unsigned int offset = dev_addr & ~PAGE_MASK;
+- return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
+-}
+-
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- u64 limit, mask;
+-
+- if (!dev->dma_mask)
+- return 0;
+-
+- mask = *dev->dma_mask;
+-
+- limit = (mask + 1) & ~mask;
+- if (limit && size > limit)
+- return 0;
+-
+- if ((addr | (addr + size - 1)) & ~mask)
+- return 0;
+-
+- return 1;
+-}
+-
+ static inline void dma_mark_clean(void *addr, size_t size) { }
+
+ /**
+--- a/arch/arm64/include/asm/dma-mapping.h
++++ b/arch/arm64/include/asm/dma-mapping.h
+@@ -53,28 +53,6 @@ static inline bool is_device_dma_coheren
+ return dev->archdata.dma_coherent;
+ }
+
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- dma_addr_t dev_addr = (dma_addr_t)paddr;
+-
+- return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
+-{
+- phys_addr_t paddr = (phys_addr_t)dev_addr;
+-
+- return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
+-}
+-
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return false;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+ static inline void dma_mark_clean(void *addr, size_t size)
+ {
+ }
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -24,7 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/slab.h>
+ #include <linux/genalloc.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/dma-contiguous.h>
+ #include <linux/vmalloc.h>
+ #include <linux/swiotlb.h>
+--- a/arch/ia64/include/asm/dma-mapping.h
++++ b/arch/ia64/include/asm/dma-mapping.h
+@@ -28,24 +28,6 @@ static inline const struct dma_map_ops *
+ return platform_dma_get_ops(NULL);
+ }
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return 0;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return paddr;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return daddr;
+-}
+-
+ static inline void
+ dma_cache_sync (struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -423,6 +423,7 @@ config MACH_LOONGSON32
+
+ config MACH_LOONGSON64
+ bool "Loongson-2/3 family of machines"
++ select ARCH_HAS_PHYS_TO_DMA
+ select SYS_SUPPORTS_ZBOOT
+ help
+ This enables the support of Loongson-2/3 family of machines.
+@@ -894,6 +895,7 @@ config MIKROTIK_RB532
+ config CAVIUM_OCTEON_SOC
+ bool "Cavium Networks Octeon SoC based boards"
+ select CEVT_R4K
++ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_PHYS_ADDR_T_64BIT
+ select DMA_COHERENT
+ select SYS_SUPPORTS_64BIT_KERNEL
+--- /dev/null
++++ b/arch/mips/include/asm/dma-direct.h
+@@ -0,0 +1 @@
++#include <asm/dma-coherence.h>
+--- a/arch/mips/include/asm/dma-mapping.h
++++ b/arch/mips/include/asm/dma-mapping.h
+@@ -16,14 +16,6 @@ static inline const struct dma_map_ops *
+ return mips_dma_map_ops;
+ }
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return false;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+ extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+--- a/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
++++ b/arch/mips/include/asm/mach-cavium-octeon/dma-coherence.h
+@@ -61,6 +61,14 @@ static inline void plat_post_dma_flush(s
+ {
+ }
+
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++ if (!dev->dma_mask)
++ return false;
++
++ return addr + size - 1 <= *dev->dma_mask;
++}
++
+ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+
+--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
++++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
+@@ -70,16 +70,4 @@ static inline void plat_post_dma_flush(s
+ }
+ #endif
+
+-#ifdef CONFIG_SWIOTLB
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return paddr;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return daddr;
+-}
+-#endif
+-
+ #endif /* __ASM_MACH_GENERIC_DMA_COHERENCE_H */
+--- a/arch/mips/include/asm/mach-loongson64/dma-coherence.h
++++ b/arch/mips/include/asm/mach-loongson64/dma-coherence.h
+@@ -17,6 +17,14 @@
+
+ struct device;
+
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++ if (!dev->dma_mask)
++ return false;
++
++ return addr + size - 1 <= *dev->dma_mask;
++}
++
+ extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+ extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -134,6 +134,7 @@ config PPC
+ select ARCH_HAS_DMA_SET_COHERENT_MASK
+ select ARCH_HAS_ELF_RANDOMIZE
+ select ARCH_HAS_GCOV_PROFILE_ALL
++ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_PMEM_API if PPC64
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
+ select ARCH_HAS_SG_CHAIN
+--- /dev/null
++++ b/arch/powerpc/include/asm/dma-direct.h
+@@ -0,0 +1,29 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef ASM_POWERPC_DMA_DIRECT_H
++#define ASM_POWERPC_DMA_DIRECT_H 1
++
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++#ifdef CONFIG_SWIOTLB
++ struct dev_archdata *sd = &dev->archdata;
++
++ if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
++ return false;
++#endif
++
++ if (!dev->dma_mask)
++ return false;
++
++ return addr + size - 1 <= *dev->dma_mask;
++}
++
++static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
++{
++ return paddr + get_dma_offset(dev);
++}
++
++static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
++{
++ return daddr - get_dma_offset(dev);
++}
++#endif /* ASM_POWERPC_DMA_DIRECT_H */
+--- a/arch/powerpc/include/asm/dma-mapping.h
++++ b/arch/powerpc/include/asm/dma-mapping.h
+@@ -119,31 +119,6 @@ extern int dma_set_mask(struct device *d
+ extern int __dma_set_mask(struct device *dev, u64 dma_mask);
+ extern u64 __dma_get_required_mask(struct device *dev);
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+-#ifdef CONFIG_SWIOTLB
+- struct dev_archdata *sd = &dev->archdata;
+-
+- if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
+- return false;
+-#endif
+-
+- if (!dev->dma_mask)
+- return false;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return paddr + get_dma_offset(dev);
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return daddr - get_dma_offset(dev);
+-}
+-
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+--- a/arch/tile/include/asm/dma-mapping.h
++++ b/arch/tile/include/asm/dma-mapping.h
+@@ -44,26 +44,8 @@ static inline void set_dma_offset(struct
+ dev->archdata.dma_offset = off;
+ }
+
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return paddr;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return daddr;
+-}
+-
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return 0;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+ #define HAVE_ARCH_DMA_SET_MASK 1
+ int dma_set_mask(struct device *dev, u64 mask);
+
+--- a/arch/unicore32/include/asm/dma-mapping.h
++++ b/arch/unicore32/include/asm/dma-mapping.h
+@@ -28,24 +28,6 @@ static inline const struct dma_map_ops *
+ return &swiotlb_dma_map_ops;
+ }
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (dev && dev->dma_mask)
+- return addr + size - 1 <= *dev->dma_mask;
+-
+- return 1;
+-}
+-
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return paddr;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return daddr;
+-}
+-
+ static inline void dma_mark_clean(void *addr, size_t size) {}
+
+ static inline void dma_cache_sync(struct device *dev, void *vaddr,
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -52,6 +52,7 @@ config X86
+ select ARCH_HAS_FAST_MULTIPLIER
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_KCOV if X86_64
++ select ARCH_HAS_PHYS_TO_DMA
+ select ARCH_HAS_PMEM_API if X86_64
+ select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
+ select ARCH_HAS_SET_MEMORY
+--- /dev/null
++++ b/arch/x86/include/asm/dma-direct.h
+@@ -0,0 +1,30 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef ASM_X86_DMA_DIRECT_H
++#define ASM_X86_DMA_DIRECT_H 1
++
++#include <linux/mem_encrypt.h>
++
++#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
++bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
++dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
++phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
++#else
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++ if (!dev->dma_mask)
++ return 0;
++
++ return addr + size - 1 <= *dev->dma_mask;
++}
++
++static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
++{
++ return __sme_set(paddr);
++}
++
++static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
++{
++ return __sme_clr(daddr);
++}
++#endif /* CONFIG_X86_DMA_REMAP */
++#endif /* ASM_X86_DMA_DIRECT_H */
+--- a/arch/x86/include/asm/dma-mapping.h
++++ b/arch/x86/include/asm/dma-mapping.h
+@@ -12,7 +12,6 @@
+ #include <asm/io.h>
+ #include <asm/swiotlb.h>
+ #include <linux/dma-contiguous.h>
+-#include <linux/mem_encrypt.h>
+
+ #ifdef CONFIG_ISA
+ # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
+@@ -47,31 +46,6 @@ extern void dma_generic_free_coherent(st
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs);
+
+-#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
+-extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
+-extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
+-extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
+-#else
+-
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return 0;
+-
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return __sme_set(paddr);
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return __sme_clr(daddr);
+-}
+-#endif /* CONFIG_X86_DMA_REMAP */
+-
+ static inline void
+ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction dir)
+--- a/arch/x86/kernel/amd_gart_64.c
++++ b/arch/x86/kernel/amd_gart_64.c
+@@ -31,6 +31,7 @@
+ #include <linux/io.h>
+ #include <linux/gfp.h>
+ #include <linux/atomic.h>
++#include <linux/dma-direct.h>
+ #include <asm/mtrr.h>
+ #include <asm/pgtable.h>
+ #include <asm/proto.h>
+--- a/arch/x86/kernel/pci-dma.c
++++ b/arch/x86/kernel/pci-dma.c
+@@ -1,4 +1,4 @@
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/dma-debug.h>
+ #include <linux/dmar.h>
+ #include <linux/export.h>
+--- a/arch/x86/kernel/pci-nommu.c
++++ b/arch/x86/kernel/pci-nommu.c
+@@ -1,6 +1,6 @@
+ /* Fallback functions when the main IOMMU code is not compiled in. This
+ code is roughly equivalent to i386. */
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/scatterlist.h>
+ #include <linux/string.h>
+ #include <linux/gfp.h>
+--- a/arch/x86/kernel/pci-swiotlb.c
++++ b/arch/x86/kernel/pci-swiotlb.c
+@@ -5,7 +5,7 @@
+ #include <linux/init.h>
+ #include <linux/swiotlb.h>
+ #include <linux/bootmem.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/mem_encrypt.h>
+
+ #include <asm/iommu.h>
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -13,7 +13,7 @@
+ #include <linux/linkage.h>
+ #include <linux/init.h>
+ #include <linux/mm.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/swiotlb.h>
+ #include <linux/mem_encrypt.h>
+
+--- a/arch/x86/pci/sta2x11-fixup.c
++++ b/arch/x86/pci/sta2x11-fixup.c
+@@ -26,6 +26,7 @@
+ #include <linux/pci_ids.h>
+ #include <linux/export.h>
+ #include <linux/list.h>
++#include <linux/dma-direct.h>
+
+ #define STA2X11_SWIOTLB_SIZE (4*1024*1024)
+ extern int swiotlb_late_init_with_default_size(size_t default_size);
+--- a/arch/xtensa/include/asm/dma-mapping.h
++++ b/arch/xtensa/include/asm/dma-mapping.h
+@@ -28,14 +28,4 @@ static inline const struct dma_map_ops *
+ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+-static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+-{
+- return (dma_addr_t)paddr;
+-}
+-
+-static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+-{
+- return (phys_addr_t)daddr;
+-}
+-
+ #endif /* _XTENSA_DMA_MAPPING_H */
+--- a/drivers/mtd/nand/qcom_nandc.c
++++ b/drivers/mtd/nand/qcom_nandc.c
+@@ -22,6 +22,7 @@
+ #include <linux/of.h>
+ #include <linux/of_device.h>
+ #include <linux/delay.h>
++#include <linux/dma-direct.h> /* XXX: drivers shall never use this directly! */
+
+ /* NANDc reg offsets */
+ #define NAND_FLASH_CMD 0x00
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -36,7 +36,7 @@
+ #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+ #include <linux/bootmem.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/export.h>
+ #include <xen/swiotlb-xen.h>
+ #include <xen/page.h>
+--- /dev/null
++++ b/include/linux/dma-direct.h
+@@ -0,0 +1,32 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_DMA_DIRECT_H
++#define _LINUX_DMA_DIRECT_H 1
++
++#include <linux/dma-mapping.h>
++
++#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
++#include <asm/dma-direct.h>
++#else
++static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
++{
++ dma_addr_t dev_addr = (dma_addr_t)paddr;
++
++ return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
++}
++
++static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
++{
++ phys_addr_t paddr = (phys_addr_t)dev_addr;
++
++ return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
++}
++
++static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
++{
++ if (!dev->dma_mask)
++ return false;
++
++ return addr + size - 1 <= *dev->dma_mask;
++}
++#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
++#endif /* _LINUX_DMA_DIRECT_H */
+--- a/lib/dma-noop.c
++++ b/lib/dma-noop.c
+@@ -5,7 +5,7 @@
+ */
+ #include <linux/export.h>
+ #include <linux/mm.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/scatterlist.h>
+
+ static void *dma_noop_alloc(struct device *dev, size_t size,
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -18,7 +18,7 @@
+ */
+
+ #include <linux/cache.h>
+-#include <linux/dma-mapping.h>
++#include <linux/dma-direct.h>
+ #include <linux/mm.h>
+ #include <linux/export.h>
+ #include <linux/spinlock.h>
diff --git a/patches.suse/dma-mapping-take-dma_pfn_offset-into-account-in-dma_max_pfn b/patches.suse/dma-mapping-take-dma_pfn_offset-into-account-in-dma_max_pfn
new file mode 100644
index 0000000000..9cb753b9bd
--- /dev/null
+++ b/patches.suse/dma-mapping-take-dma_pfn_offset-into-account-in-dma_max_pfn
@@ -0,0 +1,28 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 30 Nov 2017 07:32:51 -0800
+Subject: dma-mapping: take dma_pfn_offset into account in dma_max_pfn
+Git-commit: a41ef1e455a9796be8cb986f0616f52453ac8e4b
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+This makes sure the generic version can be used with architectures /
+devices that have a DMA offset in the direct mapping.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ include/linux/dma-mapping.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -686,7 +686,7 @@ static inline int dma_set_seg_boundary(s
+ #ifndef dma_max_pfn
+ static inline unsigned long dma_max_pfn(struct device *dev)
+ {
+- return *dev->dma_mask >> PAGE_SHIFT;
++ return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
+ }
+ #endif
+
diff --git a/patches.suse/iommu-helper-mark-iommu_is_span_boundary-as-inline b/patches.suse/iommu-helper-mark-iommu_is_span_boundary-as-inline
new file mode 100644
index 0000000000..3b09bbe803
--- /dev/null
+++ b/patches.suse/iommu-helper-mark-iommu_is_span_boundary-as-inline
@@ -0,0 +1,221 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 3 Apr 2018 15:41:07 +0200
+Subject: iommu-helper: mark iommu_is_span_boundary as inline
+Git-commit: 79c1879ee5473e3404469b07f9bccfe6d0814b93
+Patch-mainline: v4.18-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+This avoids selecting IOMMU_HELPER just for this function. And we only
+use it once or twice in normal builds so this often even is a size
+reduction.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/alpha/Kconfig | 3 ---
+ arch/arm/Kconfig | 3 ---
+ arch/arm64/Kconfig | 3 ---
+ arch/ia64/Kconfig | 3 ---
+ arch/mips/cavium-octeon/Kconfig | 4 ----
+ arch/mips/loongson64/Kconfig | 4 ----
+ arch/mips/netlogic/Kconfig | 3 ---
+ arch/powerpc/Kconfig | 1 -
+ arch/unicore32/mm/Kconfig | 3 ---
+ arch/x86/Kconfig | 2 +-
+ drivers/parisc/Kconfig | 5 -----
+ include/linux/iommu-helper.h | 13 ++++++++++---
+ lib/iommu-helper.c | 12 +-----------
+ 13 files changed, 12 insertions(+), 47 deletions(-)
+
+--- a/arch/alpha/Kconfig
++++ b/arch/alpha/Kconfig
+@@ -342,9 +342,6 @@ config PCI_DOMAINS
+ config PCI_SYSCALL
+ def_bool PCI
+
+-config IOMMU_HELPER
+- def_bool PCI
+-
+ config ALPHA_NONAME
+ bool
+ depends on ALPHA_BOOK1 || ALPHA_NONAME_CH
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1782,9 +1782,6 @@ config SECCOMP
+ config SWIOTLB
+ def_bool y
+
+-config IOMMU_HELPER
+- def_bool SWIOTLB
+-
+ config PARAVIRT
+ bool "Enable paravirtualization code"
+ help
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -229,9 +229,6 @@ config SMP
+ config SWIOTLB
+ def_bool y
+
+-config IOMMU_HELPER
+- def_bool SWIOTLB
+-
+ config KERNEL_MODE_NEON
+ def_bool y
+
+--- a/arch/ia64/Kconfig
++++ b/arch/ia64/Kconfig
+@@ -607,6 +607,3 @@ source "security/Kconfig"
+ source "crypto/Kconfig"
+
+ source "lib/Kconfig"
+-
+-config IOMMU_HELPER
+- def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
+--- a/arch/mips/cavium-octeon/Kconfig
++++ b/arch/mips/cavium-octeon/Kconfig
+@@ -66,15 +66,11 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY
+ help
+ Lock the kernel's implementation of memcpy() into L2.
+
+-config IOMMU_HELPER
+- bool
+-
+ config NEED_SG_DMA_LENGTH
+ bool
+
+ config SWIOTLB
+ def_bool y
+- select IOMMU_HELPER
+ select NEED_SG_DMA_LENGTH
+
+ config OCTEON_ILM
+--- a/arch/mips/loongson64/Kconfig
++++ b/arch/mips/loongson64/Kconfig
+@@ -125,9 +125,6 @@ config LOONGSON_UART_BASE
+ default y
+ depends on EARLY_PRINTK || SERIAL_8250
+
+-config IOMMU_HELPER
+- bool
+-
+ config NEED_SG_DMA_LENGTH
+ bool
+
+@@ -135,7 +132,6 @@ config SWIOTLB
+ bool "Soft IOMMU Support for All-Memory DMA"
+ default y
+ depends on CPU_LOONGSON3
+- select IOMMU_HELPER
+ select NEED_SG_DMA_LENGTH
+ select NEED_DMA_MAP_STATE
+
+--- a/arch/mips/netlogic/Kconfig
++++ b/arch/mips/netlogic/Kconfig
+@@ -82,9 +82,6 @@ endif
+ config NLM_COMMON
+ bool
+
+-config IOMMU_HELPER
+- bool
+-
+ config NEED_SG_DMA_LENGTH
+ bool
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -475,7 +475,6 @@ config IOMMU_HELPER
+ config SWIOTLB
+ bool "SWIOTLB support"
+ default n
+- select IOMMU_HELPER
+ ---help---
+ Support for IO bounce buffering for systems without an IOMMU.
+ This allows us to DMA to the full physical address space on
+--- a/arch/unicore32/mm/Kconfig
++++ b/arch/unicore32/mm/Kconfig
+@@ -42,9 +42,6 @@ config CPU_TLB_SINGLE_ENTRY_DISABLE
+ config SWIOTLB
+ def_bool y
+
+-config IOMMU_HELPER
+- def_bool SWIOTLB
+-
+ config NEED_SG_DMA_LENGTH
+ def_bool SWIOTLB
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -919,7 +919,7 @@ config SWIOTLB
+
+ config IOMMU_HELPER
+ def_bool y
+- depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU
++ depends on CALGARY_IOMMU || GART_IOMMU
+
+ config MAXSMP
+ bool "Enable Maximum number of SMP Processors and NUMA Nodes"
+--- a/drivers/parisc/Kconfig
++++ b/drivers/parisc/Kconfig
+@@ -103,11 +103,6 @@ config IOMMU_SBA
+ depends on PCI_LBA
+ default PCI_LBA
+
+-config IOMMU_HELPER
+- bool
+- depends on IOMMU_SBA || IOMMU_CCIO
+- default y
+-
+ source "drivers/pcmcia/Kconfig"
+
+ endmenu
+--- a/include/linux/iommu-helper.h
++++ b/include/linux/iommu-helper.h
+@@ -1,6 +1,7 @@
+ #ifndef _LINUX_IOMMU_HELPER_H
+ #define _LINUX_IOMMU_HELPER_H
+
++#include <linux/bug.h>
+ #include <linux/kernel.h>
+
+ static inline unsigned long iommu_device_max_index(unsigned long size,
+@@ -13,9 +14,15 @@ static inline unsigned long iommu_device
+ return size;
+ }
+
+-extern int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+- unsigned long shift,
+- unsigned long boundary_size);
++static inline int iommu_is_span_boundary(unsigned int index, unsigned int nr,
++ unsigned long shift, unsigned long boundary_size)
++{
++ BUG_ON(!is_power_of_2(boundary_size));
++
++ shift = (shift + index) & (boundary_size - 1);
++ return shift + nr > boundary_size;
++}
++
+ extern unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
+ unsigned long shift,
+--- a/lib/iommu-helper.c
++++ b/lib/iommu-helper.c
+@@ -4,17 +4,7 @@
+
+ #include <linux/export.h>
+ #include <linux/bitmap.h>
+-#include <linux/bug.h>
+-
+-int iommu_is_span_boundary(unsigned int index, unsigned int nr,
+- unsigned long shift,
+- unsigned long boundary_size)
+-{
+- BUG_ON(!is_power_of_2(boundary_size));
+-
+- shift = (shift + index) & (boundary_size - 1);
+- return shift + nr > boundary_size;
+-}
++#include <linux/iommu-helper.h>
+
+ unsigned long iommu_area_alloc(unsigned long *map, unsigned long size,
+ unsigned long start, unsigned int nr,
diff --git a/patches.suse/mips-fix-an-off-by-one-in-dma_capable b/patches.suse/mips-fix-an-off-by-one-in-dma_capable
new file mode 100644
index 0000000000..802646ce4f
--- /dev/null
+++ b/patches.suse/mips-fix-an-off-by-one-in-dma_capable
@@ -0,0 +1,27 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 10 Jan 2018 16:19:47 +0100
+Subject: mips: fix an off-by-one in dma_capable
+Git-commit: 10dac04c79b181b255a62f60919f29acc56277ac
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+This makes it match the generic version.
+
+Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/mips/include/asm/dma-mapping.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/include/asm/dma-mapping.h
++++ b/arch/mips/include/asm/dma-mapping.h
+@@ -21,7 +21,7 @@ static inline bool dma_capable(struct de
+ if (!dev->dma_mask)
+ return false;
+
+- return addr + size <= *dev->dma_mask;
++ return addr + size - 1 <= *dev->dma_mask;
+ }
+
+ static inline void dma_mark_clean(void *addr, size_t size) {}
diff --git a/patches.suse/net-af_iucv-build-proper-skbs-for-hipertransport b/patches.suse/net-af_iucv-build-proper-skbs-for-hipertransport
new file mode 100644
index 0000000000..21ebaa90c5
--- /dev/null
+++ b/patches.suse/net-af_iucv-build-proper-skbs-for-hipertransport
@@ -0,0 +1,89 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Tue, 18 Jun 2019 20:43:00 +0200
+Subject: net/af_iucv: build proper skbs for HiperTransport
+Git-commit: 238965b71b968dc5b3c0fe430e946f488322c4b5
+Patch-mainline: v5.2-rc1
+References: bsc#1142221 LTC#179332
+
+The HiperSockets-based transport path in af_iucv is still too closely
+entangled with qeth.
+With commit a647a02512ca ("s390/qeth: speed-up L3 IQD xmit"), the
+relevant xmit code in qeth has begun to use skb_cow_head(). So to avoid
+unnecessary skb head expansions, af_iucv must learn to
+1) respect dev->needed_headroom when allocating skbs, and
+2) drop the header reference before cloning the skb.
+
+While at it, also stop hard-coding the LL-header creation stage and just
+use the appropriate helper.
+
+Fixes: a647a02512ca ("s390/qeth: speed-up L3 IQD xmit")
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ net/iucv/af_iucv.c | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -13,6 +13,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+ #include <linux/module.h>
++#include <linux/netdevice.h>
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
+@@ -347,14 +348,14 @@ static int afiucv_hs_send(struct iucv_me
+ if (imsg)
+ memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
+
+- skb_push(skb, ETH_HLEN);
+- memset(skb->data, 0, ETH_HLEN);
+-
+ skb->dev = iucv->hs_dev;
+ if (!skb->dev) {
+ err = -ENODEV;
+ goto err_free;
+ }
++
++ dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
++
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+ err = -ENETDOWN;
+ goto err_free;
+@@ -367,6 +368,8 @@ static int afiucv_hs_send(struct iucv_me
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
++
++ __skb_header_release(skb);
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb) {
+ err = -ENOMEM;
+@@ -466,12 +469,14 @@ static void iucv_sever_path(struct sock
+ /* Send controlling flags through an IUCV socket for HIPER transport */
+ static int iucv_send_ctrl(struct sock *sk, u8 flags)
+ {
++ struct iucv_sock *iucv = iucv_sk(sk);
+ int err = 0;
+ int blen;
+ struct sk_buff *skb;
+ u8 shutdown = 0;
+
+- blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
++ blen = sizeof(struct af_iucv_trans_hdr) +
++ LL_RESERVED_SPACE(iucv->hs_dev);
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ /* controlling flags should be sent anyway */
+ shutdown = sk->sk_shutdown;
+@@ -1134,7 +1139,8 @@ static int iucv_sock_sendmsg(struct sock
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+- headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
++ headroom = sizeof(struct af_iucv_trans_hdr) +
++ LL_RESERVED_SPACE(iucv->hs_dev);
+ linear = len;
+ } else {
+ if (len < PAGE_SIZE) {
diff --git a/patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport b/patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport
new file mode 100644
index 0000000000..d18d0f56c6
--- /dev/null
+++ b/patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport
@@ -0,0 +1,59 @@
+From: Julian Wiedmann <jwi@linux.ibm.com>
+Date: Tue, 18 Jun 2019 20:42:59 +0200
+Subject: net/af_iucv: remove GFP_DMA restriction for HiperTransport
+Git-commit: fdbf6326912d578a31ac4ca0933c919eadf1d54c
+Patch-mainline: v5.2-rc1
+References: bsc#1142221 LTC#179332
+
+af_iucv sockets over z/VM IUCV require that their skbs are allocated
+in DMA memory. This restriction doesn't apply to connections over
+HiperSockets. So only set this limit for z/VM IUCV sockets, thereby
+increasing the likelihood that the large (and linear!) allocations for
+HiperTransport messages succeed.
+
+Fixes: 3881ac441f64 ("af_iucv: add HiperSockets transport")
+Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
+Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
+Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ net/iucv/af_iucv.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -588,7 +588,6 @@ static struct sock *iucv_sock_alloc(stru
+
+ sk->sk_destruct = iucv_sock_destruct;
+ sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
+- sk->sk_allocation = GFP_DMA;
+
+ sock_reset_flag(sk, SOCK_ZAPPED);
+
+@@ -782,6 +781,7 @@ vm_bind:
+ memcpy(iucv->src_user_id, iucv_userid, 8);
+ sk->sk_state = IUCV_BOUND;
+ iucv->transport = AF_IUCV_TRANS_IUCV;
++ sk->sk_allocation |= GFP_DMA;
+ if (!iucv->msglimit)
+ iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
+ goto done_unlock;
+@@ -806,6 +806,8 @@ static int iucv_sock_autobind(struct soc
+ return -EPROTO;
+
+ memcpy(iucv->src_user_id, iucv_userid, 8);
++ iucv->transport = AF_IUCV_TRANS_IUCV;
++ sk->sk_allocation |= GFP_DMA;
+
+ write_lock_bh(&iucv_sk_list.lock);
+ __iucv_auto_name(iucv);
+@@ -1782,6 +1784,8 @@ static int iucv_callback_connreq(struct
+
+ niucv = iucv_sk(nsk);
+ iucv_sock_init(nsk, sk);
++ niucv->transport = AF_IUCV_TRANS_IUCV;
++ nsk->sk_allocation |= GFP_DMA;
+
+ /* Set the new iucv_sock */
+ memcpy(niucv->dst_name, ipuser + 8, 8);
diff --git a/patches.suse/pkey-indicate-old-mkvp-only-if-old-and-current-mkvp-are-different b/patches.suse/pkey-indicate-old-mkvp-only-if-old-and-current-mkvp-are-different
new file mode 100644
index 0000000000..0173b47b67
--- /dev/null
+++ b/patches.suse/pkey-indicate-old-mkvp-only-if-old-and-current-mkvp-are-different
@@ -0,0 +1,32 @@
+From: Ingo Franzki <ifranzki@linux.ibm.com>
+Date: Wed, 20 Feb 2019 14:01:39 +0100
+Subject: pkey: Indicate old mkvp only if old and current mkvp are different
+Git-commit: ebb7c695d3bc7a4986b92edc8d9ef43491be183e
+Patch-mainline: v5.1-rc1
+References: bsc#1137827 LTC#178090
+
+When the CCA master key is set twice with the same master key,
+then the old and the current master key are the same and thus the
+verification patterns are the same, too. The check to report if a
+secure key is currently wrapped by the old master key erroneously
+reports old mkvp in this case.
+
+Reviewed-by: Harald Freudenberger <freude@linux.ibm.com>
+Signed-off-by: Ingo Franzki <ifranzki@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/crypto/pkey_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/crypto/pkey_api.c
++++ b/drivers/s390/crypto/pkey_api.c
+@@ -1046,7 +1046,7 @@ int pkey_verifykey(const struct pkey_sec
+ rc = mkvp_cache_fetch(cardnr, domain, mkvp);
+ if (rc)
+ goto out;
+- if (t->mkvp == mkvp[1]) {
++ if (t->mkvp == mkvp[1] && t->mkvp != mkvp[0]) {
+ DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+ if (pattributes)
+ *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
diff --git a/patches.suse/s390-add-alignment-hints-to-vector-load-and-store b/patches.suse/s390-add-alignment-hints-to-vector-load-and-store
new file mode 100644
index 0000000000..6d8034c0c8
--- /dev/null
+++ b/patches.suse/s390-add-alignment-hints-to-vector-load-and-store
@@ -0,0 +1,48 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 6 Feb 2019 18:06:03 +0100
+Subject: s390: add alignment hints to vector load and store
+Git-commit: 142c52d7bce45d335f48d53fdbf428bb15cf3924
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6907 FATE#327564 LTC#175887
+
+The z14 introduced alignment hints to increase the performance of
+vector loads and stores. The kernel uses an implicit alignment
+of 8 bytes for the vector registers, set the alignment hint to 3.
+
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/vx-insn.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/arch/s390/include/asm/vx-insn.h
++++ b/arch/s390/include/asm/vx-insn.h
+@@ -362,23 +362,23 @@
+ .endm
+
+ /* VECTOR LOAD MULTIPLE */
+-.macro VLM vfrom, vto, disp, base
++.macro VLM vfrom, vto, disp, base, hint=3
+ VX_NUM v1, \vfrom
+ VX_NUM v3, \vto
+ GR_NUM b2, \base /* Base register */
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+- MRXBOPC 0, 0x36, v1, v3
++ MRXBOPC \hint, 0x36, v1, v3
+ .endm
+
+ /* VECTOR STORE MULTIPLE */
+-.macro VSTM vfrom, vto, disp, base
++.macro VSTM vfrom, vto, disp, base, hint=3
+ VX_NUM v1, \vfrom
+ VX_NUM v3, \vto
+ GR_NUM b2, \base /* Base register */
+ .word 0xE700 | ((v1&15) << 4) | (v3&15)
+ .word (b2 << 12) | (\disp)
+- MRXBOPC 0, 0x3E, v1, v3
++ MRXBOPC \hint, 0x3E, v1, v3
+ .endm
+
+ /* VECTOR PERMUTE */
diff --git a/patches.suse/s390-airq-use-dma-memory-for-adapter-interrupts b/patches.suse/s390-airq-use-dma-memory-for-adapter-interrupts
new file mode 100644
index 0000000000..005444eb9b
--- /dev/null
+++ b/patches.suse/s390-airq-use-dma-memory-for-adapter-interrupts
@@ -0,0 +1,168 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Thu, 13 Sep 2018 18:57:16 +0200
+Subject: s390/airq: use DMA memory for adapter interrupts
+Git-commit: b50623e5db802e41736f3305cb54c03bc7f0e30a
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Protected virtualization guests have to use shared pages for airq
+notifier bit vectors, because the hypervisor needs to write these bits.
+
+Let us make sure we allocate DMA memory for the notifier bit vectors by
+replacing the kmem_cache with a dma_cache and kalloc() with
+cio_dma_zalloc().
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/airq.h | 2 ++
+ drivers/s390/cio/airq.c | 37 +++++++++++++++++++++++--------------
+ drivers/s390/cio/cio.h | 2 ++
+ drivers/s390/cio/css.c | 1 +
+ 4 files changed, 28 insertions(+), 14 deletions(-)
+
+--- a/arch/s390/include/asm/airq.h
++++ b/arch/s390/include/asm/airq.h
+@@ -10,6 +10,7 @@
+ #define _ASM_S390_AIRQ_H
+
+ #include <linux/bit_spinlock.h>
++#include <linux/dma-mapping.h>
+
+ struct airq_struct {
+ struct hlist_node list; /* Handler queueing. */
+@@ -28,6 +29,7 @@ void unregister_adapter_interrupt(struct
+ /* Adapter interrupt bit vector */
+ struct airq_iv {
+ unsigned long *vector; /* Adapter interrupt bit vector */
++ dma_addr_t vector_dma; /* Adapter interrupt bit vector dma */
+ unsigned long *avail; /* Allocation bit mask for the bit vector */
+ unsigned long *bitlock; /* Lock bit mask for the bit vector */
+ unsigned long *ptr; /* Pointer associated with each bit */
+--- a/drivers/s390/cio/airq.c
++++ b/drivers/s390/cio/airq.c
+@@ -15,9 +15,11 @@
+ #include <linux/mutex.h>
+ #include <linux/rculist.h>
+ #include <linux/slab.h>
++#include <linux/dmapool.h>
+
+ #include <asm/airq.h>
+ #include <asm/isc.h>
++#include <asm/cio.h>
+
+ #include "cio.h"
+ #include "cio_debug.h"
+@@ -26,7 +28,7 @@
+ static DEFINE_SPINLOCK(airq_lists_lock);
+ static struct hlist_head airq_lists[MAX_ISC+1];
+
+-static struct kmem_cache *airq_iv_cache;
++static struct dma_pool *airq_iv_cache;
+
+ /**
+ * register_adapter_interrupt() - register adapter interrupt handler
+@@ -114,6 +116,11 @@ void __init init_airq_interrupts(void)
+ setup_irq(THIN_INTERRUPT, &airq_interrupt);
+ }
+
++static inline unsigned long iv_size(unsigned long bits)
++{
++ return BITS_TO_LONGS(bits) * sizeof(unsigned long);
++}
++
+ /**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+@@ -131,17 +138,19 @@ struct airq_iv *airq_iv_create(unsigned
+ goto out;
+ iv->bits = bits;
+ iv->flags = flags;
+- size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
++ size = iv_size(bits);
+
+ if (flags & AIRQ_IV_CACHELINE) {
+- if ((cache_line_size() * BITS_PER_BYTE) < bits)
++ if ((cache_line_size() * BITS_PER_BYTE) < bits
++ || !airq_iv_cache)
+ goto out_free;
+
+- iv->vector = kmem_cache_zalloc(airq_iv_cache, GFP_KERNEL);
++ iv->vector = dma_pool_zalloc(airq_iv_cache, GFP_KERNEL,
++ &iv->vector_dma);
+ if (!iv->vector)
+ goto out_free;
+ } else {
+- iv->vector = kzalloc(size, GFP_KERNEL);
++ iv->vector = cio_dma_zalloc(size);
+ if (!iv->vector)
+ goto out_free;
+ }
+@@ -177,10 +186,10 @@ out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+- if (iv->flags & AIRQ_IV_CACHELINE)
+- kmem_cache_free(airq_iv_cache, iv->vector);
++ if (iv->flags & AIRQ_IV_CACHELINE && iv->vector)
++ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+- kfree(iv->vector);
++ cio_dma_free(iv->vector, size);
+ kfree(iv);
+ out:
+ return NULL;
+@@ -197,9 +206,9 @@ void airq_iv_release(struct airq_iv *iv)
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ if (iv->flags & AIRQ_IV_CACHELINE)
+- kmem_cache_free(airq_iv_cache, iv->vector);
++ dma_pool_free(airq_iv_cache, iv->vector, iv->vector_dma);
+ else
+- kfree(iv->vector);
++ cio_dma_free(iv->vector, iv_size(iv->bits));
+ kfree(iv->avail);
+ kfree(iv);
+ }
+@@ -294,12 +303,12 @@ unsigned long airq_iv_scan(struct airq_i
+ }
+ EXPORT_SYMBOL(airq_iv_scan);
+
+-static int __init airq_init(void)
++int __init airq_init(void)
+ {
+- airq_iv_cache = kmem_cache_create("airq_iv_cache", cache_line_size(),
+- cache_line_size(), 0, NULL);
++ airq_iv_cache = dma_pool_create("airq_iv_cache", cio_get_dma_css_dev(),
++ cache_line_size(),
++ cache_line_size(), PAGE_SIZE);
+ if (!airq_iv_cache)
+ return -ENOMEM;
+ return 0;
+ }
+-subsys_initcall(airq_init);
+--- a/drivers/s390/cio/cio.h
++++ b/drivers/s390/cio/cio.h
+@@ -135,6 +135,8 @@ extern int cio_commit_config(struct subc
+ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
+ int cio_tm_intrg(struct subchannel *sch);
+
++extern int __init airq_init(void);
++
+ /* Use with care. */
+ #ifdef CONFIG_CCW_CONSOLE
+ extern struct subchannel *cio_probe_console(void);
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -1101,6 +1101,7 @@ static int __init css_bus_init(void)
+ ret = cio_dma_pool_init();
+ if (ret)
+ goto out_unregister_pmn;
++ airq_init();
+ css_init_done = 1;
+
+ /* Enable default isc for I/O subchannels. */
diff --git a/patches.suse/s390-cio-add-basic-protected-virtualization-support b/patches.suse/s390-cio-add-basic-protected-virtualization-support
new file mode 100644
index 0000000000..18a4af23bb
--- /dev/null
+++ b/patches.suse/s390-cio-add-basic-protected-virtualization-support
@@ -0,0 +1,721 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Tue, 26 Mar 2019 12:41:09 +0100
+Subject: s390/cio: add basic protected virtualization support
+Git-commit: 37db8985b2116c89a3cbaf87083a02f83afaba5b
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+As virtio-ccw devices are channel devices, we need to use the
+dma area within the common I/O layer for any communication with
+the hypervisor.
+
+Note that we do not need to use that area for control blocks
+directly referenced by instructions, e.g. the orb.
+
+It handles neither QDIO in the common code, nor any device type specific
+stuff (like channel programs constructed by the DASD driver).
+
+An interesting side effect is that virtio structures are now going to
+get allocated in 31 bit addressable storage.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/ccwdev.h | 4 ++
+ drivers/s390/cio/ccwreq.c | 9 ++---
+ drivers/s390/cio/device.c | 68 ++++++++++++++++++++++++++++++++-------
+ drivers/s390/cio/device_fsm.c | 49 ++++++++++++++++------------
+ drivers/s390/cio/device_id.c | 20 ++++++-----
+ drivers/s390/cio/device_ops.c | 21 ++++++++++--
+ drivers/s390/cio/device_pgid.c | 22 ++++++------
+ drivers/s390/cio/device_status.c | 24 ++++++-------
+ drivers/s390/cio/io_sch.h | 20 ++++++++---
+ drivers/s390/virtio/virtio_ccw.c | 10 -----
+ 10 files changed, 164 insertions(+), 83 deletions(-)
+
+--- a/arch/s390/include/asm/ccwdev.h
++++ b/arch/s390/include/asm/ccwdev.h
+@@ -225,6 +225,10 @@ extern int ccw_device_enable_console(str
+ extern void ccw_device_wait_idle(struct ccw_device *);
+ extern int ccw_device_force_console(struct ccw_device *);
+
++extern void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size);
++extern void ccw_device_dma_free(struct ccw_device *cdev,
++ void *cpu_addr, size_t size);
++
+ int ccw_device_siosl(struct ccw_device *);
+
+ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
+--- a/drivers/s390/cio/ccwreq.c
++++ b/drivers/s390/cio/ccwreq.c
+@@ -62,7 +62,7 @@ static void ccwreq_stop(struct ccw_devic
+ return;
+ req->done = 1;
+ ccw_device_set_timeout(cdev, 0);
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ if (rc && rc != -ENODEV && req->drc)
+ rc = req->drc;
+ req->callback(cdev, req->data, rc);
+@@ -85,7 +85,7 @@ static void ccwreq_do(struct ccw_device
+ continue;
+ }
+ /* Perform start function. */
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ rc = cio_start(sch, cp, (u8) req->mask);
+ if (rc == 0) {
+ /* I/O started successfully. */
+@@ -168,7 +168,7 @@ int ccw_request_cancel(struct ccw_device
+ */
+ static enum io_status ccwreq_status(struct ccw_device *cdev, struct irb *lcirb)
+ {
+- struct irb *irb = &cdev->private->irb;
++ struct irb *irb = &cdev->private->dma_area->irb;
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ enum uc_todo todo;
+
+@@ -186,7 +186,8 @@ static enum io_status ccwreq_status(stru
+ CIO_TRACE_EVENT(2, "sensedata");
+ CIO_HEX_EVENT(2, &cdev->private->dev_id,
+ sizeof(struct ccw_dev_id));
+- CIO_HEX_EVENT(2, &cdev->private->irb.ecw, SENSE_MAX_COUNT);
++ CIO_HEX_EVENT(2, &cdev->private->dma_area->irb.ecw,
++ SENSE_MAX_COUNT);
+ /* Check for command reject. */
+ if (irb->ecw[0] & SNS0_CMD_REJECT)
+ return IO_REJECTED;
+--- a/drivers/s390/cio/device.c
++++ b/drivers/s390/cio/device.c
+@@ -25,6 +25,7 @@
+ #include <linux/timer.h>
+ #include <linux/kernel_stat.h>
+ #include <linux/sched/signal.h>
++#include <linux/dma-mapping.h>
+
+ #include <asm/ccwdev.h>
+ #include <asm/cio.h>
+@@ -730,6 +731,9 @@ ccw_device_release(struct device *dev)
+ struct ccw_device *cdev;
+
+ cdev = to_ccwdev(dev);
++ cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
++ sizeof(*cdev->private->dma_area));
++ cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
+ /* Release reference of parent subchannel. */
+ put_device(cdev->dev.parent);
+ kfree(cdev->private);
+@@ -739,15 +743,33 @@ ccw_device_release(struct device *dev)
+ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
+ {
+ struct ccw_device *cdev;
++ struct gen_pool *dma_pool;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+- if (cdev) {
+- cdev->private = kzalloc(sizeof(struct ccw_device_private),
+- GFP_KERNEL | GFP_DMA);
+- if (cdev->private)
+- return cdev;
+- }
++ if (!cdev)
++ goto err_cdev;
++ cdev->private = kzalloc(sizeof(struct ccw_device_private),
++ GFP_KERNEL | GFP_DMA);
++ if (!cdev->private)
++ goto err_priv;
++ cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
++ cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
++ dma_pool = cio_gp_dma_create(&cdev->dev, 1);
++ if (!dma_pool)
++ goto err_dma_pool;
++ cdev->private->dma_pool = dma_pool;
++ cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
++ sizeof(*cdev->private->dma_area));
++ if (!cdev->private->dma_area)
++ goto err_dma_area;
++ return cdev;
++err_dma_area:
++ cio_gp_dma_destroy(dma_pool, &cdev->dev);
++err_dma_pool:
++ kfree(cdev->private);
++err_priv:
+ kfree(cdev);
++err_cdev:
+ return ERR_PTR(-ENOMEM);
+ }
+
+@@ -927,7 +949,7 @@ io_subchannel_recog_done(struct ccw_devi
+ wake_up(&ccw_device_init_wq);
+ break;
+ case DEV_STATE_OFFLINE:
+- /*
++ /*
+ * We can't register the device in interrupt context so
+ * we schedule a work item.
+ */
+@@ -1105,6 +1127,14 @@ static int io_subchannel_probe(struct su
+ if (!io_priv)
+ goto out_schedule;
+
++ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
++ sizeof(*io_priv->dma_area),
++ &io_priv->dma_area_dma, GFP_KERNEL);
++ if (!io_priv->dma_area) {
++ kfree(io_priv);
++ goto out_schedule;
++ }
++
+ set_io_private(sch, io_priv);
+ css_schedule_eval(sch->schid);
+ return 0;
+@@ -1134,6 +1164,8 @@ io_subchannel_remove (struct subchannel
+ spin_unlock_irq(cdev->ccwlock);
+ ccw_device_unregister(cdev);
+ out_free:
++ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
++ io_priv->dma_area, io_priv->dma_area_dma);
+ kfree(io_priv);
+ sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
+ return 0;
+@@ -1633,13 +1665,19 @@ struct ccw_device * __init ccw_device_cr
+ return ERR_CAST(sch);
+
+ io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
+- if (!io_priv) {
+- put_device(&sch->dev);
+- return ERR_PTR(-ENOMEM);
+- }
++ if (!io_priv)
++ goto err_priv;
++ io_priv->dma_area = dma_alloc_coherent(&sch->dev,
++ sizeof(*io_priv->dma_area),
++ &io_priv->dma_area_dma, GFP_KERNEL);
++ if (!io_priv->dma_area)
++ goto err_dma_area;
+ set_io_private(sch, io_priv);
+ cdev = io_subchannel_create_ccwdev(sch);
+ if (IS_ERR(cdev)) {
++ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
++ io_priv->dma_area, io_priv->dma_area_dma);
++ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ kfree(io_priv);
+ return cdev;
+@@ -1647,6 +1685,12 @@ struct ccw_device * __init ccw_device_cr
+ cdev->drv = drv;
+ ccw_device_set_int_class(cdev);
+ return cdev;
++
++err_dma_area:
++ kfree(io_priv);
++err_priv:
++ put_device(&sch->dev);
++ return ERR_PTR(-ENOMEM);
+ }
+
+ void __init ccw_device_destroy_console(struct ccw_device *cdev)
+@@ -1657,6 +1701,8 @@ void __init ccw_device_destroy_console(s
+ set_io_private(sch, NULL);
+ put_device(&sch->dev);
+ put_device(&cdev->dev);
++ dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
++ io_priv->dma_area, io_priv->dma_area_dma);
+ kfree(io_priv);
+ }
+
+--- a/drivers/s390/cio/device_fsm.c
++++ b/drivers/s390/cio/device_fsm.c
+@@ -66,8 +66,10 @@ static void ccw_timeout_log(struct ccw_d
+ sizeof(struct tcw), 0);
+ } else {
+ printk(KERN_WARNING "cio: orb indicates command mode\n");
+- if ((void *)(addr_t)orb->cmd.cpa == &private->sense_ccw ||
+- (void *)(addr_t)orb->cmd.cpa == cdev->private->iccws)
++ if ((void *)(addr_t)orb->cmd.cpa ==
++ &private->dma_area->sense_ccw ||
++ (void *)(addr_t)orb->cmd.cpa ==
++ cdev->private->dma_area->iccws)
+ printk(KERN_WARNING "cio: last channel program "
+ "(intern):\n");
+ else
+@@ -144,18 +146,22 @@ ccw_device_cancel_halt_clear(struct ccw_
+ void ccw_device_update_sense_data(struct ccw_device *cdev)
+ {
+ memset(&cdev->id, 0, sizeof(cdev->id));
+- cdev->id.cu_type = cdev->private->senseid.cu_type;
+- cdev->id.cu_model = cdev->private->senseid.cu_model;
+- cdev->id.dev_type = cdev->private->senseid.dev_type;
+- cdev->id.dev_model = cdev->private->senseid.dev_model;
++ cdev->id.cu_type = cdev->private->dma_area->senseid.cu_type;
++ cdev->id.cu_model = cdev->private->dma_area->senseid.cu_model;
++ cdev->id.dev_type = cdev->private->dma_area->senseid.dev_type;
++ cdev->id.dev_model = cdev->private->dma_area->senseid.dev_model;
+ }
+
+ int ccw_device_test_sense_data(struct ccw_device *cdev)
+ {
+- return cdev->id.cu_type == cdev->private->senseid.cu_type &&
+- cdev->id.cu_model == cdev->private->senseid.cu_model &&
+- cdev->id.dev_type == cdev->private->senseid.dev_type &&
+- cdev->id.dev_model == cdev->private->senseid.dev_model;
++ return cdev->id.cu_type ==
++ cdev->private->dma_area->senseid.cu_type &&
++ cdev->id.cu_model ==
++ cdev->private->dma_area->senseid.cu_model &&
++ cdev->id.dev_type ==
++ cdev->private->dma_area->senseid.dev_type &&
++ cdev->id.dev_model ==
++ cdev->private->dma_area->senseid.dev_model;
+ }
+
+ /*
+@@ -343,7 +349,7 @@ ccw_device_done(struct ccw_device *cdev,
+ cio_disable_subchannel(sch);
+
+ /* Reset device status. */
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+
+ cdev->private->state = state;
+
+@@ -499,13 +505,14 @@ callback:
+ ccw_device_done(cdev, DEV_STATE_ONLINE);
+ /* Deliver fake irb to device driver, if needed. */
+ if (cdev->private->flags.fake_irb) {
+- create_fake_irb(&cdev->private->irb,
++ create_fake_irb(&cdev->private->dma_area->irb,
+ cdev->private->flags.fake_irb);
+ cdev->private->flags.fake_irb = 0;
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+- &cdev->private->irb);
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ &cdev->private->dma_area->irb);
++ memset(&cdev->private->dma_area->irb, 0,
++ sizeof(struct irb));
+ }
+ ccw_device_report_path_events(cdev);
+ break;
+@@ -661,7 +668,8 @@ ccw_device_online_verify(struct ccw_devi
+
+ if (scsw_actl(&sch->schib.scsw) != 0 ||
+ (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
+- (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
++ (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
++ SCSW_STCTL_STATUS_PEND)) {
+ /*
+ * No final status yet or final status not yet delivered
+ * to the device driver. Can't do path verification now,
+@@ -708,7 +716,7 @@ static int ccw_device_call_handler(struc
+ * - fast notification was requested (primary status)
+ * - unsolicited interrupts
+ */
+- stctl = scsw_stctl(&cdev->private->irb.scsw);
++ stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
+ ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
+ (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
+ (stctl == SCSW_STCTL_STATUS_PEND);
+@@ -724,9 +732,9 @@ static int ccw_device_call_handler(struc
+
+ if (cdev->handler)
+ cdev->handler(cdev, cdev->private->intparm,
+- &cdev->private->irb);
++ &cdev->private->dma_area->irb);
+
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ return 1;
+ }
+
+@@ -748,7 +756,8 @@ ccw_device_irq(struct ccw_device *cdev,
+ /* Unit check but no sense data. Need basic sense. */
+ if (ccw_device_do_sense(cdev, irb) != 0)
+ goto call_handler_unsol;
+- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
++ memcpy(&cdev->private->dma_area->irb, irb,
++ sizeof(struct irb));
+ cdev->private->state = DEV_STATE_W4SENSE;
+ cdev->private->intparm = 0;
+ return;
+@@ -831,7 +840,7 @@ ccw_device_w4sense(struct ccw_device *cd
+ if (scsw_fctl(&irb->scsw) &
+ (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
+ cdev->private->flags.dosense = 0;
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+ ccw_device_accumulate_irb(cdev, irb);
+ goto call_handler;
+ }
+--- a/drivers/s390/cio/device_id.c
++++ b/drivers/s390/cio/device_id.c
+@@ -98,7 +98,7 @@ static int diag210_to_senseid(struct sen
+ static int diag210_get_dev_info(struct ccw_device *cdev)
+ {
+ struct ccw_dev_id *dev_id = &cdev->private->dev_id;
+- struct senseid *senseid = &cdev->private->senseid;
++ struct senseid *senseid = &cdev->private->dma_area->senseid;
+ struct diag210 diag_data;
+ int rc;
+
+@@ -133,8 +133,10 @@ err_failed:
+ static void snsid_init(struct ccw_device *cdev)
+ {
+ cdev->private->flags.esid = 0;
+- memset(&cdev->private->senseid, 0, sizeof(cdev->private->senseid));
+- cdev->private->senseid.cu_type = 0xffff;
++
++ memset(&cdev->private->dma_area->senseid, 0,
++ sizeof(cdev->private->dma_area->senseid));
++ cdev->private->dma_area->senseid.cu_type = 0xffff;
+ }
+
+ /*
+@@ -142,16 +144,16 @@ static void snsid_init(struct ccw_device
+ */
+ static int snsid_check(struct ccw_device *cdev, void *data)
+ {
+- struct cmd_scsw *scsw = &cdev->private->irb.scsw.cmd;
++ struct cmd_scsw *scsw = &cdev->private->dma_area->irb.scsw.cmd;
+ int len = sizeof(struct senseid) - scsw->count;
+
+ /* Check for incomplete SENSE ID data. */
+ if (len < SENSE_ID_MIN_LEN)
+ goto out_restart;
+- if (cdev->private->senseid.cu_type == 0xffff)
++ if (cdev->private->dma_area->senseid.cu_type == 0xffff)
+ goto out_restart;
+ /* Check for incompatible SENSE ID data. */
+- if (cdev->private->senseid.reserved != 0xff)
++ if (cdev->private->dma_area->senseid.reserved != 0xff)
+ return -EOPNOTSUPP;
+ /* Check for extended-identification information. */
+ if (len > SENSE_ID_BASIC_LEN)
+@@ -169,7 +171,7 @@ out_restart:
+ static void snsid_callback(struct ccw_device *cdev, void *data, int rc)
+ {
+ struct ccw_dev_id *id = &cdev->private->dev_id;
+- struct senseid *senseid = &cdev->private->senseid;
++ struct senseid *senseid = &cdev->private->dma_area->senseid;
+ int vm = 0;
+
+ if (rc && MACHINE_IS_VM) {
+@@ -199,7 +201,7 @@ void ccw_device_sense_id_start(struct cc
+ {
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ struct ccw_request *req = &cdev->private->req;
+- struct ccw1 *cp = cdev->private->iccws;
++ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ CIO_TRACE_EVENT(4, "snsid");
+ CIO_HEX_EVENT(4, &cdev->private->dev_id, sizeof(cdev->private->dev_id));
+@@ -207,7 +209,7 @@ void ccw_device_sense_id_start(struct cc
+ snsid_init(cdev);
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_ID;
+- cp->cda = (u32) (addr_t) &cdev->private->senseid;
++ cp->cda = (u32) (addr_t) &cdev->private->dma_area->senseid;
+ cp->count = sizeof(struct senseid);
+ cp->flags = CCW_FLAG_SLI;
+ /* Request setup. */
+--- a/drivers/s390/cio/device_ops.c
++++ b/drivers/s390/cio/device_ops.c
+@@ -430,8 +430,8 @@ struct ciw *ccw_device_get_ciw(struct cc
+ if (cdev->private->flags.esid == 0)
+ return NULL;
+ for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
+- if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
+- return cdev->private->senseid.ciw + ciw_cnt;
++ if (cdev->private->dma_area->senseid.ciw[ciw_cnt].ct == ct)
++ return cdev->private->dma_area->senseid.ciw + ciw_cnt;
+ return NULL;
+ }
+
+@@ -700,6 +700,23 @@ void ccw_device_get_schid(struct ccw_dev
+ }
+ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
+
++/*
++ * Allocate zeroed dma coherent 31 bit addressable memory using
++ * the subchannels dma pool. Maximal size of allocation supported
++ * is PAGE_SIZE.
++ */
++void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
++{
++ return cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
++}
++EXPORT_SYMBOL(ccw_device_dma_zalloc);
++
++void ccw_device_dma_free(struct ccw_device *cdev, void *cpu_addr, size_t size)
++{
++ cio_gp_dma_free(cdev->private->dma_pool, cpu_addr, size);
++}
++EXPORT_SYMBOL(ccw_device_dma_free);
++
+ EXPORT_SYMBOL(ccw_device_set_options_mask);
+ EXPORT_SYMBOL(ccw_device_set_options);
+ EXPORT_SYMBOL(ccw_device_clear_options);
+--- a/drivers/s390/cio/device_pgid.c
++++ b/drivers/s390/cio/device_pgid.c
+@@ -56,7 +56,7 @@ out:
+ static void nop_build_cp(struct ccw_device *cdev)
+ {
+ struct ccw_request *req = &cdev->private->req;
+- struct ccw1 *cp = cdev->private->iccws;
++ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ cp->cmd_code = CCW_CMD_NOOP;
+ cp->cda = 0;
+@@ -133,9 +133,9 @@ err:
+ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
+ {
+ struct ccw_request *req = &cdev->private->req;
+- struct ccw1 *cp = cdev->private->iccws;
++ struct ccw1 *cp = cdev->private->dma_area->iccws;
+ int i = pathmask_to_pos(req->lpm);
+- struct pgid *pgid = &cdev->private->pgid[i];
++ struct pgid *pgid = &cdev->private->dma_area->pgid[i];
+
+ pgid->inf.fc = fn;
+ cp->cmd_code = CCW_CMD_SET_PGID;
+@@ -299,7 +299,7 @@ static int pgid_cmp(struct pgid *p1, str
+ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
+ int *mismatch, u8 *reserved, u8 *reset)
+ {
+- struct pgid *pgid = &cdev->private->pgid[0];
++ struct pgid *pgid = &cdev->private->dma_area->pgid[0];
+ struct pgid *first = NULL;
+ int lpm;
+ int i;
+@@ -341,7 +341,7 @@ static u8 pgid_to_donepm(struct ccw_devi
+ lpm = 0x80 >> i;
+ if ((cdev->private->pgid_valid_mask & lpm) == 0)
+ continue;
+- pgid = &cdev->private->pgid[i];
++ pgid = &cdev->private->dma_area->pgid[i];
+ if (sch->opm & lpm) {
+ if (pgid->inf.ps.state1 != SNID_STATE1_GROUPED)
+ continue;
+@@ -367,7 +367,8 @@ static void pgid_fill(struct ccw_device
+ int i;
+
+ for (i = 0; i < 8; i++)
+- memcpy(&cdev->private->pgid[i], pgid, sizeof(struct pgid));
++ memcpy(&cdev->private->dma_area->pgid[i], pgid,
++ sizeof(struct pgid));
+ }
+
+ /*
+@@ -434,12 +435,12 @@ out:
+ static void snid_build_cp(struct ccw_device *cdev)
+ {
+ struct ccw_request *req = &cdev->private->req;
+- struct ccw1 *cp = cdev->private->iccws;
++ struct ccw1 *cp = cdev->private->dma_area->iccws;
+ int i = pathmask_to_pos(req->lpm);
+
+ /* Channel program setup. */
+ cp->cmd_code = CCW_CMD_SENSE_PGID;
+- cp->cda = (u32) (addr_t) &cdev->private->pgid[i];
++ cp->cda = (u32) (addr_t) &cdev->private->dma_area->pgid[i];
+ cp->count = sizeof(struct pgid);
+ cp->flags = CCW_FLAG_SLI;
+ req->cp = cp;
+@@ -515,7 +516,8 @@ static void verify_start(struct ccw_devi
+ sch->lpm = sch->schib.pmcw.pam;
+
+ /* Initialize PGID data. */
+- memset(cdev->private->pgid, 0, sizeof(cdev->private->pgid));
++ memset(cdev->private->dma_area->pgid, 0,
++ sizeof(cdev->private->dma_area->pgid));
+ cdev->private->pgid_valid_mask = 0;
+ cdev->private->pgid_todo_mask = sch->schib.pmcw.pam;
+ cdev->private->path_notoper_mask = 0;
+@@ -625,7 +627,7 @@ struct stlck_data {
+ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
+ {
+ struct ccw_request *req = &cdev->private->req;
+- struct ccw1 *cp = cdev->private->iccws;
++ struct ccw1 *cp = cdev->private->dma_area->iccws;
+
+ cp[0].cmd_code = CCW_CMD_STLCK;
+ cp[0].cda = (u32) (addr_t) buf1;
+--- a/drivers/s390/cio/device_status.c
++++ b/drivers/s390/cio/device_status.c
+@@ -78,15 +78,15 @@ ccw_device_accumulate_ecw(struct ccw_dev
+ * are condition that have to be met for the extended control
+ * bit to have meaning. Sick.
+ */
+- cdev->private->irb.scsw.cmd.ectl = 0;
++ cdev->private->dma_area->irb.scsw.cmd.ectl = 0;
+ if ((irb->scsw.cmd.stctl & SCSW_STCTL_ALERT_STATUS) &&
+ !(irb->scsw.cmd.stctl & SCSW_STCTL_INTER_STATUS))
+- cdev->private->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
++ cdev->private->dma_area->irb.scsw.cmd.ectl = irb->scsw.cmd.ectl;
+ /* Check if extended control word is valid. */
+- if (!cdev->private->irb.scsw.cmd.ectl)
++ if (!cdev->private->dma_area->irb.scsw.cmd.ectl)
+ return;
+ /* Copy concurrent sense / model dependent information. */
+- memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw));
++ memcpy(&cdev->private->dma_area->irb.ecw, irb->ecw, sizeof(irb->ecw));
+ }
+
+ /*
+@@ -117,7 +117,7 @@ ccw_device_accumulate_esw(struct ccw_dev
+ if (!ccw_device_accumulate_esw_valid(irb))
+ return;
+
+- cdev_irb = &cdev->private->irb;
++ cdev_irb = &cdev->private->dma_area->irb;
+
+ /* Copy last path used mask. */
+ cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum;
+@@ -209,7 +209,7 @@ ccw_device_accumulate_irb(struct ccw_dev
+ ccw_device_path_notoper(cdev);
+ /* No irb accumulation for transport mode irbs. */
+ if (scsw_is_tm(&irb->scsw)) {
+- memcpy(&cdev->private->irb, irb, sizeof(struct irb));
++ memcpy(&cdev->private->dma_area->irb, irb, sizeof(struct irb));
+ return;
+ }
+ /*
+@@ -218,7 +218,7 @@ ccw_device_accumulate_irb(struct ccw_dev
+ if (!scsw_is_solicited(&irb->scsw))
+ return;
+
+- cdev_irb = &cdev->private->irb;
++ cdev_irb = &cdev->private->dma_area->irb;
+
+ /*
+ * If the clear function had been performed, all formerly pending
+@@ -226,7 +226,7 @@ ccw_device_accumulate_irb(struct ccw_dev
+ * intermediate accumulated status to the device driver.
+ */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC)
+- memset(&cdev->private->irb, 0, sizeof(struct irb));
++ memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
+
+ /* Copy bits which are valid only for the start function. */
+ if (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) {
+@@ -328,9 +328,9 @@ ccw_device_do_sense(struct ccw_device *c
+ /*
+ * We have ending status but no sense information. Do a basic sense.
+ */
+- sense_ccw = &to_io_private(sch)->sense_ccw;
++ sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
+ sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
+- sense_ccw->cda = (__u32) __pa(cdev->private->irb.ecw);
++ sense_ccw->cda = (__u32) __pa(cdev->private->dma_area->irb.ecw);
+ sense_ccw->count = SENSE_MAX_COUNT;
+ sense_ccw->flags = CCW_FLAG_SLI;
+
+@@ -363,7 +363,7 @@ ccw_device_accumulate_basic_sense(struct
+
+ if (!(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
+ (irb->scsw.cmd.dstat & DEV_STAT_CHN_END)) {
+- cdev->private->irb.esw.esw0.erw.cons = 1;
++ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ }
+ /* Check if path verification is required. */
+@@ -385,7 +385,7 @@ ccw_device_accumulate_and_sense(struct c
+ /* Check for basic sense. */
+ if (cdev->private->flags.dosense &&
+ !(irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK)) {
+- cdev->private->irb.esw.esw0.erw.cons = 1;
++ cdev->private->dma_area->irb.esw.esw0.erw.cons = 1;
+ cdev->private->flags.dosense = 0;
+ return 0;
+ }
+--- a/drivers/s390/cio/io_sch.h
++++ b/drivers/s390/cio/io_sch.h
+@@ -8,15 +8,20 @@
+ #include "css.h"
+ #include "orb.h"
+
++struct io_subchannel_dma_area {
++ struct ccw1 sense_ccw; /* static ccw for sense command */
++};
++
+ struct io_subchannel_private {
+ union orb orb; /* operation request block */
+- struct ccw1 sense_ccw; /* static ccw for sense command */
+ struct ccw_device *cdev;/* pointer to the child ccw device */
+ struct {
+ unsigned int suspend:1; /* allow suspend */
+ unsigned int prefetch:1;/* deny prefetch */
+ unsigned int inter:1; /* suppress intermediate interrupts */
+ } __packed options;
++ struct io_subchannel_dma_area *dma_area;
++ dma_addr_t dma_area_dma;
+ } __aligned(8);
+
+ #define to_io_private(n) ((struct io_subchannel_private *) \
+@@ -114,6 +119,13 @@ enum cdev_todo {
+ #define FAKE_CMD_IRB 1
+ #define FAKE_TM_IRB 2
+
++struct ccw_device_dma_area {
++ struct senseid senseid; /* SenseID info */
++ struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
++ struct irb irb; /* device status */
++ struct pgid pgid[8]; /* path group IDs per chpid*/
++};
++
+ struct ccw_device_private {
+ struct ccw_device *cdev;
+ struct subchannel *sch;
+@@ -153,11 +165,7 @@ struct ccw_device_private {
+ } __attribute__((packed)) flags;
+ unsigned long intparm; /* user interruption parameter */
+ struct qdio_irq *qdio_data;
+- struct irb irb; /* device status */
+ int async_kill_io_rc;
+- struct senseid senseid; /* SenseID info */
+- struct pgid pgid[8]; /* path group IDs per chpid*/
+- struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */
+ struct work_struct todo_work;
+ enum cdev_todo todo;
+ wait_queue_head_t wait_q;
+@@ -166,6 +174,8 @@ struct ccw_device_private {
+ struct list_head cmb_list; /* list of measured devices */
+ u64 cmb_start_time; /* clock value of cmb reset */
+ void *cmb_wait; /* deferred cmb enable/disable */
++ struct gen_pool *dma_pool;
++ struct ccw_device_dma_area *dma_area;
+ enum interruption_class int_class;
+ };
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -69,7 +69,6 @@ struct virtio_ccw_device {
+ bool device_lost;
+ unsigned int config_ready;
+ void *airq_info;
+- u64 dma_mask;
+ };
+
+ struct vq_info_block_legacy {
+@@ -1234,16 +1233,7 @@ static int virtio_ccw_online(struct ccw_
+ ret = -ENOMEM;
+ goto out_free;
+ }
+-
+ vcdev->vdev.dev.parent = &cdev->dev;
+- cdev->dev.dma_mask = &vcdev->dma_mask;
+- /* we are fine with common virtio infrastructure using 64 bit DMA */
+- ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
+- if (ret) {
+- dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
+- goto out_free;
+- }
+-
+ vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!vcdev->config_block) {
diff --git a/patches.suse/s390-cio-introduce-dma-pools-to-cio b/patches.suse/s390-cio-introduce-dma-pools-to-cio
new file mode 100644
index 0000000000..f2cfac0af7
--- /dev/null
+++ b/patches.suse/s390-cio-introduce-dma-pools-to-cio
@@ -0,0 +1,242 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Tue, 2 Apr 2019 18:47:29 +0200
+Subject: s390/cio: introduce DMA pools to cio
+Git-commit: bb99332a2b558e1f28b4c5011f9ea3b46f1c8806
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+To support protected virtualization cio will need to make sure the
+memory used for communication with the hypervisor is DMA memory.
+
+Let us introduce one global pool for cio.
+
+Our DMA pools are implemented as a gen_pool backed with DMA pages. The
+idea is to avoid each allocation effectively wasting a page, as we
+typically allocate much less than PAGE_SIZE.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/Kconfig | 1
+ arch/s390/include/asm/cio.h | 11 +++
+ drivers/s390/cio/css.c | 133 ++++++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 141 insertions(+), 4 deletions(-)
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -183,6 +183,7 @@ config S390
+ select VIRT_TO_BUS
+ select HAVE_NMI
+ select SWIOTLB
++ select GENERIC_ALLOCATOR
+
+
+ config SCHED_OMIT_FRAME_POINTER
+--- a/arch/s390/include/asm/cio.h
++++ b/arch/s390/include/asm/cio.h
+@@ -6,6 +6,7 @@
+
+ #include <linux/spinlock.h>
+ #include <linux/bitops.h>
++#include <linux/genalloc.h>
+ #include <asm/types.h>
+
+ #define LPM_ANYPATH 0xff
+@@ -337,6 +338,16 @@ struct cio_iplinfo {
+
+ extern int cio_get_iplinfo(struct cio_iplinfo *iplinfo);
+
++extern void *cio_dma_zalloc(size_t size);
++extern void cio_dma_free(void *cpu_addr, size_t size);
++extern struct device *cio_get_dma_css_dev(void);
++
++void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
++ size_t size);
++void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size);
++void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev);
++struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages);
++
+ /* Function from drivers/s390/cio/chsc.c */
+ int chsc_sstpc(void *page, unsigned int op, u16 ctrl, u64 *clock_delta);
+ int chsc_sstpi(void *page, void *result, size_t size);
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -21,6 +21,8 @@
+ #include <linux/reboot.h>
+ #include <linux/suspend.h>
+ #include <linux/proc_fs.h>
++#include <linux/genalloc.h>
++#include <linux/dma-mapping.h>
+ #include <asm/isc.h>
+ #include <asm/crw.h>
+
+@@ -189,6 +191,12 @@ struct subchannel *css_alloc_subchannel(
+ INIT_WORK(&sch->todo_work, css_sch_todo);
+ sch->dev.release = &css_subchannel_release;
+ device_initialize(&sch->dev);
++ /*
++ * The physical addresses of some the dma structures that can
++ * belong to a subchannel need to fit 31 bit width (e.g. ccw).
++ */
++ sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
++ sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
+ return sch;
+
+ err:
+@@ -816,6 +824,13 @@ static int __init setup_css(int nr)
+ dev_set_name(&css->device, "css%x", nr);
+ css->device.groups = cssdev_attr_groups;
+ css->device.release = channel_subsystem_release;
++ /*
++ * We currently allocate notifier bits with this (using
++ * css->device as the device argument with the DMA API)
++ * and are fine with 64 bit addresses.
++ */
++ css->device.coherent_dma_mask = DMA_BIT_MASK(64);
++ css->device.dma_mask = &css->device.coherent_dma_mask;
+
+ mutex_init(&css->mutex);
+ css->cssid = chsc_get_cssid(nr);
+@@ -935,6 +950,111 @@ static struct notifier_block css_power_n
+ .notifier_call = css_power_event,
+ };
+
++#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
++static struct gen_pool *cio_dma_pool;
++
++/* Currently cio supports only a single css */
++struct device *cio_get_dma_css_dev(void)
++{
++ return &channel_subsystems[0]->device;
++}
++
++struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
++{
++ struct gen_pool *gp_dma;
++ void *cpu_addr;
++ dma_addr_t dma_addr;
++ int i;
++
++ gp_dma = gen_pool_create(3, -1);
++ if (!gp_dma)
++ return NULL;
++ for (i = 0; i < nr_pages; ++i) {
++ cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
++ CIO_DMA_GFP);
++ if (!cpu_addr)
++ return gp_dma;
++ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
++ dma_addr, PAGE_SIZE, -1);
++ }
++ return gp_dma;
++}
++
++static void __gp_dma_free_dma(struct gen_pool *pool,
++ struct gen_pool_chunk *chunk, void *data)
++{
++ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
++
++ dma_free_coherent((struct device *) data, chunk_size,
++ (void *) chunk->start_addr,
++ (dma_addr_t) chunk->phys_addr);
++}
++
++void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
++{
++ if (!gp_dma)
++ return;
++ /* this is quite ugly but no better idea */
++ gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
++ gen_pool_destroy(gp_dma);
++}
++
++static int cio_dma_pool_init(void)
++{
++ /* No need to free up the resources: compiled in */
++ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
++ if (!cio_dma_pool)
++ return -ENOMEM;
++ return 0;
++}
++
++void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
++ size_t size)
++{
++ dma_addr_t dma_addr;
++ unsigned long addr;
++ size_t chunk_size;
++
++ if (!gp_dma)
++ return NULL;
++ addr = gen_pool_alloc(gp_dma, size);
++ while (!addr) {
++ chunk_size = round_up(size, PAGE_SIZE);
++ addr = (unsigned long) dma_alloc_coherent(dma_dev,
++ chunk_size, &dma_addr, CIO_DMA_GFP);
++ if (!addr)
++ return NULL;
++ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
++ addr = gen_pool_alloc(gp_dma, size);
++ }
++ return (void *) addr;
++}
++
++void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
++{
++ if (!cpu_addr)
++ return;
++ memset(cpu_addr, 0, size);
++ gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
++}
++
++/*
++ * Allocate dma memory from the css global pool. Intended for memory not
++ * specific to any single device within the css. The allocated memory
++ * is not guaranteed to be 31-bit addressable.
++ *
++ * Caution: Not suitable for early stuff like console.
++ */
++void *cio_dma_zalloc(size_t size)
++{
++ return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
++}
++
++void cio_dma_free(void *cpu_addr, size_t size)
++{
++ cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
++}
++
+ /*
+ * Now that the driver core is running, we can setup our channel subsystem.
+ * The struct subchannel's are created during probing.
+@@ -976,16 +1096,21 @@ static int __init css_bus_init(void)
+ if (ret)
+ goto out_unregister;
+ ret = register_pm_notifier(&css_power_notifier);
+- if (ret) {
+- unregister_reboot_notifier(&css_reboot_notifier);
+- goto out_unregister;
+- }
++ if (ret)
++ goto out_unregister_rn;
++ ret = cio_dma_pool_init();
++ if (ret)
++ goto out_unregister_pmn;
+ css_init_done = 1;
+
+ /* Enable default isc for I/O subchannels. */
+ isc_register(IO_SCH_ISC);
+
+ return 0;
++out_unregister_pmn:
++ unregister_pm_notifier(&css_power_notifier);
++out_unregister_rn:
++ unregister_reboot_notifier(&css_reboot_notifier);
+ out_unregister:
+ while (i-- > 0) {
+ struct channel_subsystem *css = channel_subsystems[i];
diff --git a/patches.suse/s390-cpu_mf-add-store-cpu-counter-multiple-instruction-support b/patches.suse/s390-cpu_mf-add-store-cpu-counter-multiple-instruction-support
new file mode 100644
index 0000000000..bd8ea5e281
--- /dev/null
+++ b/patches.suse/s390-cpu_mf-add-store-cpu-counter-multiple-instruction-support
@@ -0,0 +1,80 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 29 Aug 2018 17:46:06 +0200
+Subject: s390/cpu_mf: add store cpu counter multiple instruction support
+Git-commit: 778fb10ccc18b16c022be898d8497767c20ea7b5
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Add support for the STORE CPU COUNTER MULTIPLE instruction to extract
+a range of counters from a counter set.
+
+An assembler macro is used to create the instruction opcode because
+the counter set identifier is part of the instruction and, thus,
+cannot be easily specified as a parameter.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mf-insn.h | 22 ++++++++++++++++++++++
+ arch/s390/include/asm/cpu_mf.h | 17 +++++++++++++++++
+ 2 files changed, 39 insertions(+)
+
+--- /dev/null
++++ b/arch/s390/include/asm/cpu_mf-insn.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Support for CPU-MF instructions
++ *
++ * Copyright IBM Corp. 2019
++ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
++ */
++#ifndef _ASM_S390_CPU_MF_INSN_H
++#define _ASM_S390_CPU_MF_INSN_H
++
++#ifdef __ASSEMBLY__
++
++/* Macro to generate the STCCTM instruction with a customized
++ * M3 field designating the counter set.
++ */
++.macro STCCTM r1 m3 db2
++ .insn rsy,0xeb0000000017,\r1,\m3 & 0xf,\db2
++.endm
++
++#endif /* __ASSEMBLY__ */
++
++#endif
+--- a/arch/s390/include/asm/cpu_mf.h
++++ b/arch/s390/include/asm/cpu_mf.h
+@@ -15,6 +15,8 @@
+ #include <linux/errno.h>
+ #include <asm/facility.h>
+
++asm(".include \"asm/cpu_mf-insn.h\"\n");
++
+ #define CPU_MF_INT_SF_IAE (1 << 31) /* invalid entry address */
+ #define CPU_MF_INT_SF_ISE (1 << 30) /* incorrect SDBT entry */
+ #define CPU_MF_INT_SF_PRA (1 << 29) /* program request alert */
+@@ -200,6 +202,21 @@ static inline int ecctr(u64 ctr, u64 *va
+ return cc;
+ }
+
++/* Store CPU counter multiple for a particular counter set */
++static inline int stcctm(u8 set, u64 range, u64 *dest)
++{
++ int cc;
++
++ asm volatile (
++ " STCCTM %2,%3,%1\n"
++ " ipm %0\n"
++ " srl %0,28\n"
++ : "=d" (cc)
++ : "Q" (*dest), "d" (range), "i" (set)
++ : "cc", "memory");
++ return cc;
++}
++
+ /* Store CPU counter multiple for the MT utilization counter set */
+ static inline int stcctm5(u64 num, u64 *val)
+ {
diff --git a/patches.suse/s390-cpu_mf-move-struct-cpu_cf_events-and-per-cpu-variable-to-header-file b/patches.suse/s390-cpu_mf-move-struct-cpu_cf_events-and-per-cpu-variable-to-header-file
new file mode 100644
index 0000000000..91180056f5
--- /dev/null
+++ b/patches.suse/s390-cpu_mf-move-struct-cpu_cf_events-and-per-cpu-variable-to-header-file
@@ -0,0 +1,58 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 8 Aug 2018 10:12:22 +0200
+Subject: s390/cpu_mf: move struct cpu_cf_events and per-CPU variable to header
+ file
+Git-commit: f944bcdf5b8431c68be8bdd13259d27412e45c14
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Make the struct cpu_cf_events and the respective per-CPU variable available
+to in-kernel users. Access to this per-CPU variable shall be done between
+the calls to __kernel_cpumcf_begin() and __kernel_cpumcf_end().
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 9 +++++++++
+ arch/s390/kernel/perf_cpum_cf.c | 11 ++---------
+ 2 files changed, 11 insertions(+), 9 deletions(-)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -49,6 +49,15 @@ static inline void ctr_set_stop(u64 *sta
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+ }
+
++struct cpu_cf_events {
++ struct cpumf_ctr_info info;
++ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
++ u64 state, tx_state;
++ unsigned int flags;
++ unsigned int txn_flags;
++};
++DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
++
+ int __kernel_cpumcf_begin(void);
+ void __kernel_cpumcf_end(void);
+
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -21,15 +21,8 @@
+ #include <asm/irq.h>
+ #include <asm/cpu_mcf.h>
+
+-/* Local CPUMF event structure */
+-struct cpu_cf_events {
+- struct cpumf_ctr_info info;
+- atomic_t ctr_set[CPUMF_CTR_SET_MAX];
+- u64 state, tx_state;
+- unsigned int flags;
+- unsigned int txn_flags;
+-};
+-static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
++/* Per-CPU event structure for the counter facility */
++DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
+ .ctr_set = {
+ [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
diff --git a/patches.suse/s390-cpu_mf-replace-stcctm5-with-the-stcctm-function b/patches.suse/s390-cpu_mf-replace-stcctm5-with-the-stcctm-function
new file mode 100644
index 0000000000..5b7b7a15ab
--- /dev/null
+++ b/patches.suse/s390-cpu_mf-replace-stcctm5-with-the-stcctm-function
@@ -0,0 +1,80 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 29 Aug 2018 18:12:17 +0200
+Subject: s390/cpu_mf: replace stcctm5() with the stcctm() function
+Git-commit: 346d034d7f13da9eb135458a2f6cf14c9b77a637
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Replace the stcctm5() function, which extracts counters from the
+MT-diagnostic counter set, with the stcctm() function. For readability,
+introduce an enum to map the counter set names to respective numbers for the stcctm
+instruction.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mf.h | 25 +++++++++----------------
+ arch/s390/kernel/vtime.c | 4 ++--
+ 2 files changed, 11 insertions(+), 18 deletions(-)
+
+--- a/arch/s390/include/asm/cpu_mf.h
++++ b/arch/s390/include/asm/cpu_mf.h
+@@ -203,7 +203,15 @@ static inline int ecctr(u64 ctr, u64 *va
+ }
+
+ /* Store CPU counter multiple for a particular counter set */
+-static inline int stcctm(u8 set, u64 range, u64 *dest)
++enum stcctm_ctr_set {
++ EXTENDED = 0,
++ BASIC = 1,
++ PROBLEM_STATE = 2,
++ CRYPTO_ACTIVITY = 3,
++ MT_DIAG = 5,
++ MT_DIAG_CLEARING = 9, /* clears loss-of-MT-ctr-data alert */
++};
++static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
+ {
+ int cc;
+
+@@ -216,21 +224,6 @@ static inline int stcctm(u8 set, u64 ran
+ : "cc", "memory");
+ return cc;
+ }
+-
+-/* Store CPU counter multiple for the MT utilization counter set */
+-static inline int stcctm5(u64 num, u64 *val)
+-{
+- int cc;
+-
+- asm volatile (
+- " .insn rsy,0xeb0000000017,%2,5,%1\n"
+- " ipm %0\n"
+- " srl %0,28\n"
+- : "=d" (cc)
+- : "Q" (*val), "d" (num)
+- : "cc", "memory");
+- return cc;
+-}
+
+ /* Query sampling information */
+ static inline int qsi(struct hws_qsi_info_block *info)
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -68,7 +68,7 @@ static void update_mt_scaling(void)
+ u64 delta, fac, mult, div;
+ int i;
+
+- stcctm5(smp_cpu_mtid + 1, cycles_new);
++ stcctm(MT_DIAG, smp_cpu_mtid + 1, cycles_new);
+ cycles_old = this_cpu_ptr(mt_cycles);
+ fac = 1;
+ mult = div = 0;
+@@ -435,6 +435,6 @@ void vtime_init(void)
+ __this_cpu_write(mt_scaling_jiffies, jiffies);
+ __this_cpu_write(mt_scaling_mult, 1);
+ __this_cpu_write(mt_scaling_div, 1);
+- stcctm5(smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
++ stcctm(MT_DIAG, smp_cpu_mtid + 1, this_cpu_ptr(mt_cycles));
+ }
+ }
diff --git a/patches.suse/s390-cpum_cf-add-ctr_stcctm-function b/patches.suse/s390-cpum_cf-add-ctr_stcctm-function
new file mode 100644
index 0000000000..8b691b2160
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-add-ctr_stcctm-function
@@ -0,0 +1,47 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Fri, 26 Oct 2018 09:48:29 +0200
+Subject: s390/cpum_cf: add ctr_stcctm() function
+Git-commit: 86c0b75715e711c035ae0ed9820ae95f14fe2c0d
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Introduce the ctr_stcctm() function as wrapper function to extract counters
+from a particular counter set. Note that the counter set is part of the
+stcctm instruction opcode, few indirections are necessary to specify the
+counter set as variable.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -49,6 +49,25 @@ static inline void ctr_set_stop(u64 *sta
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+ }
+
++static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
++{
++ switch (set) {
++ case CPUMF_CTR_SET_BASIC:
++ return stcctm(BASIC, range, dest);
++ case CPUMF_CTR_SET_USER:
++ return stcctm(PROBLEM_STATE, range, dest);
++ case CPUMF_CTR_SET_CRYPTO:
++ return stcctm(CRYPTO_ACTIVITY, range, dest);
++ case CPUMF_CTR_SET_EXT:
++ return stcctm(EXTENDED, range, dest);
++ case CPUMF_CTR_SET_MT_DIAG:
++ return stcctm(MT_DIAG_CLEARING, range, dest);
++ case CPUMF_CTR_SET_MAX:
++ return 3;
++ }
++ return 3;
++}
++
+ struct cpu_cf_events {
+ struct cpumf_ctr_info info;
+ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
diff --git a/patches.suse/s390-cpum_cf-add-minimal-in-kernel-interface-for-counter-measurements b/patches.suse/s390-cpum_cf-add-minimal-in-kernel-interface-for-counter-measurements
new file mode 100644
index 0000000000..18ffa6f2cd
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-add-minimal-in-kernel-interface-for-counter-measurements
@@ -0,0 +1,87 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 8 Aug 2018 10:38:43 +0200
+Subject: s390/cpum_cf: Add minimal in-kernel interface for counter measurements
+Git-commit: 17bebcc68eeea3e1189f712dcba39809ad0d7a86
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Introduce a minimal interface for doing counter measurements of small
+units of work within the kernel. Use the kernel_cpumcf_begin() function
+start a measurement session and, later, stop it with kernel_cpumcf_end().
+
+During the measreument session, you can enable and start/stop counter sets
+by using ctr_set_* functions. To make these changes effective use the
+lcctl() function. You can then use the ecctr() function to extract counters
+from the different counter sets.
+Please note that you have to check whether the counter sets to be enabled
+are authorized.
+
+Note that when a measurement session is active, other users cannot perform
+counter measurements. In such cases, kernel_cpumcf_begin() indicates this
+with returning -EBUSY. If the counter facility is not available,
+kernel_cpumcf_begin() returns -ENODEV.
+
+Note that this interface is restricted to the current CPU and, thus,
+preemption must be turned off.
+
+Example:
+
+ u32 state, err;
+ u64 cycles, insn;
+
+ err = kernel_cpumcf_begin();
+ if (err)
+ goto out_busy;
+
+ state = 0;
+ ctr_set_enable(&state, CPUMF_CTR_SET_BASIC);
+ ctr_set_start(&state, CPUMF_CTR_SET_BASIC);
+
+ err = lcctl(state);
+ if (err)
+ 	goto out;
+
+ /* ... do your work ... */
+
+ ctr_set_stop(&state, CPUMF_CTR_SET_BASIC);
+ err = lcctl(state);
+ if (err)
+ goto out;
+
+ cycles = insn = 0;
+ ecctr(0, &cycles);
+ ecctr(1, &insn);
+
+ /* ... */
+
+ kernel_cpumcf_end();
+out_busy:
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -63,4 +63,18 @@ int __kernel_cpumcf_begin(void);
+ unsigned long kernel_cpumcf_alert(int clear);
+ void __kernel_cpumcf_end(void);
+
++static inline int kernel_cpumcf_begin(void)
++{
++ if (!cpum_cf_avail())
++ return -ENODEV;
++
++ preempt_disable();
++ return __kernel_cpumcf_begin();
++}
++static inline void kernel_cpumcf_end(void)
++{
++ __kernel_cpumcf_end();
++ preempt_enable();
++}
++
+ #endif /* _ASM_S390_CPU_MCF_H */
diff --git a/patches.suse/s390-cpum_cf-add-support-for-cpu-mf-svn-6 b/patches.suse/s390-cpum_cf-add-support-for-cpu-mf-svn-6
new file mode 100644
index 0000000000..2120ccfbff
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-add-support-for-cpu-mf-svn-6
@@ -0,0 +1,164 @@
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Thu, 28 Mar 2019 11:21:47 +0100
+Subject: s390/cpum_cf: Add support for CPU-MF SVN 6
+Git-commit: 46a984ffb86c8542fa510656fa8cb33befe8ee8f
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Add support for the CPU-Measurement Facility counter
+second version number 6. This number is used to detect some
+more counters in the crypto counter set and the extended
+counter set.
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/perf_cpum_cf.c | 4 -
+ arch/s390/kernel/perf_cpum_cf_events.c | 107 ++++++++++++++++++++++-----------
+ 2 files changed, 75 insertions(+), 36 deletions(-)
+
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -1,8 +1,8 @@
+ /*
+ * Performance event support for s390x - CPU-measurement Counter Facility
+ *
+- * Copyright IBM Corp. 2012, 2017
+- * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
++ * Copyright IBM Corp. 2012, 2019
++ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -29,22 +29,26 @@ CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_
+ CPUMF_EVENT_ATTR(cf_fvn3, PROBLEM_STATE_INSTRUCTIONS, 0x0021);
+ CPUMF_EVENT_ATTR(cf_fvn3, L1D_DIR_WRITES, 0x0004);
+ CPUMF_EVENT_ATTR(cf_fvn3, L1D_PENALTY_CYCLES, 0x0005);
+-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_FUNCTIONS, 0x0040);
+-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_CYCLES, 0x0041);
+-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS, 0x0042);
+-CPUMF_EVENT_ATTR(cf_svn_generic, PRNG_BLOCKED_CYCLES, 0x0043);
+-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_FUNCTIONS, 0x0044);
+-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_CYCLES, 0x0045);
+-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS, 0x0046);
+-CPUMF_EVENT_ATTR(cf_svn_generic, SHA_BLOCKED_CYCLES, 0x0047);
+-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_FUNCTIONS, 0x0048);
+-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_CYCLES, 0x0049);
+-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS, 0x004a);
+-CPUMF_EVENT_ATTR(cf_svn_generic, DEA_BLOCKED_CYCLES, 0x004b);
+-CPUMF_EVENT_ATTR(cf_svn_generic, AES_FUNCTIONS, 0x004c);
+-CPUMF_EVENT_ATTR(cf_svn_generic, AES_CYCLES, 0x004d);
+-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS, 0x004e);
+-CPUMF_EVENT_ATTR(cf_svn_generic, AES_BLOCKED_CYCLES, 0x004f);
++CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_FUNCTIONS, 0x0040);
++CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_CYCLES, 0x0041);
++CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS, 0x0042);
++CPUMF_EVENT_ATTR(cf_svn_12345, PRNG_BLOCKED_CYCLES, 0x0043);
++CPUMF_EVENT_ATTR(cf_svn_12345, SHA_FUNCTIONS, 0x0044);
++CPUMF_EVENT_ATTR(cf_svn_12345, SHA_CYCLES, 0x0045);
++CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS, 0x0046);
++CPUMF_EVENT_ATTR(cf_svn_12345, SHA_BLOCKED_CYCLES, 0x0047);
++CPUMF_EVENT_ATTR(cf_svn_12345, DEA_FUNCTIONS, 0x0048);
++CPUMF_EVENT_ATTR(cf_svn_12345, DEA_CYCLES, 0x0049);
++CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS, 0x004a);
++CPUMF_EVENT_ATTR(cf_svn_12345, DEA_BLOCKED_CYCLES, 0x004b);
++CPUMF_EVENT_ATTR(cf_svn_12345, AES_FUNCTIONS, 0x004c);
++CPUMF_EVENT_ATTR(cf_svn_12345, AES_CYCLES, 0x004d);
++CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS, 0x004e);
++CPUMF_EVENT_ATTR(cf_svn_12345, AES_BLOCKED_CYCLES, 0x004f);
++CPUMF_EVENT_ATTR(cf_svn_6, ECC_FUNCTION_COUNT, 0x0050);
++CPUMF_EVENT_ATTR(cf_svn_6, ECC_CYCLES_COUNT, 0x0051);
++CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT, 0x0052);
++CPUMF_EVENT_ATTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT, 0x0053);
+ CPUMF_EVENT_ATTR(cf_z10, L1I_L2_SOURCED_WRITES, 0x0080);
+ CPUMF_EVENT_ATTR(cf_z10, L1D_L2_SOURCED_WRITES, 0x0081);
+ CPUMF_EVENT_ATTR(cf_z10, L1I_L3_LOCAL_WRITES, 0x0082);
+@@ -260,23 +264,47 @@ static struct attribute *cpumcf_fvn3_pmu
+ NULL,
+ };
+
+-static struct attribute *cpumcf_svn_generic_pmu_event_attr[] __initdata = {
+- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, PRNG_BLOCKED_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, SHA_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, SHA_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, SHA_BLOCKED_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, DEA_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, DEA_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, DEA_BLOCKED_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, AES_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, AES_CYCLES),
+- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_FUNCTIONS),
+- CPUMF_EVENT_PTR(cf_svn_generic, AES_BLOCKED_CYCLES),
++static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
++ NULL,
++};
++
++static struct attribute *cpumcf_svn_6_pmu_event_attr[] __initdata = {
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, SHA_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, DEA_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_FUNCTIONS),
++ CPUMF_EVENT_PTR(cf_svn_12345, AES_BLOCKED_CYCLES),
++ CPUMF_EVENT_PTR(cf_svn_6, ECC_FUNCTION_COUNT),
++ CPUMF_EVENT_PTR(cf_svn_6, ECC_CYCLES_COUNT),
++ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_FUNCTION_COUNT),
++ CPUMF_EVENT_PTR(cf_svn_6, ECC_BLOCKED_CYCLES_COUNT),
+ NULL,
+ };
+
+@@ -560,7 +588,18 @@ __init const struct attribute_group **cp
+ default:
+ cfvn = none;
+ }
+- csvn = cpumcf_svn_generic_pmu_event_attr;
++
++ /* Determine version specific crypto set */
++ switch (ci.csvn) {
++ case 1 ... 5:
++ csvn = cpumcf_svn_12345_pmu_event_attr;
++ break;
++ case 6:
++ csvn = cpumcf_svn_6_pmu_event_attr;
++ break;
++ default:
++ csvn = none;
++ }
+
+ /* Determine model-specific counter set(s) */
+ get_cpu_id(&cpu_id);
diff --git a/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_alert-to-obtain-measurement-alerts b/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_alert-to-obtain-measurement-alerts
new file mode 100644
index 0000000000..63f14990a7
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_alert-to-obtain-measurement-alerts
@@ -0,0 +1,80 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 8 Aug 2018 10:30:37 +0200
+Subject: s390/cpum_cf: introduce kernel_cpumcf_alert() to obtain measurement
+ alerts
+Git-commit: 26b8317f51a20c1e4f61fbd2cc68975faad10b02
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+During a __kernel_cpumcf_begin()/end() session, save measurement alerts
+for the counter facility in the per-CPU cpu_cf_events variable.
+Users can obtain and, optionally, clear the alerts by calling
+kernel_cpumcf_alert() to specifically handle alerts.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 2 ++
+ arch/s390/kernel/perf_cpum_cf.c | 18 ++++++++++++++++++
+ 2 files changed, 20 insertions(+)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -52,6 +52,7 @@ static inline void ctr_set_stop(u64 *sta
+ struct cpu_cf_events {
+ struct cpumf_ctr_info info;
+ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
++ atomic64_t alert;
+ u64 state, tx_state;
+ unsigned int flags;
+ unsigned int txn_flags;
+@@ -59,6 +60,7 @@ struct cpu_cf_events {
+ DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
+
+ int __kernel_cpumcf_begin(void);
++unsigned long kernel_cpumcf_alert(int clear);
+ void __kernel_cpumcf_end(void);
+
+ #endif /* _ASM_S390_CPU_MCF_H */
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -30,6 +30,7 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu
+ [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
+ },
++ .alert = ATOMIC64_INIT(0),
+ .state = 0,
+ .flags = 0,
+ .txn_flags = 0,
+@@ -208,6 +209,9 @@ static void cpumf_measurement_alert(stru
+ if (alert & CPU_MF_INT_CF_MTDA)
+ pr_warn("CPU[%i] MT counter data was lost\n",
+ smp_processor_id());
++
++ /* store alert for special handling by in-kernel users */
++ atomic64_or(alert, &cpuhw->alert);
+ }
+
+ #define PMC_INIT 0
+@@ -258,6 +262,20 @@ int __kernel_cpumcf_begin(void)
+ }
+ EXPORT_SYMBOL(__kernel_cpumcf_begin);
+
++/* Obtain the CPU-measurement alerts for the counter facility */
++unsigned long kernel_cpumcf_alert(int clear)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ unsigned long alert;
++
++ alert = atomic64_read(&cpuhw->alert);
++ if (clear)
++ atomic64_set(&cpuhw->alert, 0);
++
++ return alert;
++}
++EXPORT_SYMBOL(kernel_cpumcf_alert);
++
+ /* Release the CPU-measurement counter facility */
+ void __kernel_cpumcf_end(void)
+ {
diff --git a/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_avail-function b/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_avail-function
new file mode 100644
index 0000000000..01098831af
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_avail-function
@@ -0,0 +1,71 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Thu, 25 Oct 2018 16:58:15 +0200
+Subject: s390/cpum_cf: introduce kernel_cpumcf_avail() function
+Git-commit: 869f4f98fafadddb3a871b9513f24a44479d8296
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+A preparation to move out common CPU-MF counter facility support
+functions, first introduce a function that indicates whether the
+support is ready to use.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 1 +
+ arch/s390/kernel/perf_cpum_cf.c | 20 +++++++++++++++++---
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -59,6 +59,7 @@ struct cpu_cf_events {
+ };
+ DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
+
++bool kernel_cpumcf_avail(void);
+ int __kernel_cpumcf_begin(void);
+ unsigned long kernel_cpumcf_alert(int clear);
+ void __kernel_cpumcf_end(void);
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -36,6 +36,9 @@ DEFINE_PER_CPU(struct cpu_cf_events, cpu
+ .txn_flags = 0,
+ };
+
++/* Indicator whether the CPU-Measurement Counter Facility Support is ready */
++static bool cpum_cf_initalized;
++
+ static enum cpumf_ctr_set get_counter_set(u64 event)
+ {
+ int set = CPUMF_CTR_SET_MAX;
+@@ -236,6 +239,12 @@ static void setup_pmc_cpu(void *flags)
+ lcctl(0);
+ }
+
++bool kernel_cpumcf_avail(void)
++{
++ return cpum_cf_initalized;
++}
++EXPORT_SYMBOL(kernel_cpumcf_avail);
++
+ /* Reserve/release functions for sharing perf hardware */
+ static DEFINE_SPINLOCK(cpumcf_owner_lock);
+ static void *cpumcf_owner;
+@@ -712,8 +721,13 @@ static int __init cpumf_pmu_init(void)
+ cpumf_measurement_alert);
+ return rc;
+ }
+- return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
+- "perf/s390/cf:online",
+- s390_pmu_online_cpu, s390_pmu_offline_cpu);
++
++ rc = cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
++ "perf/s390/cf:online",
++ s390_pmu_online_cpu, s390_pmu_offline_cpu);
++ if (!rc)
++ cpum_cf_initalized = true;
++
++ return rc;
+ }
+ early_initcall(cpumf_pmu_init);
diff --git a/patches.suse/s390-cpum_cf-move-counter-set-controls-to-a-new-header-file b/patches.suse/s390-cpum_cf-move-counter-set-controls-to-a-new-header-file
new file mode 100644
index 0000000000..d77ed79594
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-move-counter-set-controls-to-a-new-header-file
@@ -0,0 +1,164 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Mon, 6 Aug 2018 15:56:43 +0200
+Subject: s390/cpum_cf: move counter set controls to a new header file
+Git-commit: 30e145f811428e1a96738fbc1f98ccc91f412d93
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Move counter set specific controls and functions to the asm/cpu_mcf.h
+header file containing all counter facility support definitions. Also
+adapt few variable names and header file includes. No functional changes.
+
+[ ptesarik: Do not remove #include <asm/cpu_mf.h> from
+ arch/s390/include/asm/perf_event.h. It is still needed for struct
+ sf_raw_sample, because SLE15-SP1 does not contain commit
+ 3d43b981eb841a9493717e6d509f59553dbe8c7a ]
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 52 +++++++++++++++++++++++++++++++++
+ arch/s390/kernel/perf_cpum_cf.c | 45 +---------------------------
+ arch/s390/kernel/perf_cpum_cf_events.c | 1
+ 3 files changed, 56 insertions(+), 42 deletions(-)
+
+--- /dev/null
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -0,0 +1,52 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Counter facility support definitions for the Linux perf
++ *
++ * Copyright IBM Corp. 2019
++ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
++ */
++#ifndef _ASM_S390_CPU_MCF_H
++#define _ASM_S390_CPU_MCF_H
++
++#include <linux/perf_event.h>
++#include <asm/cpu_mf.h>
++
++enum cpumf_ctr_set {
++ CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
++ CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
++ CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
++ CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
++ CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
++
++ /* Maximum number of counter sets */
++ CPUMF_CTR_SET_MAX,
++};
++
++#define CPUMF_LCCTL_ENABLE_SHIFT 16
++#define CPUMF_LCCTL_ACTCTL_SHIFT 0
++static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
++ [CPUMF_CTR_SET_BASIC] = 0x02,
++ [CPUMF_CTR_SET_USER] = 0x04,
++ [CPUMF_CTR_SET_CRYPTO] = 0x08,
++ [CPUMF_CTR_SET_EXT] = 0x01,
++ [CPUMF_CTR_SET_MT_DIAG] = 0x20,
++};
++
++static inline void ctr_set_enable(u64 *state, int ctr_set)
++{
++ *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
++}
++static inline void ctr_set_disable(u64 *state, int ctr_set)
++{
++ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
++}
++static inline void ctr_set_start(u64 *state, int ctr_set)
++{
++ *state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
++}
++static inline void ctr_set_stop(u64 *state, int ctr_set)
++{
++ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
++}
++
++#endif /* _ASM_S390_CPU_MCF_H */
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -13,52 +13,13 @@
+
+ #include <linux/kernel.h>
+ #include <linux/kernel_stat.h>
+-#include <linux/perf_event.h>
+ #include <linux/percpu.h>
+ #include <linux/notifier.h>
+ #include <linux/init.h>
+ #include <linux/export.h>
+ #include <asm/ctl_reg.h>
+ #include <asm/irq.h>
+-#include <asm/cpu_mf.h>
+-
+-enum cpumf_ctr_set {
+- CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
+- CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
+- CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
+- CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
+- CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
+-
+- /* Maximum number of counter sets */
+- CPUMF_CTR_SET_MAX,
+-};
+-
+-#define CPUMF_LCCTL_ENABLE_SHIFT 16
+-#define CPUMF_LCCTL_ACTCTL_SHIFT 0
+-static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
+- [CPUMF_CTR_SET_BASIC] = 0x02,
+- [CPUMF_CTR_SET_USER] = 0x04,
+- [CPUMF_CTR_SET_CRYPTO] = 0x08,
+- [CPUMF_CTR_SET_EXT] = 0x01,
+- [CPUMF_CTR_SET_MT_DIAG] = 0x20,
+-};
+-
+-static void ctr_set_enable(u64 *state, int ctr_set)
+-{
+- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
+-}
+-static void ctr_set_disable(u64 *state, int ctr_set)
+-{
+- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
+-}
+-static void ctr_set_start(u64 *state, int ctr_set)
+-{
+- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
+-}
+-static void ctr_set_stop(u64 *state, int ctr_set)
+-{
+- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+-}
++#include <asm/cpu_mcf.h>
+
+ /* Local CPUMF event structure */
+ struct cpu_hw_events {
+@@ -138,7 +99,7 @@ static int validate_ctr_version(const st
+ * Thus, the counters can only be used if SMT is on and the
+ * counter set is enabled and active.
+ */
+- mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
++ mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
+ if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
+ (cpuhw->info.enable_ctl & mtdiag_ctl) &&
+ (cpuhw->info.act_ctl & mtdiag_ctl)))
+@@ -163,7 +124,7 @@ static int validate_ctr_auth(const struc
+ * return with -ENOENT in order to fall back to other
+ * PMUs that might suffice the event request.
+ */
+- ctrs_state = cpumf_state_ctl[hwc->config_base];
++ ctrs_state = cpumf_ctr_ctl[hwc->config_base];
+ if (!(ctrs_state & cpuhw->info.auth_ctl))
+ err = -ENOENT;
+
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -5,6 +5,7 @@
+
+ #include <linux/slab.h>
+ #include <linux/perf_event.h>
++#include <asm/cpu_mf.h>
+
+
+ /* BEGIN: CPUM_CF COUNTER DEFINITIONS =================================== */
diff --git a/patches.suse/s390-cpum_cf-prepare-for-in-kernel-counter-measurements b/patches.suse/s390-cpum_cf-prepare-for-in-kernel-counter-measurements
new file mode 100644
index 0000000000..cc26918ccc
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-prepare-for-in-kernel-counter-measurements
@@ -0,0 +1,101 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Mon, 6 Aug 2018 17:43:07 +0200
+Subject: s390/cpum_cf: prepare for in-kernel counter measurements
+Git-commit: 3d33345aa3d9ab2ee9b5a5bf2a8842c43603d537
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Prepare the counter facility support to be used by other in-kernel
+users. The first step introduces the __kernel_cpumcf_begin() and
+__kernel_cpumcf_end() functions to reserve the counter facility
+for doing measurements and to release after the measurements are
+done.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 3 +++
+ arch/s390/kernel/perf_cpum_cf.c | 32 ++++++++++++++++++++++++++------
+ 2 files changed, 29 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -49,4 +49,7 @@ static inline void ctr_set_stop(u64 *sta
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+ }
+
++int __kernel_cpumcf_begin(void);
++void __kernel_cpumcf_end(void);
++
+ #endif /* _ASM_S390_CPU_MCF_H */
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -239,25 +239,45 @@ static void setup_pmc_cpu(void *flags)
+ lcctl(0);
+ }
+
+-/* Initialize the CPU-measurement facility */
+-static int reserve_pmc_hardware(void)
++/* Reserve/release functions for sharing perf hardware */
++static DEFINE_SPINLOCK(cpumcf_owner_lock);
++static void *cpumcf_owner;
++
++/* Initialize the CPU-measurement counter facility */
++int __kernel_cpumcf_begin(void)
+ {
+ int flags = PMC_INIT;
++ int err = 0;
++
++ spin_lock(&cpumcf_owner_lock);
++ if (cpumcf_owner)
++ err = -EBUSY;
++ else
++ cpumcf_owner = __builtin_return_address(0);
++ spin_unlock(&cpumcf_owner_lock);
++ if (err)
++ return err;
+
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+ return 0;
+ }
++EXPORT_SYMBOL(__kernel_cpumcf_begin);
+
+-/* Release the CPU-measurement facility */
+-static void release_pmc_hardware(void)
++/* Release the CPU-measurement counter facility */
++void __kernel_cpumcf_end(void)
+ {
+ int flags = PMC_RELEASE;
+
+ on_each_cpu(setup_pmc_cpu, &flags, 1);
+ irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
++
++ spin_lock(&cpumcf_owner_lock);
++ cpumcf_owner = NULL;
++ spin_unlock(&cpumcf_owner_lock);
+ }
++EXPORT_SYMBOL(__kernel_cpumcf_end);
+
+ /* Release the PMU if event is the last perf event */
+ static void hw_perf_event_destroy(struct perf_event *event)
+@@ -265,7 +285,7 @@ static void hw_perf_event_destroy(struct
+ if (!atomic_add_unless(&num_events, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_events) == 0)
+- release_pmc_hardware();
++ __kernel_cpumcf_end();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ }
+@@ -366,7 +386,7 @@ static int __hw_perf_event_init(struct p
+ /* Initialize for using the CPU-measurement counter facility */
+ if (!atomic_inc_not_zero(&num_events)) {
+ mutex_lock(&pmc_reserve_mutex);
+- if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
++ if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
+ err = -EBUSY;
+ else
+ atomic_inc(&num_events);
diff --git a/patches.suse/s390-cpum_cf-rename-per-cpu-counter-facility-structure-and-variables b/patches.suse/s390-cpum_cf-rename-per-cpu-counter-facility-structure-and-variables
new file mode 100644
index 0000000000..08660ddecb
--- /dev/null
+++ b/patches.suse/s390-cpum_cf-rename-per-cpu-counter-facility-structure-and-variables
@@ -0,0 +1,186 @@
+From: Hendrik Brueckner <brueckner@linux.ibm.com>
+Date: Wed, 8 Aug 2018 10:04:23 +0200
+Subject: s390/cpum_cf: rename per-CPU counter facility structure and variables
+Git-commit: f1c0b83173e59c34daec48de92c0c2934e7417b2
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Rename the struct cpu_hw_events to cpu_cf_events and also the respective
+per-CPU variable to make its name more clear. No functional changes.
+
+Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/perf_cpum_cf.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+--- a/arch/s390/kernel/perf_cpum_cf.c
++++ b/arch/s390/kernel/perf_cpum_cf.c
+@@ -22,14 +22,14 @@
+ #include <asm/cpu_mcf.h>
+
+ /* Local CPUMF event structure */
+-struct cpu_hw_events {
++struct cpu_cf_events {
+ struct cpumf_ctr_info info;
+ atomic_t ctr_set[CPUMF_CTR_SET_MAX];
+ u64 state, tx_state;
+ unsigned int flags;
+ unsigned int txn_flags;
+ };
+-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
++static DEFINE_PER_CPU(struct cpu_cf_events, cpu_cf_events) = {
+ .ctr_set = {
+ [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
+ [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
+@@ -62,11 +62,11 @@ static enum cpumf_ctr_set get_counter_se
+
+ static int validate_ctr_version(const struct hw_perf_event *hwc)
+ {
+- struct cpu_hw_events *cpuhw;
++ struct cpu_cf_events *cpuhw;
+ int err = 0;
+ u16 mtdiag_ctl;
+
+- cpuhw = &get_cpu_var(cpu_hw_events);
++ cpuhw = &get_cpu_var(cpu_cf_events);
+
+ /* check required version for counter sets */
+ switch (hwc->config_base) {
+@@ -107,17 +107,17 @@ static int validate_ctr_version(const st
+ break;
+ }
+
+- put_cpu_var(cpu_hw_events);
++ put_cpu_var(cpu_cf_events);
+ return err;
+ }
+
+ static int validate_ctr_auth(const struct hw_perf_event *hwc)
+ {
+- struct cpu_hw_events *cpuhw;
++ struct cpu_cf_events *cpuhw;
+ u64 ctrs_state;
+ int err = 0;
+
+- cpuhw = &get_cpu_var(cpu_hw_events);
++ cpuhw = &get_cpu_var(cpu_cf_events);
+
+ /* Check authorization for cpu counter sets.
+ * If the particular CPU counter set is not authorized,
+@@ -128,7 +128,7 @@ static int validate_ctr_auth(const struc
+ if (!(ctrs_state & cpuhw->info.auth_ctl))
+ err = -ENOENT;
+
+- put_cpu_var(cpu_hw_events);
++ put_cpu_var(cpu_cf_events);
+ return err;
+ }
+
+@@ -139,7 +139,7 @@ static int validate_ctr_auth(const struc
+ */
+ static void cpumf_pmu_enable(struct pmu *pmu)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int err;
+
+ if (cpuhw->flags & PMU_F_ENABLED)
+@@ -162,7 +162,7 @@ static void cpumf_pmu_enable(struct pmu
+ */
+ static void cpumf_pmu_disable(struct pmu *pmu)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ int err;
+ u64 inactive;
+
+@@ -190,13 +190,13 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
+ static void cpumf_measurement_alert(struct ext_code ext_code,
+ unsigned int alert, unsigned long unused)
+ {
+- struct cpu_hw_events *cpuhw;
++ struct cpu_cf_events *cpuhw;
+
+ if (!(alert & CPU_MF_INT_CF_MASK))
+ return;
+
+ inc_irq_stat(IRQEXT_CMC);
+- cpuhw = this_cpu_ptr(&cpu_hw_events);
++ cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ /* Measurement alerts are shared and might happen when the PMU
+ * is not reserved. Ignore these alerts in this case. */
+@@ -221,7 +221,7 @@ static void cpumf_measurement_alert(stru
+ #define PMC_RELEASE 1
+ static void setup_pmc_cpu(void *flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ switch (*((int *) flags)) {
+ case PMC_INIT:
+@@ -472,7 +472,7 @@ static void cpumf_pmu_read(struct perf_e
+
+ static void cpumf_pmu_start(struct perf_event *event, int flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
+@@ -503,7 +503,7 @@ static void cpumf_pmu_start(struct perf_
+
+ static void cpumf_pmu_stop(struct perf_event *event, int flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+@@ -524,7 +524,7 @@ static void cpumf_pmu_stop(struct perf_e
+
+ static int cpumf_pmu_add(struct perf_event *event, int flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ /* Check authorization for the counter set to which this
+ * counter belongs.
+@@ -548,7 +548,7 @@ static int cpumf_pmu_add(struct perf_eve
+
+ static void cpumf_pmu_del(struct perf_event *event, int flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ cpumf_pmu_stop(event, PERF_EF_UPDATE);
+
+@@ -576,7 +576,7 @@ static void cpumf_pmu_del(struct perf_ev
+ */
+ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
+
+@@ -596,7 +596,7 @@ static void cpumf_pmu_start_txn(struct p
+ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
+ {
+ unsigned int txn_flags;
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+
+ WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
+
+@@ -617,7 +617,7 @@ static void cpumf_pmu_cancel_txn(struct
+ */
+ static int cpumf_pmu_commit_txn(struct pmu *pmu)
+ {
+- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ u64 state;
+
+ WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
diff --git a/patches.suse/s390-cpum_cf_diag-add-support-for-cpu-mf-svn-6 b/patches.suse/s390-cpum_cf_diag-add-support-for-cpu-mf-svn-6
new file mode 100644
index 0000000000..96888939cb
--- /dev/null
+++ b/patches.suse/s390-cpum_cf_diag-add-support-for-cpu-mf-svn-6
@@ -0,0 +1,48 @@
+From: Thomas-Mich Richter <tmricht@linux.ibm.com>
+Date: Tue, 23 Apr 2019 11:36:27 +0200
+Subject: s390/cpum_cf_diag: Add support for CPU-MF SVN 6
+Git-commit: 1c410fd6a561af452aba282b1cd3cabef2080d72
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Add support for the CPU-Measurement Facility counter
+second version number 6. This number is used to detect some
+more counters in the crypto counter set and the extended
+counter set.
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/perf_cpum_cf_diag.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/arch/s390/kernel/perf_cpum_cf_diag.c b/arch/s390/kernel/perf_cpum_cf_diag.c
+index b6854812d2ed..d4e031f7b9c8 100644
+--- a/arch/s390/kernel/perf_cpum_cf_diag.c
++++ b/arch/s390/kernel/perf_cpum_cf_diag.c
+@@ -306,15 +306,20 @@ static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
+ ctrset_size = 2;
+ break;
+ case CPUMF_CTR_SET_CRYPTO:
+- ctrset_size = 16;
++ if (info->csvn >= 1 && info->csvn <= 5)
++ ctrset_size = 16;
++ else if (info->csvn == 6)
++ ctrset_size = 20;
+ break;
+ case CPUMF_CTR_SET_EXT:
+ if (info->csvn == 1)
+ ctrset_size = 32;
+ else if (info->csvn == 2)
+ ctrset_size = 48;
+- else if (info->csvn >= 3)
++ else if (info->csvn >= 3 && info->csvn <= 5)
+ ctrset_size = 128;
++ else if (info->csvn == 6)
++ ctrset_size = 160;
+ break;
+ case CPUMF_CTR_SET_MT_DIAG:
+ if (info->csvn > 3)
+
diff --git a/patches.suse/s390-cpum_cf_diag-add-support-for-s390-counter-facility-diagnostic-trace b/patches.suse/s390-cpum_cf_diag-add-support-for-s390-counter-facility-diagnostic-trace
new file mode 100644
index 0000000000..3a9710a66a
--- /dev/null
+++ b/patches.suse/s390-cpum_cf_diag-add-support-for-s390-counter-facility-diagnostic-trace
@@ -0,0 +1,798 @@
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Mon, 29 Oct 2018 13:16:38 +0000
+Subject: s390/cpum_cf_diag: Add support for s390 counter facility diagnostic
+ trace
+Git-commit: fe5908bccc565f85cab025695627678cf257f91e
+Patch-mainline: v5.1-rc1
+References: jsc#SLE-6904 FATE#327581
+
+Introduce a PMU device named cpum_cf_diag. It extracts the
+values of all counters in all authorized counter sets and stores
+them as event raw data. This is done with the STORE CPU COUNTER
+MULTIPLE instruction to speed up access. All counter sets
+fit into one buffer. The values of each counter are taken
+when the event is started on the performance sub-system and when
+the event is stopped.
+This results in counter values available at the start and
+at the end of the measurement time frame. The difference is
+calculated for each counter. The differences of all
+counters are then saved as event raw data in the perf.data
+file.
+
+The counter values are accompanied by the time stamps
+when the counter set was started and when the counter set
+was stopped. This data is part of a trailer entry which
+describes the time frame, counter set version numbers,
+CPU speed, and machine type for later analysis.
+
+[ ptesarik: Use sched_clock_base_cc instead of &tod_clock_base[1],
+ because SLE15-SP1 does not contain a backport of upstream commit
+ 6e2ef5e4f6cc57344762932d70d38ba4ec65fa8b ]
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/cpu_mcf.h | 26 +
+ arch/s390/include/asm/perf_event.h | 1
+ arch/s390/kernel/Makefile | 1
+ arch/s390/kernel/perf_cpum_cf_diag.c | 693 +++++++++++++++++++++++++++++++++++
+ 4 files changed, 721 insertions(+)
+
+--- a/arch/s390/include/asm/cpu_mcf.h
++++ b/arch/s390/include/asm/cpu_mcf.h
+@@ -49,6 +49,26 @@ static inline void ctr_set_stop(u64 *sta
+ *state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
+ }
+
++static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets)
++{
++ *state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
++}
++
++static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets)
++{
++ *state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
++}
++
++static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets)
++{
++ *state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
++}
++
++static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets)
++{
++ *state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
++}
++
+ static inline int ctr_stcctm(enum cpumf_ctr_set set, u64 range, u64 *dest)
+ {
+ switch (set) {
+@@ -97,4 +117,10 @@ static inline void kernel_cpumcf_end(voi
+ preempt_enable();
+ }
+
++/* Return true if store counter set multiple instruction is available */
++static inline int stccm_avail(void)
++{
++ return test_facility(142);
++}
++
+ #endif /* _ASM_S390_CPU_MCF_H */
+--- a/arch/s390/include/asm/perf_event.h
++++ b/arch/s390/include/asm/perf_event.h
+@@ -52,6 +52,7 @@ struct perf_sf_sde_regs {
+ #define PERF_CPUM_SF_MAX_CTR 2
+ #define PERF_EVENT_CPUM_SF 0xB0000UL /* Event: Basic-sampling */
+ #define PERF_EVENT_CPUM_SF_DIAG 0xBD000UL /* Event: Combined-sampling */
++#define PERF_EVENT_CPUM_CF_DIAG 0xBC000UL /* Event: Counter sets */
+ #define PERF_CPUM_SF_BASIC_MODE 0x0001 /* Basic-sampling flag */
+ #define PERF_CPUM_SF_DIAG_MODE 0x0002 /* Diagnostic-sampling flag */
+ #define PERF_CPUM_SF_MODE_MASK (PERF_CPUM_SF_BASIC_MODE| \
+--- a/arch/s390/kernel/Makefile
++++ b/arch/s390/kernel/Makefile
+@@ -82,6 +82,7 @@ obj-$(CONFIG_UPROBES) += uprobes.o
+
+ obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_cpum_cf.o perf_cpum_sf.o
+ obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_events.o
++obj-$(CONFIG_PERF_EVENTS) += perf_cpum_cf_diag.o
+
+ obj-$(CONFIG_TRACEPOINTS) += trace.o
+
+--- /dev/null
++++ b/arch/s390/kernel/perf_cpum_cf_diag.c
+@@ -0,0 +1,693 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Performance event support for s390x - CPU-measurement Counter Sets
++ *
++ * Copyright IBM Corp. 2019
++ * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com>
++ * Thomas Richer <tmricht@linux.ibm.com>
++ */
++#define KMSG_COMPONENT "cpum_cf_diag"
++#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
++
++#include <linux/kernel.h>
++#include <linux/kernel_stat.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/init.h>
++#include <linux/export.h>
++#include <linux/slab.h>
++#include <linux/processor.h>
++
++#include <asm/ctl_reg.h>
++#include <asm/irq.h>
++#include <asm/cpu_mcf.h>
++#include <asm/timex.h>
++#include <asm/debug.h>
++
++#define CF_DIAG_CTRSET_DEF 0xfeef /* Counter set header mark */
++
++static unsigned int cf_diag_cpu_speed;
++static debug_info_t *cf_diag_dbg;
++
++struct cf_diag_csd { /* Counter set data per CPU */
++ size_t used; /* Bytes used in data/start */
++ unsigned char start[PAGE_SIZE]; /* Counter set at event start */
++ unsigned char data[PAGE_SIZE]; /* Counter set at event delete */
++};
++DEFINE_PER_CPU(struct cf_diag_csd, cf_diag_csd);
++
++/* Counter sets are stored as data stream in a page sized memory buffer and
++ * exported to user space via raw data attached to the event sample data.
++ * Each counter set starts with an eight byte header consisting of:
++ * - a two byte eye catcher (0xfeef)
++ * - a one byte counter set number
++ * - a two byte counter set size (indicates the number of counters in this set)
++ * - a three byte reserved value (must be zero) to make the header the same
++ * size as a counter value.
++ * All counter values are eight byte in size.
++ *
++ * All counter sets are followed by a 64 byte trailer.
++ * The trailer consists of a:
++ * - flag field indicating valid fields when corresponding bit set
++ * - the counter facility first and second version number
++ * - the CPU speed if nonzero
++ * - the time stamp the counter sets have been collected
++ * - the time of day (TOD) base value
++ * - the machine type.
++ *
++ * The counter sets are saved when the process is prepared to be executed on a
++ * CPU and saved again when the process is going to be removed from a CPU.
++ * The difference of both counter sets are calculated and stored in the event
++ * sample data area.
++ */
++
++struct cf_ctrset_entry { /* CPU-M CF counter set entry (8 byte) */
++ unsigned int def:16; /* 0-15 Data Entry Format */
++ unsigned int set:16; /* 16-31 Counter set identifier */
++ unsigned int ctr:16; /* 32-47 Number of stored counters */
++ unsigned int res1:16; /* 48-63 Reserved */
++};
++
++struct cf_trailer_entry { /* CPU-M CF_DIAG trailer (64 byte) */
++ /* 0 - 7 */
++ union {
++ struct {
++ unsigned int clock_base:1; /* TOD clock base set */
++ unsigned int speed:1; /* CPU speed set */
++ /* Measurement alerts */
++ unsigned int mtda:1; /* Loss of MT ctr. data alert */
++ unsigned int caca:1; /* Counter auth. change alert */
++ unsigned int lcda:1; /* Loss of counter data alert */
++ };
++ unsigned long flags; /* 0-63 All indicators */
++ };
++ /* 8 - 15 */
++ unsigned int cfvn:16; /* 64-79 Ctr First Version */
++ unsigned int csvn:16; /* 80-95 Ctr Second Version */
++ unsigned int cpu_speed:32; /* 96-127 CPU speed */
++ /* 16 - 23 */
++ unsigned long timestamp; /* 128-191 Timestamp (TOD) */
++ /* 24 - 55 */
++ union {
++ struct {
++ unsigned long progusage1;
++ unsigned long progusage2;
++ unsigned long progusage3;
++ unsigned long tod_base;
++ };
++ unsigned long progusage[4];
++ };
++ /* 56 - 63 */
++ unsigned int mach_type:16; /* Machine type */
++ unsigned int res1:16; /* Reserved */
++ unsigned int res2:32; /* Reserved */
++};
++
++/* Create the trailer data at the end of a page. */
++static void cf_diag_trailer(struct cf_trailer_entry *te)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ struct cpuid cpuid;
++
++ te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */
++ te->csvn = cpuhw->info.csvn;
++
++ get_cpu_id(&cpuid); /* Machine type */
++ te->mach_type = cpuid.machine;
++ te->cpu_speed = cf_diag_cpu_speed;
++ if (te->cpu_speed)
++ te->speed = 1;
++ te->clock_base = 1; /* Save clock base */
++ te->tod_base = sched_clock_base_cc;
++ store_tod_clock((__u64 *)&te->timestamp);
++}
++
++/*
++ * Change the CPUMF state to active.
++ * Enable and activate the CPU-counter sets according
++ * to the per-cpu control state.
++ */
++static void cf_diag_enable(struct pmu *pmu)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ int err;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s pmu %p cpu %d flags %#x state %#llx\n",
++ __func__, pmu, smp_processor_id(), cpuhw->flags,
++ cpuhw->state);
++ if (cpuhw->flags & PMU_F_ENABLED)
++ return;
++
++ err = lcctl(cpuhw->state);
++ if (err) {
++ pr_err("Enabling the performance measuring unit "
++ "failed with rc=%x\n", err);
++ return;
++ }
++ cpuhw->flags |= PMU_F_ENABLED;
++}
++
++/*
++ * Change the CPUMF state to inactive.
++ * Disable and enable (inactive) the CPU-counter sets according
++ * to the per-cpu control state.
++ */
++static void cf_diag_disable(struct pmu *pmu)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ u64 inactive;
++ int err;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s pmu %p cpu %d flags %#x state %#llx\n",
++ __func__, pmu, smp_processor_id(), cpuhw->flags,
++ cpuhw->state);
++ if (!(cpuhw->flags & PMU_F_ENABLED))
++ return;
++
++ inactive = cpuhw->state & ~((1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1);
++ err = lcctl(inactive);
++ if (err) {
++ pr_err("Disabling the performance measuring unit "
++ "failed with rc=%x\n", err);
++ return;
++ }
++ cpuhw->flags &= ~PMU_F_ENABLED;
++}
++
++/* Number of perf events counting hardware events */
++static atomic_t cf_diag_events = ATOMIC_INIT(0);
++
++/* Release the PMU if event is the last perf event */
++static void cf_diag_perf_event_destroy(struct perf_event *event)
++{
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d cf_diag_events %d\n",
++ __func__, event, event->cpu,
++ atomic_read(&cf_diag_events));
++ if (atomic_dec_return(&cf_diag_events) == 0)
++ __kernel_cpumcf_end();
++}
++
++/* Setup the event. Test for authorized counter sets and only include counter
++ * sets which are authorized at the time of the setup. Including unauthorized
++ * counter sets result in specification exception (and panic).
++ */
++static int __hw_perf_event_init(struct perf_event *event)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ struct perf_event_attr *attr = &event->attr;
++ enum cpumf_ctr_set i;
++ int err = 0;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d authorized %#x\n", __func__,
++ event, event->cpu, cpuhw->info.auth_ctl);
++
++ event->hw.config = attr->config;
++ event->hw.config_base = 0;
++ local64_set(&event->count, 0);
++
++ /* Add all authorized counter sets to config_base */
++ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
++ if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
++ event->hw.config_base |= cpumf_ctr_ctl[i];
++
++ /* No authorized counter sets, nothing to count/sample */
++ if (!event->hw.config_base) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ /* Set sample_period to indicate sampling */
++ event->hw.sample_period = attr->sample_period;
++ local64_set(&event->hw.period_left, event->hw.sample_period);
++ event->hw.last_period = event->hw.sample_period;
++out:
++ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
++ __func__, err, event->hw.config_base);
++ return err;
++}
++
++static int cf_diag_event_init(struct perf_event *event)
++{
++ struct perf_event_attr *attr = &event->attr;
++ int err = -ENOENT;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d config %#llx "
++ "sample_type %#llx cf_diag_events %d\n", __func__,
++ event, event->cpu, attr->config, attr->sample_type,
++ atomic_read(&cf_diag_events));
++
++ if (event->attr.config != PERF_EVENT_CPUM_CF_DIAG ||
++ event->attr.type != PERF_TYPE_RAW)
++ goto out;
++
++ /* Raw events are used to access counters directly,
++ * hence do not permit excludes.
++ * This event is usesless without PERF_SAMPLE_RAW to return counter set
++ * values as raw data.
++ */
++ if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv ||
++ !(attr->sample_type & (PERF_SAMPLE_CPU | PERF_SAMPLE_RAW))) {
++ err = -EOPNOTSUPP;
++ goto out;
++ }
++
++ /* Initialize for using the CPU-measurement counter facility */
++ if (atomic_inc_return(&cf_diag_events) == 1) {
++ if (__kernel_cpumcf_begin()) {
++ atomic_dec(&cf_diag_events);
++ err = -EBUSY;
++ goto out;
++ }
++ }
++ event->destroy = cf_diag_perf_event_destroy;
++
++ err = __hw_perf_event_init(event);
++ if (unlikely(err))
++ event->destroy(event);
++out:
++ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
++ return err;
++}
++
++static void cf_diag_read(struct perf_event *event)
++{
++ debug_sprintf_event(cf_diag_dbg, 5, "%s event %p\n", __func__, event);
++}
++
++/* Return the maximum possible counter set size (in number of 8 byte counters)
++ * depending on type and model number.
++ */
++static size_t cf_diag_ctrset_size(enum cpumf_ctr_set ctrset,
++ struct cpumf_ctr_info *info)
++{
++ size_t ctrset_size = 0;
++
++ switch (ctrset) {
++ case CPUMF_CTR_SET_BASIC:
++ if (info->cfvn >= 1)
++ ctrset_size = 6;
++ break;
++ case CPUMF_CTR_SET_USER:
++ if (info->cfvn == 1)
++ ctrset_size = 6;
++ else if (info->cfvn >= 3)
++ ctrset_size = 2;
++ break;
++ case CPUMF_CTR_SET_CRYPTO:
++ ctrset_size = 16;
++ break;
++ case CPUMF_CTR_SET_EXT:
++ if (info->csvn == 1)
++ ctrset_size = 32;
++ else if (info->csvn == 2)
++ ctrset_size = 48;
++ else if (info->csvn >= 3)
++ ctrset_size = 128;
++ break;
++ case CPUMF_CTR_SET_MT_DIAG:
++ if (info->csvn > 3)
++ ctrset_size = 48;
++ break;
++ case CPUMF_CTR_SET_MAX:
++ break;
++ }
++
++ return ctrset_size;
++}
++
++/* Calculate memory needed to store all counter sets together with header and
++ * trailer data. This is independend of the counter set authorization which
++ * can vary depending on the configuration.
++ */
++static size_t cf_diag_ctrset_maxsize(struct cpumf_ctr_info *info)
++{
++ size_t max_size = sizeof(struct cf_trailer_entry);
++ enum cpumf_ctr_set i;
++
++ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
++ size_t size = cf_diag_ctrset_size(i, info);
++
++ if (size)
++ max_size += size * sizeof(u64) +
++ sizeof(struct cf_ctrset_entry);
++ }
++ debug_sprintf_event(cf_diag_dbg, 5, "%s max_size %zu\n", __func__,
++ max_size);
++
++ return max_size;
++}
++
++/* Read a counter set. The counter set number determines which counter set and
++ * the CPUM-CF first and second version number determine the number of
++ * available counters in this counter set.
++ * Each counter set starts with header containing the counter set number and
++ * the number of 8 byte counters.
++ *
++ * The functions returns the number of bytes occupied by this counter set
++ * including the header.
++ * If there is no counter in the counter set, this counter set is useless and
++ * zero is returned on this case.
++ */
++static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
++ size_t room)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ size_t ctrset_size, need = 0;
++ int rc = 3; /* Assume write failure */
++
++ ctrdata->def = CF_DIAG_CTRSET_DEF;
++ ctrdata->set = ctrset;
++ ctrdata->res1 = 0;
++ ctrset_size = cf_diag_ctrset_size(ctrset, &cpuhw->info);
++
++ if (ctrset_size) { /* Save data */
++ need = ctrset_size * sizeof(u64) + sizeof(*ctrdata);
++ if (need <= room)
++ rc = ctr_stcctm(ctrset, ctrset_size,
++ (u64 *)(ctrdata + 1));
++ if (rc != 3)
++ ctrdata->ctr = ctrset_size;
++ else
++ need = 0;
++ }
++
++ debug_sprintf_event(cf_diag_dbg, 6,
++ "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
++ " need %zd rc:%d\n",
++ __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
++ cpuhw->info.csvn, need, rc);
++ return need;
++}
++
++/* Read out all counter sets and save them in the provided data buffer.
++ * The last 64 byte host an artificial trailer entry.
++ */
++static size_t cf_diag_getctr(void *data, size_t sz, unsigned long auth)
++{
++ struct cf_trailer_entry *trailer;
++ size_t offset = 0, done;
++ int i;
++
++ memset(data, 0, sz);
++ sz -= sizeof(*trailer); /* Always room for trailer */
++ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
++ struct cf_ctrset_entry *ctrdata = data + offset;
++
++ if (!(auth & cpumf_ctr_ctl[i]))
++ continue; /* Counter set not authorized */
++
++ done = cf_diag_getctrset(ctrdata, i, sz - offset);
++ offset += done;
++ debug_sprintf_event(cf_diag_dbg, 6,
++ "%s ctrset %d offset %zu done %zu\n",
++ __func__, i, offset, done);
++ }
++ trailer = data + offset;
++ cf_diag_trailer(trailer);
++ return offset + sizeof(*trailer);
++}
++
++/* Calculate the difference for each counter in a counter set. */
++static void cf_diag_diffctrset(u64 *pstart, u64 *pstop, int counters)
++{
++ for (; --counters >= 0; ++pstart, ++pstop)
++ if (*pstop >= *pstart)
++ *pstop -= *pstart;
++ else
++ *pstop = *pstart - *pstop;
++}
++
++/* Scan the counter sets and calculate the difference of each counter
++ * in each set. The result is the increment of each counter during the
++ * period the counter set has been activated.
++ *
++ * Return true on success.
++ */
++static int cf_diag_diffctr(struct cf_diag_csd *csd, unsigned long auth)
++{
++ struct cf_trailer_entry *trailer_start, *trailer_stop;
++ struct cf_ctrset_entry *ctrstart, *ctrstop;
++ size_t offset = 0;
++
++ auth &= (1 << CPUMF_LCCTL_ENABLE_SHIFT) - 1;
++ do {
++ ctrstart = (struct cf_ctrset_entry *)(csd->start + offset);
++ ctrstop = (struct cf_ctrset_entry *)(csd->data + offset);
++
++ if (memcmp(ctrstop, ctrstart, sizeof(*ctrstop))) {
++ pr_err("cpum_cf_diag counter set compare error "
++ "in set %i\n", ctrstart->set);
++ return 0;
++ }
++ auth &= ~cpumf_ctr_ctl[ctrstart->set];
++ if (ctrstart->def == CF_DIAG_CTRSET_DEF) {
++ cf_diag_diffctrset((u64 *)(ctrstart + 1),
++ (u64 *)(ctrstop + 1), ctrstart->ctr);
++ offset += ctrstart->ctr * sizeof(u64) +
++ sizeof(*ctrstart);
++ }
++ debug_sprintf_event(cf_diag_dbg, 6,
++ "%s set %d ctr %d offset %zu auth %lx\n",
++ __func__, ctrstart->set, ctrstart->ctr,
++ offset, auth);
++ } while (ctrstart->def && auth);
++
++ /* Save time_stamp from start of event in stop's trailer */
++ trailer_start = (struct cf_trailer_entry *)(csd->start + offset);
++ trailer_stop = (struct cf_trailer_entry *)(csd->data + offset);
++ trailer_stop->progusage[0] = trailer_start->timestamp;
++
++ return 1;
++}
++
++/* Create perf event sample with the counter sets as raw data. The sample
++ * is then pushed to the event subsystem and the function checks for
++ * possible event overflows. If an event overflow occurs, the PMU is
++ * stopped.
++ *
++ * Return non-zero if an event overflow occurred.
++ */
++static int cf_diag_push_sample(struct perf_event *event,
++ struct cf_diag_csd *csd)
++{
++ struct perf_sample_data data;
++ struct perf_raw_record raw;
++ struct pt_regs regs;
++ int overflow;
++
++ /* Setup perf sample */
++ perf_sample_data_init(&data, 0, event->hw.last_period);
++ memset(&regs, 0, sizeof(regs));
++ memset(&raw, 0, sizeof(raw));
++
++ if (event->attr.sample_type & PERF_SAMPLE_CPU)
++ data.cpu_entry.cpu = event->cpu;
++ if (event->attr.sample_type & PERF_SAMPLE_RAW) {
++ raw.frag.size = csd->used;
++ raw.frag.data = csd->data;
++ raw.size = csd->used;
++ data.raw = &raw;
++ }
++
++ overflow = perf_event_overflow(event, &data, &regs);
++ debug_sprintf_event(cf_diag_dbg, 6,
++ "%s event %p cpu %d sample_type %#llx raw %d "
++ "ov %d\n", __func__, event, event->cpu,
++ event->attr.sample_type, raw.size, overflow);
++ if (overflow)
++ event->pmu->stop(event, 0);
++
++ perf_event_update_userpage(event);
++ return overflow;
++}
++
++static void cf_diag_start(struct perf_event *event, int flags)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
++ struct hw_perf_event *hwc = &event->hw;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d flags %#x hwc-state %#x\n",
++ __func__, event, event->cpu, flags, hwc->state);
++ if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
++ return;
++
++ /* (Re-)enable and activate all counter sets */
++ lcctl(0); /* Reset counter sets */
++ hwc->state = 0;
++ ctr_set_multiple_enable(&cpuhw->state, hwc->config_base);
++ lcctl(cpuhw->state); /* Enable counter sets */
++ csd->used = cf_diag_getctr(csd->start, sizeof(csd->start),
++ event->hw.config_base);
++ ctr_set_multiple_start(&cpuhw->state, hwc->config_base);
++ /* Function cf_diag_enable() starts the counter sets. */
++}
++
++static void cf_diag_stop(struct perf_event *event, int flags)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ struct cf_diag_csd *csd = this_cpu_ptr(&cf_diag_csd);
++ struct hw_perf_event *hwc = &event->hw;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d flags %#x hwc-state %#x\n",
++ __func__, event, event->cpu, flags, hwc->state);
++
++ /* Deactivate all counter sets */
++ ctr_set_multiple_stop(&cpuhw->state, hwc->config_base);
++ local64_inc(&event->count);
++ csd->used = cf_diag_getctr(csd->data, sizeof(csd->data),
++ event->hw.config_base);
++ if (cf_diag_diffctr(csd, event->hw.config_base))
++ cf_diag_push_sample(event, csd);
++ hwc->state |= PERF_HES_STOPPED;
++}
++
++static int cf_diag_add(struct perf_event *event, int flags)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++ int err = 0;
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d flags %#x cpuhw:%p\n",
++ __func__, event, event->cpu, flags, cpuhw);
++
++ if (cpuhw->flags & PMU_F_IN_USE) {
++ err = -EAGAIN;
++ goto out;
++ }
++
++ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
++
++ cpuhw->flags |= PMU_F_IN_USE;
++ if (flags & PERF_EF_START)
++ cf_diag_start(event, PERF_EF_RELOAD);
++out:
++ debug_sprintf_event(cf_diag_dbg, 5, "%s err %d\n", __func__, err);
++ return err;
++}
++
++static void cf_diag_del(struct perf_event *event, int flags)
++{
++ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
++
++ debug_sprintf_event(cf_diag_dbg, 5,
++ "%s event %p cpu %d flags %#x\n",
++ __func__, event, event->cpu, flags);
++
++ cf_diag_stop(event, PERF_EF_UPDATE);
++ ctr_set_multiple_stop(&cpuhw->state, event->hw.config_base);
++ ctr_set_multiple_disable(&cpuhw->state, event->hw.config_base);
++ cpuhw->flags &= ~PMU_F_IN_USE;
++}
++
++CPUMF_EVENT_ATTR(CF_DIAG, CF_DIAG, PERF_EVENT_CPUM_CF_DIAG);
++
++static struct attribute *cf_diag_events_attr[] = {
++ CPUMF_EVENT_PTR(CF_DIAG, CF_DIAG),
++ NULL,
++};
++
++PMU_FORMAT_ATTR(event, "config:0-63");
++
++static struct attribute *cf_diag_format_attr[] = {
++ &format_attr_event.attr,
++ NULL,
++};
++
++static struct attribute_group cf_diag_events_group = {
++ .name = "events",
++ .attrs = cf_diag_events_attr,
++};
++static struct attribute_group cf_diag_format_group = {
++ .name = "format",
++ .attrs = cf_diag_format_attr,
++};
++static const struct attribute_group *cf_diag_attr_groups[] = {
++ &cf_diag_events_group,
++ &cf_diag_format_group,
++ NULL,
++};
++
++/* Performance monitoring unit for s390x */
++static struct pmu cf_diag = {
++ .task_ctx_nr = perf_sw_context,
++ .pmu_enable = cf_diag_enable,
++ .pmu_disable = cf_diag_disable,
++ .event_init = cf_diag_event_init,
++ .add = cf_diag_add,
++ .del = cf_diag_del,
++ .start = cf_diag_start,
++ .stop = cf_diag_stop,
++ .read = cf_diag_read,
++
++ .attr_groups = cf_diag_attr_groups
++};
++
++/* Get the CPU speed, try sampling facility first and CPU attributes second. */
++static void cf_diag_get_cpu_speed(void)
++{
++ if (cpum_sf_avail()) { /* Sampling facility first */
++ struct hws_qsi_info_block si;
++
++ memset(&si, 0, sizeof(si));
++ if (!qsi(&si)) {
++ cf_diag_cpu_speed = si.cpu_speed;
++ return;
++ }
++ }
++
++ if (test_facility(34)) { /* CPU speed extract static part */
++ unsigned long mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
++
++ if (mhz != -1UL)
++ cf_diag_cpu_speed = mhz & 0xffffffff;
++ }
++}
++
++/* Initialize the counter set PMU to generate complete counter set data as
++ * event raw data. This relies on the CPU Measurement Counter Facility device
++ * already being loaded and initialized.
++ */
++static int __init cf_diag_init(void)
++{
++ struct cpumf_ctr_info info;
++ size_t need;
++ int rc;
++
++ if (!kernel_cpumcf_avail() || !stccm_avail() || qctri(&info))
++ return -ENODEV;
++ cf_diag_get_cpu_speed();
++
++ /* Make sure the counter set data fits into predefined buffer. */
++ need = cf_diag_ctrset_maxsize(&info);
++ if (need > sizeof(((struct cf_diag_csd *)0)->start)) {
++ pr_err("Insufficient memory for PMU(cpum_cf_diag) need=%zu\n",
++ need);
++ return -ENOMEM;
++ }
++
++ /* Setup s390dbf facility */
++ cf_diag_dbg = debug_register(KMSG_COMPONENT, 2, 1, 128);
++ if (!cf_diag_dbg) {
++ pr_err("Registration of s390dbf(cpum_cf_diag) failed\n");
++ return -ENOMEM;
++ }
++ debug_register_view(cf_diag_dbg, &debug_sprintf_view);
++
++ rc = perf_pmu_register(&cf_diag, "cpum_cf_diag", PERF_TYPE_RAW);
++ if (rc) {
++ debug_unregister_view(cf_diag_dbg, &debug_sprintf_view);
++ debug_unregister(cf_diag_dbg);
++ pr_err("Registration of PMU(cpum_cf_diag) failed with rc=%i\n",
++ rc);
++ }
++ return rc;
++}
++arch_initcall(cf_diag_init);
diff --git a/patches.suse/s390-cpumf-add-extended-counter-set-definitions-for-model-8561-and-8562 b/patches.suse/s390-cpumf-add-extended-counter-set-definitions-for-model-8561-and-8562
new file mode 100644
index 0000000000..5a6abba04d
--- /dev/null
+++ b/patches.suse/s390-cpumf-add-extended-counter-set-definitions-for-model-8561-and-8562
@@ -0,0 +1,30 @@
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Fri, 12 Apr 2019 11:32:28 +0200
+Subject: s390/cpumf: Add extended counter set definitions for model 8561 and
+ 8562
+Git-commit: 820bace734722715c643dcb5f74b502cb912d4eb
+Patch-mainline: v5.2-rc1
+References: bsc#1142052 LTC#179320
+
+Add the extended counter set definitions for s390 machine types
+8561 and 8262. They are identical with machine types 3906 and
+3907.
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/perf_cpum_cf_events.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/s390/kernel/perf_cpum_cf_events.c
++++ b/arch/s390/kernel/perf_cpum_cf_events.c
+@@ -623,6 +623,8 @@ __init const struct attribute_group **cp
+ break;
+ case 0x3906:
+ case 0x3907:
++ case 0x8561:
++ case 0x8562:
+ model = cpumcf_z14_pmu_event_attr;
+ break;
+ default:
diff --git a/patches.suse/s390-cpumf-fix-warning-from-check_processor_id b/patches.suse/s390-cpumf-fix-warning-from-check_processor_id
new file mode 100644
index 0000000000..cd4253575f
--- /dev/null
+++ b/patches.suse/s390-cpumf-fix-warning-from-check_processor_id
@@ -0,0 +1,91 @@
+From: Thomas Richter <tmricht@linux.ibm.com>
+Date: Mon, 18 Mar 2019 15:50:27 +0100
+Subject: s390/cpumf: Fix warning from check_processor_id
+Git-commit: b6ffdf27f3d4f1e9af56effe6f86989170d71e95
+Patch-mainline: v5.1-rc3
+References: jsc#SLE-6904 FATE#327581
+
+Function __hw_perf_event_init() used a CPU variable without
+ensuring CPU preemption has been disabled. This caused the
+following warning in the kernel log:
+
+ [ 7.277085] BUG: using smp_processor_id() in preemptible
+ [00000000] code: cf-csdiag/1892
+ [ 7.277111] caller is cf_diag_event_init+0x13a/0x338
+ [ 7.277122] CPU: 10 PID: 1892 Comm: cf-csdiag Not tainted
+ 5.0.0-20190318.rc0.git0.9e1a11e0f602.300.fc29.s390x+debug #1
+ [ 7.277131] Hardware name: IBM 2964 NC9 712 (LPAR)
+ [ 7.277139] Call Trace:
+ [ 7.277150] ([<000000000011385a>] show_stack+0x82/0xd0)
+ [ 7.277161] [<0000000000b7a71a>] dump_stack+0x92/0xd0
+ [ 7.277174] [<00000000007b7e9c>] check_preemption_disabled+0xe4/0x100
+ [ 7.277183] [<00000000001228aa>] cf_diag_event_init+0x13a/0x338
+ [ 7.277195] [<00000000002cf3aa>] perf_try_init_event+0x72/0xf0
+ [ 7.277204] [<00000000002d0bba>] perf_event_alloc+0x6fa/0xce0
+ [ 7.277214] [<00000000002dc4a8>] __s390x_sys_perf_event_open+0x398/0xd50
+ [ 7.277224] [<0000000000b9e8f0>] system_call+0xdc/0x2d8
+ [ 7.277233] 2 locks held by cf-csdiag/1892:
+ [ 7.277241] #0: 00000000976f5510 (&sig->cred_guard_mutex){+.+.},
+ at: __s390x_sys_perf_event_open+0xd2e/0xd50
+ [ 7.277257] #1: 00000000363b11bd (&pmus_srcu){....},
+ at: perf_event_alloc+0x52e/0xce0
+
+The variable is now accessed in proper context. Use
+get_cpu_var()/put_cpu_var() pair to disable
+preemption during access.
+As the hardware authorization settings apply to all CPUs, it
+does not matter which CPU is used to check the authorization setting.
+
+Remove the event->count assignment. It is not needed as function
+perf_event_alloc() allocates memory for the event with kzalloc() and
+thus count is already set to zero.
+
+Fixes: fe5908bccc56 ("s390/cpum_cf_diag: Add support for s390 counter facility diagnostic trace")
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Reviewed-by: Hendrik Brueckner <brueckner@linux.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/kernel/perf_cpum_cf_diag.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+--- a/arch/s390/kernel/perf_cpum_cf_diag.c
++++ b/arch/s390/kernel/perf_cpum_cf_diag.c
+@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(s
+ */
+ static int __hw_perf_event_init(struct perf_event *event)
+ {
+- struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
+ struct perf_event_attr *attr = &event->attr;
++ struct cpu_cf_events *cpuhw;
+ enum cpumf_ctr_set i;
+ int err = 0;
+
+- debug_sprintf_event(cf_diag_dbg, 5,
+- "%s event %p cpu %d authorized %#x\n", __func__,
+- event, event->cpu, cpuhw->info.auth_ctl);
++ debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
++ event, event->cpu);
+
+ event->hw.config = attr->config;
+ event->hw.config_base = 0;
+- local64_set(&event->count, 0);
+
+- /* Add all authorized counter sets to config_base */
++ /* Add all authorized counter sets to config_base. The
++ * the hardware init function is either called per-cpu or just once
++ * for all CPUS (event->cpu == -1). This depends on the whether
++ * counting is started for all CPUs or on a per workload base where
++ * the perf event moves from one CPU to another CPU.
++ * Checking the authorization on any CPU is fine as the hardware
++ * applies the same authorization settings to all CPUs.
++ */
++ cpuhw = &get_cpu_var(cpu_cf_events);
+ for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
+ if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
+ event->hw.config_base |= cpumf_ctr_ctl[i];
++ put_cpu_var(cpu_cf_events);
+
+ /* No authorized counter sets, nothing to count/sample */
+ if (!event->hw.config_base) {
diff --git a/patches.suse/s390-mm-force-swiotlb-for-protected-virtualization b/patches.suse/s390-mm-force-swiotlb-for-protected-virtualization
new file mode 100644
index 0000000000..f94672ef2d
--- /dev/null
+++ b/patches.suse/s390-mm-force-swiotlb-for-protected-virtualization
@@ -0,0 +1,240 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Thu, 13 Sep 2018 18:57:16 +0200
+Subject: s390/mm: force swiotlb for protected virtualization
+Git-commit: 64e1f0c531d1072cd97939bf0d8df42b26713543
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+On s390, protected virtualization guests have to use bounced I/O
+buffers. That requires some plumbing.
+
+Let us make sure, any device that uses DMA API with direct ops correctly
+is spared from the problems, that a hypervisor attempting I/O to a
+non-shared page would bring.
+
+[ ptesarik: config SWIOTLB was added to arch/s390/Kconfig, because
+ SLE15-SP1 does not contain commit
+ 09230cbc1baba68e0ca1e7c489344ce5d35c6f27, which itself depends on
+ commit f21254cdd147d703ed9b79382cab8aff5a966397, but the latter
+ would change CONFIG_SWIOTLB for armv7hl in Leap 15.1. ]
+[ ptesarik: s390_pv_dma_ops is implemented in arch-specific code;
+ commit 55897af63091ebc2c3f239c6a6666f748113ac50 looks too intrusive
+ for a backport. ]
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/Kconfig | 7 ++
+ arch/s390/include/asm/dma-mapping.h | 3 -
+ arch/s390/include/asm/mem_encrypt.h | 17 ++++++
+ arch/s390/mm/init.c | 98 ++++++++++++++++++++++++++++++++++++
+ 4 files changed, 124 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/Kconfig
++++ b/arch/s390/Kconfig
+@@ -1,3 +1,6 @@
++config ARCH_HAS_MEM_ENCRYPT
++ def_bool y
++
+ config MMU
+ def_bool y
+
+@@ -62,6 +65,9 @@ config PCI_QUIRKS
+ config ARCH_SUPPORTS_UPROBES
+ def_bool y
+
++config SWIOTLB
++ bool
++
+ config S390
+ def_bool y
+ select ARCH_BINFMT_ELF_STATE
+@@ -179,6 +185,7 @@ config S390
+ select ARCH_HAS_SCALED_CPUTIME
+ select VIRT_TO_BUS
+ select HAVE_NMI
++ select SWIOTLB
+
+
+ config SCHED_OMIT_FRAME_POINTER
+--- a/arch/s390/include/asm/dma-mapping.h
++++ b/arch/s390/include/asm/dma-mapping.h
+@@ -11,10 +11,11 @@
+ #define DMA_ERROR_CODE (~(dma_addr_t) 0x0)
+
+ extern const struct dma_map_ops s390_pci_dma_ops;
++extern const struct dma_map_ops *s390_dma_ops;
+
+ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
+ {
+- return &dma_noop_ops;
++ return s390_dma_ops;
+ }
+
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+--- /dev/null
++++ b/arch/s390/include/asm/mem_encrypt.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef S390_MEM_ENCRYPT_H__
++#define S390_MEM_ENCRYPT_H__
++
++#ifndef __ASSEMBLY__
++
++#define sme_me_mask 0ULL
++
++static inline bool sme_active(void) { return false; }
++extern bool sev_active(void);
++
++int set_memory_encrypted(unsigned long addr, int numpages);
++int set_memory_decrypted(unsigned long addr, int numpages);
++
++#endif /* __ASSEMBLY__ */
++
++#endif /* S390_MEM_ENCRYPT_H__ */
+--- a/arch/s390/mm/init.c
++++ b/arch/s390/mm/init.c
+@@ -17,6 +17,7 @@
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+ #include <linux/swap.h>
++#include <linux/swiotlb.h>
+ #include <linux/smp.h>
+ #include <linux/init.h>
+ #include <linux/pagemap.h>
+@@ -29,6 +30,7 @@
+ #include <linux/cma.h>
+ #include <linux/gfp.h>
+ #include <linux/memblock.h>
++#include <linux/dma-mapping.h>
+ #include <asm/processor.h>
+ #include <linux/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -41,6 +43,8 @@
+ #include <asm/ctl_reg.h>
+ #include <asm/sclp.h>
+ #include <asm/set_memory.h>
++#include <asm/dma-mapping.h>
++#include <asm/uv.h>
+
+ pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+
+@@ -48,6 +52,9 @@ unsigned long empty_zero_page, zero_page
+ EXPORT_SYMBOL(empty_zero_page);
+ EXPORT_SYMBOL(zero_page_mask);
+
++const struct dma_map_ops *s390_dma_ops = &dma_noop_ops;
++EXPORT_SYMBOL(s390_dma_ops);
++
+ static void __init setup_zero_pages(void)
+ {
+ unsigned int order;
+@@ -119,6 +126,95 @@ void mark_rodata_ro(void)
+ pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
+ }
+
++int set_memory_encrypted(unsigned long addr, int numpages)
++{
++ int i;
++
++ /* make specified pages unshared, (swiotlb, dma_free) */
++ for (i = 0; i < numpages; ++i) {
++ uv_remove_shared(addr);
++ addr += PAGE_SIZE;
++ }
++ return 0;
++}
++
++int set_memory_decrypted(unsigned long addr, int numpages)
++{
++ int i;
++ /* make specified pages shared (swiotlb, dma_alloca) */
++ for (i = 0; i < numpages; ++i) {
++ uv_set_shared(addr);
++ addr += PAGE_SIZE;
++ }
++ return 0;
++}
++
++/* are we a protected virtualization guest? */
++bool sev_active(void)
++{
++ return is_prot_virt_guest();
++}
++
++static void *s390_pv_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp,
++ unsigned long attrs)
++{
++ void *ret;
++
++ if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
++ gfp |= GFP_DMA;
++ ret = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
++
++ /* share */
++ if (ret)
++ set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
++
++ return ret;
++}
++
++static void s390_pv_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_addr,
++ unsigned long attrs)
++{
++ if (!vaddr)
++ return;
++
++ /* unshare */
++ set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
++
++ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
++}
++
++static const struct dma_map_ops s390_pv_dma_ops = {
++ .alloc = s390_pv_alloc_coherent,
++ .free = s390_pv_free_coherent,
++ .map_page = swiotlb_map_page,
++ .unmap_page = swiotlb_unmap_page,
++ .map_sg = swiotlb_map_sg_attrs,
++ .unmap_sg = swiotlb_unmap_sg_attrs,
++ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = swiotlb_sync_single_for_device,
++ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = swiotlb_sync_sg_for_device,
++ .dma_supported = swiotlb_dma_supported,
++ .mapping_error = swiotlb_dma_mapping_error,
++ .max_mapping_size = swiotlb_max_mapping_size,
++};
++
++/* protected virtualization */
++static void pv_init(void)
++{
++ if (!is_prot_virt_guest())
++ return;
++
++ /* make sure bounce buffers are shared */
++ swiotlb_init(1);
++ swiotlb_update_mem_attributes();
++ swiotlb_force = SWIOTLB_FORCE;
++ /* use swiotlb_dma_ops */
++ s390_dma_ops = &s390_pv_dma_ops;
++}
++
+ void __init mem_init(void)
+ {
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+@@ -127,6 +223,8 @@ void __init mem_init(void)
+ set_max_mapnr(max_low_pfn);
+ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+
++ pv_init();
++
+ /* Setup guest page hinting */
+ cmma_init();
+
diff --git a/patches.suse/s390-remove-the-unused-dma_capable-helper b/patches.suse/s390-remove-the-unused-dma_capable-helper
new file mode 100644
index 0000000000..578c749189
--- /dev/null
+++ b/patches.suse/s390-remove-the-unused-dma_capable-helper
@@ -0,0 +1,27 @@
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 20 Dec 2017 15:56:46 +0100
+Subject: s390: remove the unused dma_capable helper
+Git-commit: 8fbc02ac62677603e75a67385fccd0c0bc69fa5c
+Patch-mainline: v4.16-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/dma-mapping.h | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/arch/s390/include/asm/dma-mapping.h
++++ b/arch/s390/include/asm/dma-mapping.h
+@@ -22,11 +22,4 @@ static inline void dma_cache_sync(struct
+ {
+ }
+
+-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+-{
+- if (!dev->dma_mask)
+- return false;
+- return addr + size - 1 <= *dev->dma_mask;
+-}
+-
+ #endif /* _ASM_S390_DMA_MAPPING_H */
diff --git a/patches.suse/s390-report-new-cpu-capabilities b/patches.suse/s390-report-new-cpu-capabilities
new file mode 100644
index 0000000000..6b3e2927c0
--- /dev/null
+++ b/patches.suse/s390-report-new-cpu-capabilities
@@ -0,0 +1,66 @@
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Tue, 5 Feb 2019 16:15:01 +0100
+Subject: s390: report new CPU capabilities
+Git-commit: a8fd61688dfad6fdce95fa64cacd8a66595697b8
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6907 FATE#327564 LTC#175887
+
+Add hardware capability bits and features tags to /proc/cpuinfo
+for 4 new CPU features:
+ "Vector-Enhancements Facility 2" (tag "vxe2", hwcap 2^15)
+ "Vector-Packed-Decimal-Enhancement Facility" (tag "vxp", hwcap 2^16)
+ "Enhanced-Sort Facility" (tag "sort", hwcap 2^17)
+ "Deflate-Conversion Facility" (tag "dflt", hwcap 2^18)
+
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ arch/s390/include/asm/elf.h | 4 ++++
+ arch/s390/kernel/processor.c | 3 ++-
+ arch/s390/kernel/setup.c | 8 ++++++++
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/include/asm/elf.h
++++ b/arch/s390/include/asm/elf.h
+@@ -106,6 +106,10 @@
+ #define HWCAP_S390_VXRS_BCD 4096
+ #define HWCAP_S390_VXRS_EXT 8192
+ #define HWCAP_S390_GS 16384
++#define HWCAP_S390_VXRS_EXT2 32768
++#define HWCAP_S390_VXRS_PDE 65536
++#define HWCAP_S390_SORT 131072
++#define HWCAP_S390_DFLT 262144
+
+ /* Internal bits, not exposed via elf */
+ #define HWCAP_INT_SIE 1UL
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -108,7 +108,8 @@ static void show_cpu_summary(struct seq_
+ {
+ static const char *hwcap_str[] = {
+ "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
+- "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
++ "edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs",
++ "vxe2", "vxp", "sort", "dflt"
+ };
+ static const char * const int_hwcap_str[] = {
+ "sie"
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -785,7 +785,15 @@ static int __init setup_hwcaps(void)
+ elf_hwcap |= HWCAP_S390_VXRS_EXT;
+ if (test_facility(135))
+ elf_hwcap |= HWCAP_S390_VXRS_BCD;
++ if (test_facility(148))
++ elf_hwcap |= HWCAP_S390_VXRS_EXT2;
++ if (test_facility(152))
++ elf_hwcap |= HWCAP_S390_VXRS_PDE;
+ }
++ if (test_facility(150))
++ elf_hwcap |= HWCAP_S390_SORT;
++ if (test_facility(151))
++ elf_hwcap |= HWCAP_S390_DFLT;
+
+ /*
+ * Guarded storage support HWCAP_S390_GS is bit 12.
diff --git a/patches.suse/virtio-s390-add-indirection-to-indicators-access b/patches.suse/virtio-s390-add-indirection-to-indicators-access
new file mode 100644
index 0000000000..1f80e1283e
--- /dev/null
+++ b/patches.suse/virtio-s390-add-indirection-to-indicators-access
@@ -0,0 +1,125 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Mon, 3 Dec 2018 17:18:07 +0100
+Subject: virtio/s390: add indirection to indicators access
+Git-commit: 22a4a639b9cebff4568f32202e96d6f286251b72
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+This will come in handy soon when we pull out the indicators from
+virtio_ccw_device to a memory area that is shared with the hypervisor
+(in particular for protected virtualization guests).
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Pierre Morel <pmorel@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 40 ++++++++++++++++++++++++---------------
+ 1 file changed, 25 insertions(+), 15 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -71,6 +71,16 @@ struct virtio_ccw_device {
+ void *airq_info;
+ };
+
++static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
++{
++ return &vcdev->indicators;
++}
++
++static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
++{
++ return &vcdev->indicators2;
++}
++
+ struct vq_info_block_legacy {
+ __u64 queue;
+ __u32 align;
+@@ -341,17 +351,17 @@ static void virtio_ccw_drop_indicator(st
+ ccw->cda = (__u32)(unsigned long) thinint_area;
+ } else {
+ /* payload is the address of the indicators */
+- indicatorp = kmalloc(sizeof(&vcdev->indicators),
++ indicatorp = kmalloc(sizeof(indicators(vcdev)),
+ GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ return;
+ *indicatorp = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+- ccw->count = sizeof(&vcdev->indicators);
++ ccw->count = sizeof(indicators(vcdev));
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ }
+ /* Deregister indicators from host. */
+- vcdev->indicators = 0;
++ *indicators(vcdev) = 0;
+ ccw->flags = 0;
+ ret = ccw_io_helper(vcdev, ccw,
+ vcdev->is_thinint ?
+@@ -654,10 +664,10 @@ static int virtio_ccw_find_vqs(struct vi
+ * We need a data area under 2G to communicate. Our payload is
+ * the address of the indicators.
+ */
+- indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
++ indicatorp = kmalloc(sizeof(indicators(vcdev)), GFP_DMA | GFP_KERNEL);
+ if (!indicatorp)
+ goto out;
+- *indicatorp = (unsigned long) &vcdev->indicators;
++ *indicatorp = (unsigned long) indicators(vcdev);
+ if (vcdev->is_thinint) {
+ ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
+ if (ret)
+@@ -666,21 +676,21 @@ static int virtio_ccw_find_vqs(struct vi
+ }
+ if (!vcdev->is_thinint) {
+ /* Register queue indicators with host. */
+- vcdev->indicators = 0;
++ *indicators(vcdev) = 0;
+ ccw->cmd_code = CCW_CMD_SET_IND;
+ ccw->flags = 0;
+- ccw->count = sizeof(&vcdev->indicators);
++ ccw->count = sizeof(indicators(vcdev));
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
+ if (ret)
+ goto out;
+ }
+ /* Register indicators2 with host for config changes */
+- *indicatorp = (unsigned long) &vcdev->indicators2;
+- vcdev->indicators2 = 0;
++ *indicatorp = (unsigned long) indicators2(vcdev);
++ *indicators2(vcdev) = 0;
+ ccw->cmd_code = CCW_CMD_SET_CONF_IND;
+ ccw->flags = 0;
+- ccw->count = sizeof(&vcdev->indicators2);
++ ccw->count = sizeof(indicators2(vcdev));
+ ccw->cda = (__u32)(unsigned long) indicatorp;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
+ if (ret)
+@@ -1072,17 +1082,17 @@ static void virtio_ccw_int_handler(struc
+ vcdev->err = -EIO;
+ }
+ virtio_ccw_check_activity(vcdev, activity);
+- for_each_set_bit(i, &vcdev->indicators,
+- sizeof(vcdev->indicators) * BITS_PER_BYTE) {
++ for_each_set_bit(i, indicators(vcdev),
++ sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
+ /* The bit clear must happen before the vring kick. */
+- clear_bit(i, &vcdev->indicators);
++ clear_bit(i, indicators(vcdev));
+ barrier();
+ vq = virtio_ccw_vq_by_ind(vcdev, i);
+ vring_interrupt(0, vq);
+ }
+- if (test_bit(0, &vcdev->indicators2)) {
++ if (test_bit(0, indicators2(vcdev))) {
+ virtio_config_changed(&vcdev->vdev);
+- clear_bit(0, &vcdev->indicators2);
++ clear_bit(0, indicators2(vcdev));
+ }
+ }
+
diff --git a/patches.suse/virtio-s390-dma-support-for-virtio-ccw b/patches.suse/virtio-s390-dma-support-for-virtio-ccw
new file mode 100644
index 0000000000..dc3f7fd72d
--- /dev/null
+++ b/patches.suse/virtio-s390-dma-support-for-virtio-ccw
@@ -0,0 +1,99 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Fri, 26 Apr 2019 20:32:37 +0200
+Subject: virtio/s390: DMA support for virtio-ccw
+Git-commit: f35f54f11f7f96dc6d43fa79c4a7297a9115627f
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Currently virtio-ccw devices do not work if the device has
+VIRTIO_F_IOMMU_PLATFORM. In future we do want to support DMA API with
+virtio-ccw.
+
+Let us do the plumbing, so the feature VIRTIO_F_IOMMU_PLATFORM works
+with virtio-ccw.
+
+Let us also switch from legacy avail/used accessors to the DMA aware
+ones (even if it isn't strictly necessary), and remove the legacy
+accessors (we were the last users).
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 16 +++++++++++++---
+ include/linux/virtio.h | 17 -----------------
+ 2 files changed, 13 insertions(+), 20 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -69,6 +69,7 @@ struct virtio_ccw_device {
+ bool device_lost;
+ unsigned int config_ready;
+ void *airq_info;
++ u64 dma_mask;
+ };
+
+ struct vq_info_block_legacy {
+@@ -542,8 +543,8 @@ static struct virtqueue *virtio_ccw_setu
+ info->info_block->s.desc = queue;
+ info->info_block->s.index = i;
+ info->info_block->s.num = info->num;
+- info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+- info->info_block->s.used = (__u64)virtqueue_get_used(vq);
++ info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
++ info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
+ ccw->count = sizeof(info->info_block->s);
+ }
+ ccw->cmd_code = CCW_CMD_SET_VQ;
+@@ -1233,6 +1234,16 @@ static int virtio_ccw_online(struct ccw_
+ ret = -ENOMEM;
+ goto out_free;
+ }
++
++ vcdev->vdev.dev.parent = &cdev->dev;
++ cdev->dev.dma_mask = &vcdev->dma_mask;
++ /* we are fine with common virtio infrastructure using 64 bit DMA */
++ ret = dma_set_mask_and_coherent(&cdev->dev, DMA_BIT_MASK(64));
++ if (ret) {
++ dev_warn(&cdev->dev, "Failed to enable 64-bit DMA.\n");
++ goto out_free;
++ }
++
+ vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+ GFP_DMA | GFP_KERNEL);
+ if (!vcdev->config_block) {
+@@ -1247,7 +1258,6 @@ static int virtio_ccw_online(struct ccw_
+
+ vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */
+
+- vcdev->vdev.dev.parent = &cdev->dev;
+ vcdev->vdev.dev.release = virtio_ccw_release_dev;
+ vcdev->vdev.config = &virtio_ccw_config_ops;
+ vcdev->cdev = cdev;
+--- a/include/linux/virtio.h
++++ b/include/linux/virtio.h
+@@ -89,23 +89,6 @@ dma_addr_t virtqueue_get_desc_addr(struc
+ dma_addr_t virtqueue_get_avail_addr(struct virtqueue *vq);
+ dma_addr_t virtqueue_get_used_addr(struct virtqueue *vq);
+
+-/*
+- * Legacy accessors -- in almost all cases, these are the wrong functions
+- * to use.
+- */
+-static inline void *virtqueue_get_desc(struct virtqueue *vq)
+-{
+- return virtqueue_get_vring(vq)->desc;
+-}
+-static inline void *virtqueue_get_avail(struct virtqueue *vq)
+-{
+- return virtqueue_get_vring(vq)->avail;
+-}
+-static inline void *virtqueue_get_used(struct virtqueue *vq)
+-{
+- return virtqueue_get_vring(vq)->used;
+-}
+-
+ /**
+ * virtio_device - representation of a device using virtio
+ * @index: unique position on the virtio bus
diff --git a/patches.suse/virtio-s390-make-airq-summary-indicators-dma b/patches.suse/virtio-s390-make-airq-summary-indicators-dma
new file mode 100644
index 0000000000..64a7342e63
--- /dev/null
+++ b/patches.suse/virtio-s390-make-airq-summary-indicators-dma
@@ -0,0 +1,116 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Tue, 26 Mar 2019 19:03:47 +0100
+Subject: virtio/s390: make airq summary indicators DMA
+Git-commit: 39c7dcb158924f84e04f4c2433d164eee845a732
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+The hypervisor needs to interact with the summary indicators, so these
+need to be DMA memory as well (at least for protected virtualization
+guests).
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -143,11 +143,17 @@ static int virtio_ccw_use_airq = 1;
+
+ struct airq_info {
+ rwlock_t lock;
+- u8 summary_indicator;
++ u8 summary_indicator_idx;
+ struct airq_struct airq;
+ struct airq_iv *aiv;
+ };
+ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
++static u8 *summary_indicators;
++
++static inline u8 *get_summary_indicator(struct airq_info *info)
++{
++ return summary_indicators + info->summary_indicator_idx;
++}
+
+ #define CCW_CMD_SET_VQ 0x13
+ #define CCW_CMD_VDEV_RESET 0x33
+@@ -212,7 +218,7 @@ static void virtio_airq_handler(struct a
+ break;
+ vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
+ }
+- info->summary_indicator = 0;
++ *(get_summary_indicator(info)) = 0;
+ smp_wmb();
+ /* Walk through indicators field, summary indicator not active. */
+ for (ai = 0;;) {
+@@ -224,7 +230,7 @@ static void virtio_airq_handler(struct a
+ read_unlock(&info->lock);
+ }
+
+-static struct airq_info *new_airq_info(void)
++static struct airq_info *new_airq_info(int index)
+ {
+ struct airq_info *info;
+ int rc;
+@@ -240,7 +246,8 @@ static struct airq_info *new_airq_info(v
+ return NULL;
+ }
+ info->airq.handler = virtio_airq_handler;
+- info->airq.lsi_ptr = &info->summary_indicator;
++ info->summary_indicator_idx = index;
++ info->airq.lsi_ptr = get_summary_indicator(info);
+ info->airq.lsi_mask = 0xff;
+ info->airq.isc = VIRTIO_AIRQ_ISC;
+ rc = register_adapter_interrupt(&info->airq);
+@@ -262,7 +269,7 @@ static unsigned long get_airq_indicator(
+
+ for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
+ if (!airq_areas[i])
+- airq_areas[i] = new_airq_info();
++ airq_areas[i] = new_airq_info(i);
+ info = airq_areas[i];
+ if (!info)
+ return 0;
+@@ -348,7 +355,7 @@ static void virtio_ccw_drop_indicator(st
+ if (!thinint_area)
+ return;
+ thinint_area->summary_indicator =
+- (unsigned long) &airq_info->summary_indicator;
++ (unsigned long) get_summary_indicator(airq_info);
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->count = sizeof(*thinint_area);
+@@ -616,7 +623,7 @@ static int virtio_ccw_register_adapter_i
+ }
+ info = vcdev->airq_info;
+ thinint_area->summary_indicator =
+- (unsigned long) &info->summary_indicator;
++ (unsigned long) get_summary_indicator(info);
+ thinint_area->isc = VIRTIO_AIRQ_ISC;
+ ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
+ ccw->flags = CCW_FLAG_SLI;
+@@ -1443,8 +1450,17 @@ static void __init no_auto_parse(void)
+
+ static int __init virtio_ccw_init(void)
+ {
++ int rc;
++
+ /* parse no_auto string before we do anything further */
+ no_auto_parse();
+- return ccw_driver_register(&virtio_ccw_driver);
++
++ summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
++ if (!summary_indicators)
++ return -ENOMEM;
++ rc = ccw_driver_register(&virtio_ccw_driver);
++ if (rc)
++ cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
++ return rc;
+ }
+ device_initcall(virtio_ccw_init);
diff --git a/patches.suse/virtio-s390-use-cacheline-aligned-airq-bit-vectors b/patches.suse/virtio-s390-use-cacheline-aligned-airq-bit-vectors
new file mode 100644
index 0000000000..0cef5fad15
--- /dev/null
+++ b/patches.suse/virtio-s390-use-cacheline-aligned-airq-bit-vectors
@@ -0,0 +1,33 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Thu, 23 May 2019 16:50:07 +0200
+Subject: virtio/s390: use cacheline aligned airq bit vectors
+Git-commit: 01b3fb1ea00d5b2af77f41da69dd9dc859c12748
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+The flag AIRQ_IV_CACHELINE was recently added to airq_iv_create(). Let
+us use it! We actually wanted the vector to span a cacheline all along.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -219,7 +219,8 @@ static struct airq_info *new_airq_info(v
+ if (!info)
+ return NULL;
+ rwlock_init(&info->lock);
+- info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
++ info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
++ | AIRQ_IV_CACHELINE);
+ if (!info->aiv) {
+ kfree(info);
+ return NULL;
diff --git a/patches.suse/virtio-s390-use-dma-memory-for-ccw-i-o-and-classic-notifiers b/patches.suse/virtio-s390-use-dma-memory-for-ccw-i-o-and-classic-notifiers
new file mode 100644
index 0000000000..9167398fdd
--- /dev/null
+++ b/patches.suse/virtio-s390-use-dma-memory-for-ccw-i-o-and-classic-notifiers
@@ -0,0 +1,510 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Mon, 1 Oct 2018 19:01:58 +0200
+Subject: virtio/s390: use DMA memory for ccw I/O and classic notifiers
+Git-commit: 48720ba56891570e3b750b271d80efb631478630
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+Before virtio-ccw could get away with not using DMA API for the pieces of
+memory it does ccw I/O with. With protected virtualization this has to
+change, since the hypervisor needs to read and sometimes also write these
+pieces of memory.
+
+The hypervisor is supposed to poke the classic notifiers, if these are
+used, out of band with regards to ccw I/O. So these need to be allocated
+as DMA memory (which is shared memory for protected virtualization
+guests).
+
+Let us factor out everything from struct virtio_ccw_device that needs to
+be DMA memory in a satellite that is allocated as such.
+
+Note: The control blocks of I/O instructions do not need to be shared.
+These are marshalled by the ultravisor.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reviewed-by: Pierre Morel <pmorel@linux.ibm.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Michael Mueller <mimu@linux.ibm.com>
+Tested-by: Michael Mueller <mimu@linux.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 169 ++++++++++++++++++++-------------------
+ 1 file changed, 89 insertions(+), 80 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -49,9 +49,15 @@ struct vq_config_block {
+ #define VIRTIO_CCW_CONFIG_SIZE 0x100
+ /* same as PCI config space size, should be enough for all drivers */
+
++struct vcdev_dma_area {
++ unsigned long indicators;
++ unsigned long indicators2;
++ struct vq_config_block config_block;
++ __u8 status;
++};
++
+ struct virtio_ccw_device {
+ struct virtio_device vdev;
+- __u8 *status;
+ __u8 config[VIRTIO_CCW_CONFIG_SIZE];
+ struct ccw_device *cdev;
+ __u32 curr_io;
+@@ -61,24 +67,22 @@ struct virtio_ccw_device {
+ spinlock_t lock;
+ struct mutex io_lock; /* Serializes I/O requests */
+ struct list_head virtqueues;
+- unsigned long indicators;
+- unsigned long indicators2;
+- struct vq_config_block *config_block;
+ bool is_thinint;
+ bool going_away;
+ bool device_lost;
+ unsigned int config_ready;
+ void *airq_info;
++ struct vcdev_dma_area *dma_area;
+ };
+
+ static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
+ {
+- return &vcdev->indicators;
++ return &vcdev->dma_area->indicators;
+ }
+
+ static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
+ {
+- return &vcdev->indicators2;
++ return &vcdev->dma_area->indicators2;
+ }
+
+ struct vq_info_block_legacy {
+@@ -339,8 +343,8 @@ static void virtio_ccw_drop_indicator(st
+ struct airq_info *airq_info = vcdev->airq_info;
+
+ if (vcdev->is_thinint) {
+- thinint_area = kzalloc(sizeof(*thinint_area),
+- GFP_DMA | GFP_KERNEL);
++ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(*thinint_area));
+ if (!thinint_area)
+ return;
+ thinint_area->summary_indicator =
+@@ -351,8 +355,8 @@ static void virtio_ccw_drop_indicator(st
+ ccw->cda = (__u32)(unsigned long) thinint_area;
+ } else {
+ /* payload is the address of the indicators */
+- indicatorp = kmalloc(sizeof(indicators(vcdev)),
+- GFP_DMA | GFP_KERNEL);
++ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(indicators(vcdev)));
+ if (!indicatorp)
+ return;
+ *indicatorp = 0;
+@@ -372,8 +376,8 @@ static void virtio_ccw_drop_indicator(st
+ "Failed to deregister indicators (%d)\n", ret);
+ else if (vcdev->is_thinint)
+ virtio_ccw_drop_indicators(vcdev);
+- kfree(indicatorp);
+- kfree(thinint_area);
++ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
++ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
+ }
+
+ static inline long __do_kvm_notify(struct subchannel_id schid,
+@@ -420,15 +424,15 @@ static int virtio_ccw_read_vq_conf(struc
+ {
+ int ret;
+
+- vcdev->config_block->index = index;
++ vcdev->dma_area->config_block.index = index;
+ ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
+ ccw->flags = 0;
+ ccw->count = sizeof(struct vq_config_block);
+- ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
++ ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+ if (ret)
+ return ret;
+- return vcdev->config_block->num ?: -ENOENT;
++ return vcdev->dma_area->config_block.num ?: -ENOENT;
+ }
+
+ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
+@@ -473,7 +477,8 @@ static void virtio_ccw_del_vq(struct vir
+ ret, index);
+
+ vring_del_virtqueue(vq);
+- kfree(info->info_block);
++ ccw_device_dma_free(vcdev->cdev, info->info_block,
++ sizeof(*info->info_block));
+ kfree(info);
+ }
+
+@@ -483,7 +488,7 @@ static void virtio_ccw_del_vqs(struct vi
+ struct ccw1 *ccw;
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return;
+
+@@ -492,7 +497,7 @@ static void virtio_ccw_del_vqs(struct vi
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_ccw_del_vq(vq, ccw);
+
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ }
+
+ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
+@@ -515,8 +520,8 @@ static struct virtqueue *virtio_ccw_setu
+ err = -ENOMEM;
+ goto out_err;
+ }
+- info->info_block = kzalloc(sizeof(*info->info_block),
+- GFP_DMA | GFP_KERNEL);
++ info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(*info->info_block));
+ if (!info->info_block) {
+ dev_warn(&vcdev->cdev->dev, "no info block\n");
+ err = -ENOMEM;
+@@ -580,7 +585,8 @@ out_err:
+ if (vq)
+ vring_del_virtqueue(vq);
+ if (info) {
+- kfree(info->info_block);
++ ccw_device_dma_free(vcdev->cdev, info->info_block,
++ sizeof(*info->info_block));
+ }
+ kfree(info);
+ return ERR_PTR(err);
+@@ -594,7 +600,8 @@ static int virtio_ccw_register_adapter_i
+ struct virtio_thinint_area *thinint_area = NULL;
+ struct airq_info *info;
+
+- thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
++ thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(*thinint_area));
+ if (!thinint_area) {
+ ret = -ENOMEM;
+ goto out;
+@@ -630,7 +637,7 @@ static int virtio_ccw_register_adapter_i
+ virtio_ccw_drop_indicators(vcdev);
+ }
+ out:
+- kfree(thinint_area);
++ ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
+ return ret;
+ }
+
+@@ -646,7 +653,7 @@ static int virtio_ccw_find_vqs(struct vi
+ int ret, i;
+ struct ccw1 *ccw;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return -ENOMEM;
+
+@@ -664,7 +671,8 @@ static int virtio_ccw_find_vqs(struct vi
+ * We need a data area under 2G to communicate. Our payload is
+ * the address of the indicators.
+ */
+- indicatorp = kmalloc(sizeof(indicators(vcdev)), GFP_DMA | GFP_KERNEL);
++ indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(indicators(vcdev)));
+ if (!indicatorp)
+ goto out;
+ *indicatorp = (unsigned long) indicators(vcdev);
+@@ -696,12 +704,16 @@ static int virtio_ccw_find_vqs(struct vi
+ if (ret)
+ goto out;
+
+- kfree(indicatorp);
+- kfree(ccw);
++ if (indicatorp)
++ ccw_device_dma_free(vcdev->cdev, indicatorp,
++ sizeof(indicators(vcdev)));
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ return 0;
+ out:
+- kfree(indicatorp);
+- kfree(ccw);
++ if (indicatorp)
++ ccw_device_dma_free(vcdev->cdev, indicatorp,
++ sizeof(indicators(vcdev)));
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ virtio_ccw_del_vqs(vdev);
+ return ret;
+ }
+@@ -711,12 +723,12 @@ static void virtio_ccw_reset(struct virt
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ struct ccw1 *ccw;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return;
+
+ /* Zero status bits. */
+- *vcdev->status = 0;
++ vcdev->dma_area->status = 0;
+
+ /* Send a reset ccw on device. */
+ ccw->cmd_code = CCW_CMD_VDEV_RESET;
+@@ -724,7 +736,7 @@ static void virtio_ccw_reset(struct virt
+ ccw->count = 0;
+ ccw->cda = 0;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ }
+
+ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
+@@ -735,11 +747,11 @@ static u64 virtio_ccw_get_features(struc
+ u64 rc;
+ struct ccw1 *ccw;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return 0;
+
+- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
++ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ if (!features) {
+ rc = 0;
+ goto out_free;
+@@ -772,8 +784,8 @@ static u64 virtio_ccw_get_features(struc
+ rc |= (u64)le32_to_cpu(features->features) << 32;
+
+ out_free:
+- kfree(features);
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ return rc;
+ }
+
+@@ -791,11 +803,11 @@ static int virtio_ccw_finalize_features(
+ return -EINVAL;
+ }
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return -ENOMEM;
+
+- features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
++ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ if (!features) {
+ ret = -ENOMEM;
+ goto out_free;
+@@ -827,8 +839,8 @@ static int virtio_ccw_finalize_features(
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
+
+ out_free:
+- kfree(features);
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+
+ return ret;
+ }
+@@ -842,11 +854,12 @@ static void virtio_ccw_get_config(struct
+ void *config_area;
+ unsigned long flags;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return;
+
+- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
++ config_area = ccw_device_dma_zalloc(vcdev->cdev,
++ VIRTIO_CCW_CONFIG_SIZE);
+ if (!config_area)
+ goto out_free;
+
+@@ -868,8 +881,8 @@ static void virtio_ccw_get_config(struct
+ memcpy(buf, config_area + offset, len);
+
+ out_free:
+- kfree(config_area);
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ }
+
+ static void virtio_ccw_set_config(struct virtio_device *vdev,
+@@ -881,11 +894,12 @@ static void virtio_ccw_set_config(struct
+ void *config_area;
+ unsigned long flags;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return;
+
+- config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
++ config_area = ccw_device_dma_zalloc(vcdev->cdev,
++ VIRTIO_CCW_CONFIG_SIZE);
+ if (!config_area)
+ goto out_free;
+
+@@ -904,61 +918,61 @@ static void virtio_ccw_set_config(struct
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
+
+ out_free:
+- kfree(config_area);
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ }
+
+ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+ {
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+- u8 old_status = *vcdev->status;
++ u8 old_status = vcdev->dma_area->status;
+ struct ccw1 *ccw;
+
+ if (vcdev->revision < 1)
+- return *vcdev->status;
++ return vcdev->dma_area->status;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return old_status;
+
+ ccw->cmd_code = CCW_CMD_READ_STATUS;
+ ccw->flags = 0;
+- ccw->count = sizeof(*vcdev->status);
+- ccw->cda = (__u32)(unsigned long)vcdev->status;
++ ccw->count = sizeof(vcdev->dma_area->status);
++ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+ /*
+ * If the channel program failed (should only happen if the device
+ * was hotunplugged, and then we clean up via the machine check
+- * handler anyway), vcdev->status was not overwritten and we just
++ * handler anyway), vcdev->dma_area->status was not overwritten and we just
+ * return the old status, which is fine.
+ */
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+
+- return *vcdev->status;
++ return vcdev->dma_area->status;
+ }
+
+ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
+ {
+ struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+- u8 old_status = *vcdev->status;
++ u8 old_status = vcdev->dma_area->status;
+ struct ccw1 *ccw;
+ int ret;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return;
+
+ /* Write the status to the host. */
+- *vcdev->status = status;
++ vcdev->dma_area->status = status;
+ ccw->cmd_code = CCW_CMD_WRITE_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(status);
+- ccw->cda = (__u32)(unsigned long)vcdev->status;
++ ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
+ /* Write failed? We assume status is unchanged. */
+ if (ret)
+- *vcdev->status = old_status;
+- kfree(ccw);
++ vcdev->dma_area->status = old_status;
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ }
+
+ static const struct virtio_config_ops virtio_ccw_config_ops = {
+@@ -983,8 +997,8 @@ static void virtio_ccw_release_dev(struc
+ struct virtio_device *dev = dev_to_virtio(_d);
+ struct virtio_ccw_device *vcdev = to_vc_device(dev);
+
+- kfree(vcdev->status);
+- kfree(vcdev->config_block);
++ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
++ sizeof(*vcdev->dma_area));
+ kfree(vcdev);
+ }
+
+@@ -1192,12 +1206,12 @@ static int virtio_ccw_set_transport_rev(
+ struct ccw1 *ccw;
+ int ret;
+
+- ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
++ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ if (!ccw)
+ return -ENOMEM;
+- rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
++ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
+ if (!rev) {
+- kfree(ccw);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
+ return -ENOMEM;
+ }
+
+@@ -1227,8 +1241,8 @@ static int virtio_ccw_set_transport_rev(
+ }
+ } while (ret == -EOPNOTSUPP);
+
+- kfree(ccw);
+- kfree(rev);
++ ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
++ ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
+ return ret;
+ }
+
+@@ -1245,14 +1259,10 @@ static int virtio_ccw_online(struct ccw_
+ goto out_free;
+ }
+ vcdev->vdev.dev.parent = &cdev->dev;
+- vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
+- GFP_DMA | GFP_KERNEL);
+- if (!vcdev->config_block) {
+- ret = -ENOMEM;
+- goto out_free;
+- }
+- vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
+- if (!vcdev->status) {
++ vcdev->cdev = cdev;
++ vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
++ sizeof(*vcdev->dma_area));
++ if (!vcdev->dma_area) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+@@ -1261,7 +1271,6 @@ static int virtio_ccw_online(struct ccw_
+
+ vcdev->vdev.dev.release = virtio_ccw_release_dev;
+ vcdev->vdev.config = &virtio_ccw_config_ops;
+- vcdev->cdev = cdev;
+ init_waitqueue_head(&vcdev->wait_q);
+ INIT_LIST_HEAD(&vcdev->virtqueues);
+ spin_lock_init(&vcdev->lock);
+@@ -1292,8 +1301,8 @@ out_put:
+ return ret;
+ out_free:
+ if (vcdev) {
+- kfree(vcdev->status);
+- kfree(vcdev->config_block);
++ ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
++ sizeof(*vcdev->dma_area));
+ }
+ kfree(vcdev);
+ return ret;
diff --git a/patches.suse/virtio-s390-use-vring_create_virtqueue b/patches.suse/virtio-s390-use-vring_create_virtqueue
new file mode 100644
index 0000000000..ab4fb2419b
--- /dev/null
+++ b/patches.suse/virtio-s390-use-vring_create_virtqueue
@@ -0,0 +1,114 @@
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Fri, 26 Apr 2019 20:32:36 +0200
+Subject: virtio/s390: use vring_create_virtqueue
+Git-commit: 3279beac545190ed252cd3df0eb41056537463df
+Patch-mainline: v5.2-rc1
+References: jsc#SLE-6197 FATE#327012 bsc#1140559 LTC#173150
+
+The commit 2a2d1382fe9d ("virtio: Add improved queue allocation API")
+establishes a new way of allocating virtqueues (as a part of the effort
+that taught DMA to virtio rings).
+
+In the future we will want virtio-ccw to use the DMA API as well.
+
+Let us switch from the legacy method of allocating virtqueues to
+vring_create_virtqueue() as the first step into that direction.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Acked-by: Petr Tesarik <ptesarik@suse.com>
+---
+ drivers/s390/virtio/virtio_ccw.c | 30 +++++++++++-------------------
+ 1 file changed, 11 insertions(+), 19 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -111,7 +111,6 @@ struct virtio_rev_info {
+ struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+ int num;
+- void *queue;
+ union {
+ struct vq_info_block s;
+ struct vq_info_block_legacy l;
+@@ -426,7 +425,6 @@ static void virtio_ccw_del_vq(struct vir
+ struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
+ struct virtio_ccw_vq_info *info = vq->priv;
+ unsigned long flags;
+- unsigned long size;
+ int ret;
+ unsigned int index = vq->index;
+
+@@ -464,8 +462,6 @@ static void virtio_ccw_del_vq(struct vir
+ ret, index);
+
+ vring_del_virtqueue(vq);
+- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+- free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ kfree(info);
+ }
+@@ -497,8 +493,9 @@ static struct virtqueue *virtio_ccw_setu
+ int err;
+ struct virtqueue *vq = NULL;
+ struct virtio_ccw_vq_info *info;
+- unsigned long size = 0; /* silence the compiler */
++ u64 queue;
+ unsigned long flags;
++ bool may_reduce;
+
+ /* Allocate queue. */
+ info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
+@@ -519,33 +516,30 @@ static struct virtqueue *virtio_ccw_setu
+ err = info->num;
+ goto out_err;
+ }
+- size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
+- info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
+- if (info->queue == NULL) {
+- dev_warn(&vcdev->cdev->dev, "no queue\n");
+- err = -ENOMEM;
+- goto out_err;
+- }
++ may_reduce = vcdev->revision > 0;
++ vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
++ vdev, true, may_reduce, ctx,
++ virtio_ccw_kvm_notify, callback, name);
+
+- vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
+- true, ctx, info->queue, virtio_ccw_kvm_notify,
+- callback, name);
+ if (!vq) {
+ /* For now, we fail if we can't get the requested size. */
+ dev_warn(&vcdev->cdev->dev, "no vq\n");
+ err = -ENOMEM;
+ goto out_err;
+ }
++ /* it may have been reduced */
++ info->num = virtqueue_get_vring_size(vq);
+
+ /* Register it with the host. */
++ queue = virtqueue_get_desc_addr(vq);
+ if (vcdev->revision == 0) {
+- info->info_block->l.queue = (__u64)info->queue;
++ info->info_block->l.queue = queue;
+ info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
+ info->info_block->l.index = i;
+ info->info_block->l.num = info->num;
+ ccw->count = sizeof(info->info_block->l);
+ } else {
+- info->info_block->s.desc = (__u64)info->queue;
++ info->info_block->s.desc = queue;
+ info->info_block->s.index = i;
+ info->info_block->s.num = info->num;
+ info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
+@@ -575,8 +569,6 @@ out_err:
+ if (vq)
+ vring_del_virtqueue(vq);
+ if (info) {
+- if (info->queue)
+- free_pages_exact(info->queue, size);
+ kfree(info->info_block);
+ }
+ kfree(info);
diff --git a/series.conf b/series.conf
index 3f57f28cf1..6bf2a4d50f 100644
--- a/series.conf
+++ b/series.conf
@@ -11302,6 +11302,7 @@
patches.fixes/0001-USB-serial-console-fix-use-after-free-after-failed-s.patch
patches.fixes/0001-usb-renesas_usbhs-Fix-DMAC-sequence-for-receiving-ze.patch
patches.fixes/0001-usb-usbtest-fix-NULL-pointer-dereference.patch
+ patches.drivers/mei-me-add-gemini-lake-devices-id.patch
patches.suse/msft-hv-1507-Drivers-hv-vmbus-Fix-bugs-in-rescind-handling.patch
patches.drivers/media-dvb-i2c-transfers-over-usb-cannot-be-done-from
patches.drivers/media-s5p-cec-add-NACK-detection-support
@@ -20210,6 +20211,14 @@
patches.fixes/scsi-core-Add-VENDOR_SPECIFIC-sense-code-definitions.patch
patches.drivers/scsi-arcmsr-avoid-do_gettimeofday.patch
patches.drivers/scsi-qla2xxx-Fix-logo-flag-for-qlt_free_session_done.patch
+ patches.suse/s390-remove-the-unused-dma_capable-helper
+ patches.suse/dma-mapping-take-dma_pfn_offset-into-account-in-dma_max_pfn
+ patches.suse/arm64-don-t-override-dma_max_pfn
+ patches.suse/mips-fix-an-off-by-one-in-dma_capable
+ patches.suse/dma-mapping-move-swiotlb-arch-helpers-to-a-new-header
+ patches.suse/dma-mapping-move-dma_mark_clean-to-dma-direct-h
+ patches.suse/dma-direct-add-support-for-allocation-from-zone_dma-and-zone_dma32
+ patches.suse/dma-direct-retry-allocations-using-gfp_dma-for-small-masks
patches.drivers/swiotlb-suppress-warning-when-__GFP_NOWARN-is-set
patches.drivers/dmaengine-dmatest-fix-container_of-member-in-dmatest
patches.drivers/dmaengine-qcom_hidma-check-pending-interrupts
@@ -29594,6 +29603,7 @@
patches.drivers/ice-Fix-incorrect-comment-for-action-type.patch
patches.drivers/ice-Do-not-check-INTEVENT-bit-for-OICR-interrupts.patch
patches.drivers/ice-Fix-insufficient-memory-issue-in-ice_aq_manage_m.patch
+ patches.suse/dma-direct-don-t-retry-allocation-for-no-op-gfp_dma
patches.fixes/block-do-not-use-interruptible-wait-anywhere.patch
patches.fixes/loop-remove-cmd-rq-member.patch
patches.fixes/loop-handle-short-DIO-reads.patch
@@ -30603,6 +30613,7 @@
patches.drivers/0002-dma-mapping-move-dma-configuration-to-bus-infrastruc.patch
patches.drivers/0003-drivers-remove-force-dma-flag-from-buses.patch
patches.fixes/scsi-reduce-use-of-block-bounce-buffers.patch
+ patches.suse/iommu-helper-mark-iommu_is_span_boundary-as-inline
patches.drivers/hwmon-k10temp-add-support-for-stoney-ridge-and-bristol.patch
patches.drivers/hwmon-k10temp-display-both-tctl-and-tdie.patch
patches.drivers/hwmon-ltc2990-Fix-incorrect-conversion-of-negative-t
@@ -39786,6 +39797,7 @@
patches.drivers/nvmet-rdma-fix-possible-bogus-dereference-under-heav.patch
patches.fixes/blk-cgroup-increase-number-of-supported-policies.patch
patches.drivers/mei-ignore-not-found-client-in-the-enumeration.patch
+ patches.drivers/mei-bus-need-to-unlink-client-before-freeing.patch
patches.suse/msft-hv-1753-Tools-hv-Fix-a-bug-in-the-key-delete-code.patch
patches.drivers/misc-hmc6352-fix-potential-Spectre-v1.patch
patches.drivers/0001-fpga-dfl-fme-fix-return-value-check-in-in-pr_mgmt_in.patch
@@ -44749,6 +44761,7 @@
patches.suse/msft-hv-1831-Drivers-hv-vmbus-Check-for-ring-when-getting-debug-i.patch
patches.suse/msft-hv-1832-hv_balloon-avoid-touching-uninitialized-struct-page-.patch
patches.suse/msft-hv-1833-vmbus-fix-subchannel-removal.patch
+ patches.drivers/mei-me-add-denverton-innovation-engine-device-IDs.patch
patches.drivers/char-mwave-fix-potential-Spectre-v1-vulnerability.patch
patches.drivers/mmc-sdhci-iproc-handle-mmc_of_parse-errors-during-pr.patch
patches.drivers/mmc-dw_mmc-bluefield-Fix-the-license-information.patch
@@ -45453,8 +45466,21 @@
patches.arch/s390-setup-set-control-program-code-via-diag-318
patches.arch/s390-pci-improve-bar-check
patches.arch/s390-pci-map-iov-resources
+ patches.suse/s390-add-alignment-hints-to-vector-load-and-store
patches.arch/s390-jump_label-Use-jdd-constraint-on-gcc9.patch
patches.arch/s390-ism-ignore-some-errors-during-deregistration
+ patches.suse/pkey-indicate-old-mkvp-only-if-old-and-current-mkvp-are-different
+ patches.suse/s390-cpum_cf-move-counter-set-controls-to-a-new-header-file
+ patches.suse/s390-cpum_cf-prepare-for-in-kernel-counter-measurements
+ patches.suse/s390-cpum_cf-rename-per-cpu-counter-facility-structure-and-variables
+ patches.suse/s390-cpu_mf-move-struct-cpu_cf_events-and-per-cpu-variable-to-header-file
+ patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_alert-to-obtain-measurement-alerts
+ patches.suse/s390-cpum_cf-add-minimal-in-kernel-interface-for-counter-measurements
+ patches.suse/s390-cpu_mf-add-store-cpu-counter-multiple-instruction-support
+ patches.suse/s390-cpu_mf-replace-stcctm5-with-the-stcctm-function
+ patches.suse/s390-cpum_cf-introduce-kernel_cpumcf_avail-function
+ patches.suse/s390-cpum_cf-add-ctr_stcctm-function
+ patches.suse/s390-cpum_cf_diag-add-support-for-s390-counter-facility-diagnostic-trace
patches.fixes/0001-s390-vfio_ap-link-the-vfio_ap-devices-to-the-vfio_ap.patch
patches.drivers/clocksource-drivers-sun5i-Fail-gracefully-when-clock.patch
patches.drivers/clocksource-drivers-exynos_mct-Move-one-shot-check-f.patch
@@ -46161,6 +46187,7 @@
patches.arch/ARM-imx6q-cpuidle-fix-bug-that-CPU-might-not-wake-up.patch
patches.fixes/s390-vtime-steal-time-exponential-moving-average.patch
patches.fixes/vfio-ccw-only-free-cp-on-final-interrupt
+ patches.suse/s390-cpumf-fix-warning-from-check_processor_id
patches.drm/drm-tegra-hub-Fix-dereference-before-check.patch
patches.drm/drm-Fix-drm_release-and-device-unplug.patch
patches.drm/drm-vgem-fix-use-after-free-when-drm_gem_handle_crea.patch
@@ -46558,9 +46585,12 @@
patches.arch/x86-cpu-hygon-fix-phys_proc_id-calculation-logic-for-multi-die-processors.patch
patches.suse/x86-smpboot-Rename-match_die-to-match_pkg.patch
patches.arch/x86-microcode-fix-the-ancient-deprecated-microcode-loading-method.patch
+ patches.suse/s390-cpum_cf-add-support-for-cpu-mf-svn-6
patches.arch/s390-uv-introduce-guest-side-ultravisor-code
patches.arch/s390-protvirt-add-memory-sharing-for-diag-308-set-store
patches.arch/s390-protvirt-block-kernel-command-line-alteration
+ patches.suse/s390-cpum_cf_diag-add-support-for-cpu-mf-svn-6
+ patches.suse/s390-report-new-cpu-capabilities
patches.arch/s390-pci-mark-command-line-parser-data-_initdata
patches.arch/s390-pci-remove-unused-define
patches.arch/s390-pci-move-everything-irq-related-to-pci_irq-c
@@ -47340,6 +47370,8 @@
patches.drivers/mfd-tps65912-spi-Add-missing-of-table-registration.patch
patches.drivers/mfd-da9063-Fix-OTP-control-register-names-to-match-d.patch
patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch
+ patches.suse/virtio-s390-use-vring_create_virtqueue
+ patches.suse/virtio-s390-dma-support-for-virtio-ccw
patches.suse/userfaultfd-use-RCU-to-free-the-task-struct-when-for.patch
patches.fixes/mm-mincore-c-make-mincore-more-conservative.patch
patches.drivers/rapidio-fix-a-NULL-pointer-dereference-when-create_w.patch
@@ -47549,6 +47581,7 @@
patches.drm/0002-drm-i915-gvt-refine-ggtt-range-validation.patch
patches.drm/0003-drm-i915-gvt-Fix-cmd-length-of-VEB_DI_IECP.patch
patches.drm/drm-msm-fix-fb-references-in-async-update.patch
+ patches.drm/drm-vc4-fix-fb-references-in-async-update.patch
patches.drm/drm-don-t-block-fb-changes-for-async-plane-updates.patch
patches.drm/drm-arm-mali-dp-Add-a-loop-around-the-second-set-CVA.patch
patches.drm/drm-arm-hdlcd-Actually-validate-CRTC-modes.patch
@@ -47675,6 +47708,8 @@
patches.drivers/IB-hfi1-Validate-fault-injection-opcode-user-input.patch
patches.drivers/IB-hfi1-Create-inline-to-get-extended-headers.patch
patches.drivers/net-udp_gso-Allow-TX-timestamp-with-UDP-GSO.patch
+ patches.suse/net-af_iucv-remove-gfp_dma-restriction-for-hipertransport
+ patches.suse/net-af_iucv-build-proper-skbs-for-hipertransport
patches.drivers/net-hns3-Fix-inconsistent-indenting.patch
patches.fixes/tcp-refine-memory-limit-test-in-tcp_fragment.patch
patches.drivers/Bluetooth-Fix-regression-with-minimum-encryption-key.patch
@@ -47710,6 +47745,14 @@
patches.drm/drm-imx-only-send-event-on-crtc-disable-if-kept-disa.patch
patches.fixes/scsi-target-iblock-fix-overrun-in-write-same-emulation
patches.drivers/dmaengine-imx-sdma-remove-BD_INTR-for-channel0.patch
+ patches.suse/s390-mm-force-swiotlb-for-protected-virtualization
+ patches.suse/s390-cio-introduce-dma-pools-to-cio
+ patches.suse/s390-cio-add-basic-protected-virtualization-support
+ patches.suse/s390-airq-use-dma-memory-for-adapter-interrupts
+ patches.suse/virtio-s390-use-cacheline-aligned-airq-bit-vectors
+ patches.suse/virtio-s390-add-indirection-to-indicators-access
+ patches.suse/virtio-s390-use-dma-memory-for-ccw-i-o-and-classic-notifiers
+ patches.suse/virtio-s390-make-airq-summary-indicators-dma
patches.fixes/crypto-ccp-fix-AES-CFB-error-exposed-by-new-test-vec.patch
patches.fixes/crypto-ccp-Fix-3DES-complaint-from-ccp-crypto-module.patch
patches.fixes/crypto-talitos-rename-alternative-AEAD-algos.patch
@@ -47723,6 +47766,7 @@
patches.fixes/crypto-arm64-sha2-ce-correct-digest-for-empty-data-i.patch
patches.fixes/crypto-ghash-fix-unaligned-memory-access-in-ghash_se.patch
patches.fixes/crypto-chacha20poly1305-fix-atomic-sleep-when-using-.patch
+ patches.fixes/crypto-talitos-fix-max-key-size-for-sha384-and-sha51.patch
patches.fixes/lib-scatterlist-Fix-mapping-iterator-when-sg-offset-.patch
patches.fixes/crypto-ccp-Validate-the-the-error-value-used-to-inde.patch
patches.drivers/pwm-stm32-Use-3-cells-of_xlate.patch
@@ -47850,6 +47894,7 @@
patches.fixes/0002-ocfs2-add-locking-filter-debugfs-file.patch
patches.fixes/0003-ocfs2-add-first-lock-wait-time-in-locking_state.patch
patches.arch/kvm-svm-avic-do-not-send-avic-doorbell-to-self
+ patches.suse/s390-cpumf-add-extended-counter-set-definitions-for-model-8561-and-8562
patches.arch/powerpc-watchpoint-Restore-NV-GPRs-while-returning-f.patch
patches.arch/powerpc-mm-drconf-Use-NUMA_NO_NODE-on-failures-inste.patch
patches.arch/powerpc-mm-Fix-node-look-up-with-numa-off-boot.patch
@@ -47876,6 +47921,16 @@
patches.drivers/PCI-Return-error-if-cannot-probe-VF.patch
patches.drivers/PCI-Always-allow-probing-with-driver_override.patch
patches.fixes/PCI-P2PDMA-Fix-missing-check-for-dma_virt_ops.patch
+ patches.drivers/dmaengine-hsu-Revert-set-HSU_CH_MTSR-to-memory-width.patch
+ patches.drivers/clk-qcom-Fix-Wunused-const-variable.patch
+ patches.drivers/clk-tegra210-fix-PLLU-and-PLLU_OUT1.patch
+ patches.drivers/clk-rockchip-Don-t-yell-about-bad-mmc-phases-when-ge.patch
+ patches.drivers/ALSA-hda-realtek-Fixed-Headphone-Mic-can-t-record-on.patch
+ patches.drivers/ALSA-hda-realtek-apply-ALC891-headset-fixup-to-one-D.patch
+ patches.drivers/ALSA-seq-Break-too-long-mutex-context-in-the-write-l.patch
+ patches.drivers/ALSA-hda-hdmi-Remove-duplicated-define.patch
+ patches.drivers/ALSA-hda-hdmi-Fix-i915-reverse-port-pin-mapping.patch
+ patches.drivers/ALSA-hda-Don-t-resume-forcibly-i915-HDMI-DP-codec.patch
# davem/net-next
patches.drivers/cxgb4-Enable-hash-filter-with-offload.patch
@@ -47990,7 +48045,6 @@
patches.fixes/libnvdimm-bus-prevent-duplicate-device_unregister-calls.patch
# out-of-tree patches
- patches.arch/mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-.patch
patches.drivers/firmware-arm_sdei-fix-wrong-of_node_put-in-init-function.patch
patches.drivers/net-mvpp2-fix-condition-for-setting-up-link-interrup.patch
patches.suse/nvme-multipath-round-robin-I-O-policy.patch
@@ -48016,6 +48070,7 @@
patches.arch/powerpc-pseries-Update-SCM-hcall-op-codes-in-hvcall..patch
patches.arch/powerpc-papr_scm-Update-drc_pmem_unbind-to-use-H_SCM.patch
patches.arch/powerpc-papr_scm-Force-a-scm-unbind-if-initial-scm-b.patch
+ patches.arch/mm-nvdimm-add-is_ioremap_addr-and-use-that-to-check-.patch
########################################################
# end of sorted patches
@@ -48143,6 +48198,8 @@
patches.arch/s390-sles15sp1-kmsg-update-2019-01-10.patch
patches.arch/s390-sles15sp1-kmsg-update-2019-03-08.patch
+ patches.arch/s390-dma-provide-proper-ARCH_ZONE_DMA_BITS
+
########################################################
# VM/FS patches
########################################################
@@ -48480,6 +48537,7 @@
##########################################################
# Sound
##########################################################
+ patches.kabi/hda-relaxed_resume-flag-kabi-fix.patch
########################################################
# printk
@@ -48735,6 +48793,10 @@
patches.kabi/kabi-fixup-blk_mq_register_dev.patch
patches.kabi/scsi-cxgbi-kabi-fix-handle-completion-etc
+ patches.kabi/s390-mm-force-swiotlb-for-protected-virtualization
+ patches.kabi/s390-cio-add-basic-protected-virtualization-support
+ patches.kabi/s390-airq-use-dma-memory-for-adapter-interrupts
+ patches.kabi/iommu-helper-mark-iommu_is_span_boundary-as-inline
########################################################
# You'd better have a good reason for adding a patch