author     Olaf Hering <ohering@suse.de>    2019-05-16 19:01:16 +0200
committer  Olaf Hering <ohering@suse.de>    2019-05-16 19:01:16 +0200
commit     199b28d60c31246adb32feb4ff33957cfe1bd310 (patch)
tree       91ea0745822fe3bc2df24acbe4a5a4a9136554f8
parent     375957d1484d9f4d9285dad97b0071e9039688ef (diff)
parent     99503d8cd1887f0497e6c5037cc1e504206de437 (diff)
Merge remote-tracking branch 'kerncvs/SLE15' into SLE15-AZURE
-rw-r--r--  patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch | 3
-rw-r--r--  patches.arch/powerpc-numa-improve-control-of-topology-updates.patch | 3
-rw-r--r--  patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch | 3
-rw-r--r--  patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch | 3
-rw-r--r--  patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch | 2
-rw-r--r--  patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch | 2
-rw-r--r--  patches.arch/x86-msr-index-cleanup-bit-defines.patch | 2
-rw-r--r--  patches.arch/x86-speculation-consolidate-cpu-whitelists.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-bug_msbds_only.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch | 3
-rw-r--r--  patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch | 3
-rw-r--r--  patches.arch/x86-speculation-mds-add-smt-warning-message.patch | 3
-rw-r--r--  patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch | 2
-rw-r--r--  patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch | 3
-rw-r--r--  patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch | 3
-rw-r--r--  patches.drivers/PCI-Mark-AMD-Stoney-Radeon-R7-GPU-ATS-as-broken.patch | 43
-rw-r--r--  patches.drivers/PCI-Mark-Atheros-AR9462-to-avoid-bus-reset.patch | 38
-rw-r--r--  patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch | 50
-rw-r--r--  patches.drivers/ipmi-ssif-compare-block-number-correctly-for-multi-p.patch | 2
-rw-r--r--  patches.drm/0001-drm-i915-gvt-Fix-mmap-range-check.patch | 2
-rw-r--r--  patches.fixes/CIFS-keep-FileInfo-handle-live-during-oplock-break.patch | 186
-rw-r--r--  patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch | 3
-rw-r--r--  patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch | 87
-rw-r--r--  patches.suse/0003-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch | 213
-rw-r--r--  patches.suse/0004-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch | 124
-rw-r--r--  patches.suse/0006-btrfs-extent-tree-Use-btrfs_ref-to-refactor-add_pinn.patch | 70
-rw-r--r--  patches.suse/0007-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_in.patch | 370
-rw-r--r--  patches.suse/0008-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_fr.patch | 257
-rw-r--r--  patches.suse/0009-btrfs-qgroup-Don-t-scan-leaf-if-we-re-modifying-relo.patch | 68
-rw-r--r--  patches.suse/dccp-do-not-use-ipv6-header-for-ipv4-flow.patch | 37
-rw-r--r--  patches.suse/genetlink-Fix-a-memory-leak-on-error-path.patch | 45
-rw-r--r--  patches.suse/net-aquantia-fix-rx-checksum-offload-for-UDP-TCP-ove.patch | 39
-rw-r--r--  patches.suse/net-rose-fix-a-possible-stack-overflow.patch | 129
-rw-r--r--  patches.suse/net-stmmac-fix-memory-corruption-with-large-MTUs.patch | 62
-rw-r--r--  patches.suse/packets-Always-register-packet-sk-in-the-same-order.patch | 69
-rw-r--r--  patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch | 24
-rw-r--r--  patches.suse/sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch | 38
-rw-r--r--  patches.suse/tcp-do-not-use-ipv6-header-for-ipv4-flow.patch | 43
-rw-r--r--  patches.suse/thunderx-eliminate-extra-calls-to-put_page-for-pages.patch | 62
-rw-r--r--  patches.suse/thunderx-enable-page-recycling-for-non-XDP-case.patch | 62
-rw-r--r--  patches.suse/tun-add-a-missing-rcu_read_unlock-in-error-path.patch | 29
-rw-r--r--  patches.suse/tun-properly-test-for-IFF_UP.patch | 80
-rw-r--r--  patches.suse/vxlan-Don-t-call-gro_cells_destroy-before-device-is-.patch | 45
-rw-r--r--  series.conf | 70
50 files changed, 2326 insertions(+), 72 deletions(-)
diff --git a/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch b/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
index ce83ac8841..9f5a29a14b 100644
--- a/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
+++ b/patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
@@ -5,8 +5,7 @@ Subject: [PATCH] powerpc/numa: document topology_updates_enabled, disable by
default
References: bsc#1133584
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Patch-mainline: v5.2-rc1
Git-commit: 558f86493df09f68f79fe056d9028d317a3ce8ab
Changing the NUMA associations for CPUs and memory at runtime is
diff --git a/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch b/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
index f62c6c2bae..3a04a469dd 100644
--- a/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
+++ b/patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
@@ -4,8 +4,7 @@ Date: Thu, 18 Apr 2019 13:56:57 -0500
Subject: [PATCH] powerpc/numa: improve control of topology updates
References: bsc#1133584
-Patch-mainline: queued
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Patch-mainline: v5.2-rc1
Git-commit: 2d4d9b308f8f8dec68f6dbbff18c68ec7c6bd26f
When booted with "topology_updates=no", or when "off" is written to
diff --git a/patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch b/patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch
index 2e49b98154..50b4452a92 100644
--- a/patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch
+++ b/patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch
@@ -4,9 +4,8 @@ Date: Tue, 2 Oct 2018 10:35:59 -0500
Subject: [PATCH] powerpc/pseries: Track LMB nid instead of using device tree
References: bsc#1108270
-Patch-mainline: queued
+Patch-mainline: v5.2-rc1
Git-commit: b2d3b5ee66f2a04a918cc043cec0c9ed3de58f40
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
When removing memory we need to remove the memory from the node
it was added to instead of looking up the node it should be in
diff --git a/patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch b/patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch
index 35a8322a0d..0eb3f926d5 100644
--- a/patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch
+++ b/patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch
@@ -4,9 +4,8 @@ Date: Wed, 16 Jan 2019 14:47:44 -0200
Subject: [PATCH] powerpc/tm: Avoid machine crash on rt_sigreturn()
References: bsc#1118338
-Patch-mainline: queued
+Patch-mainline: v5.2-rc1
Git-commit: e620d45065c7b5b8d6ae11217c09c09380103b83
-Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
There is a kernel crash that happens if rt_sigreturn() is called inside
a transactional block.
diff --git a/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
index ef439ddf46..551ba9ae99 100644
--- a/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
+++ b/patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
@@ -2,7 +2,7 @@ From: Andi Kleen <ak@linux.intel.com>
Date: Fri, 18 Jan 2019 16:50:23 -0800
Subject: x86/kvm: Expose X86_FEATURE_MD_CLEAR to guests
Git-commit: 6c4dbbd14730c43f4ed808a9c42ca41625925c22
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
X86_FEATURE_MD_CLEAR is a new CPUID bit which is set when microcode
diff --git a/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
index 72dc41269e..29ec748a0f 100644
--- a/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
+++ b/patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 27 Feb 2019 12:48:14 +0100
Subject: x86/kvm/vmx: Add MDS protection when L1D Flush is not active
Git-commit: 650b68a0622f933444a6d66936abb3103029413b
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
CPUs which are affected by L1TF and MDS mitigate MDS with the L1D Flush on
diff --git a/patches.arch/x86-msr-index-cleanup-bit-defines.patch b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
index 4fed2b84e7..9f5a73614e 100644
--- a/patches.arch/x86-msr-index-cleanup-bit-defines.patch
+++ b/patches.arch/x86-msr-index-cleanup-bit-defines.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 21 Feb 2019 12:36:50 +0100
Subject: x86/msr-index: Cleanup bit defines
Git-commit: d8eabc37310a92df40d07c5a8afc53cebf996716
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Greg pointed out that speculation related bit defines are using (1 << N)
diff --git a/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
index a8953a02a2..f30478b90b 100644
--- a/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
+++ b/patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 27 Feb 2019 10:10:23 +0100
Subject: x86/speculation: Consolidate CPU whitelists
Git-commit: 36ad35131adacc29b328b9c8b6277a8bf0d6fd5d
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
The CPU vulnerability whitelists have some overlap and there are more
diff --git a/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
index 3b673bacec..fad4ad16e9 100644
--- a/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
+++ b/patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
@@ -2,7 +2,7 @@ From: Andi Kleen <ak@linux.intel.com>
Date: Fri, 18 Jan 2019 16:50:16 -0800
Subject: x86/speculation/mds: Add basic bug infrastructure for MDS
Git-commit: ed5194c2732c8084af9fd159c146ea92bf137128
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Microarchitectural Data Sampling (MDS), is a class of side channel attacks
diff --git a/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
index b6ebab3b9e..de1249b660 100644
--- a/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
+++ b/patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 1 Mar 2019 20:21:08 +0100
Subject: x86/speculation/mds: Add BUG_MSBDS_ONLY
Git-commit: e261f209c3666e842fd645a1e31f001c3a26def9
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
This bug bit is set on CPUs which are only affected by Microarchitectural
diff --git a/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
index d3303c9f25..dec6360c5c 100644
--- a/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
+++ b/patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Tue, 2 Apr 2019 09:59:33 -0500
Subject: x86/speculation/mds: Add mds=full,nosmt cmdline option
-Git-repo: tip/tip
Git-commit: d71eb0ce109a124b0fa714832823b9452f2762cf
-Patch-mainline: Queued in a subsystem tree
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Add the mds=full,nosmt cmdline option. This is like mds=full, but with
diff --git a/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
index 3d563bb801..a92f385657 100644
--- a/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
+++ b/patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Feb 2019 23:13:06 +0100
Subject: x86/speculation/mds: Add mds_clear_cpu_buffers()
Git-commit: 6a9e529272517755904b7afa639f6db59ddb793e
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
The Microarchitectural Data Sampling (MDS) vulernabilities are mitigated by
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
index 0cc8bfbf56..4854312bca 100644
--- a/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
+++ b/patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Feb 2019 22:04:08 +0100
Subject: x86/speculation/mds: Add mitigation control for MDS
Git-commit: bc1241700acd82ec69fde98c5763ce51086269f8
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Now that the mitigations are in place, add a command line parameter to
diff --git a/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
index 4f0e1f0f07..67da3599f5 100644
--- a/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
+++ b/patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 20 Feb 2019 09:40:40 +0100
Subject: x86/speculation/mds: Add mitigation mode VMWERV
Git-commit: 22dd8365088b6403630b82423cf906491859b65e
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
In virtualized environments it can happen that the host has the microcode
diff --git a/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
index 4405b7c895..3f3718f858 100644
--- a/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
+++ b/patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Wed, 17 Apr 2019 16:39:02 -0500
Subject: x86/speculation/mds: Add 'mitigations=' support for MDS
-Git-repo: tip/tip
Git-commit: 5c14068f87d04adc73ba3f41c2a303d3c3d1fa12
-Patch-mainline: Queued in a subsystem tree
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Add MDS to the new 'mitigations=' cmdline option.
diff --git a/patches.arch/x86-speculation-mds-add-smt-warning-message.patch b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
index 0ba3c2f544..28772aceeb 100644
--- a/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
+++ b/patches.arch/x86-speculation-mds-add-smt-warning-message.patch
@@ -2,8 +2,7 @@ From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Tue, 2 Apr 2019 10:00:51 -0500
Subject: x86/speculation/mds: Add SMT warning message
Git-commit: 39226ef02bfb43248b7db12a4fdccb39d95318e3
-Git-repo: tip/tip
-Patch-mainline: Queued in a subsystem tree
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
MDS is vulnerable with SMT. Make that clear with a one-time printk
diff --git a/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
index 2a1c7ad669..f0fa6a4e0b 100644
--- a/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
+++ b/patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Feb 2019 22:51:43 +0100
Subject: x86/speculation/mds: Add sysfs reporting for MDS
Git-commit: 8a4b06d391b0a42a373808979b5028f5c84d9c6a
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Add the sysfs reporting file for MDS. It exposes the vulnerability and
diff --git a/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
index 8501ae91ae..38c5a27958 100644
--- a/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
+++ b/patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Feb 2019 23:42:51 +0100
Subject: x86/speculation/mds: Clear CPU buffers on exit to user
Git-commit: 04dcbdb8057827b043b3c71aa397c4c63e67d086
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Add a static key which controls the invocation of the CPU buffer clear
diff --git a/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
index e55065d925..e89ae29353 100644
--- a/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
+++ b/patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
@@ -2,7 +2,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 18 Feb 2019 23:04:01 +0100
Subject: x86/speculation/mds: Conditionally clear CPU buffers on idle entry
Git-commit: 07f07f55a29cb705e221eda7894dd67ab81ef343
-Patch-mainline: v5.1-rc1
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
Add a static key which controls the invocation of the CPU buffer clear
diff --git a/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
index c9762dddd2..8ad7e115fb 100644
--- a/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
+++ b/patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
@@ -1,9 +1,8 @@
From: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Date: Fri, 12 Apr 2019 17:50:58 -0400
Subject: x86/speculation/mds: Print SMT vulnerable on MSBDS with mitigations off
-Git-repo: tip/tip
Git-commit: e2c3c94788b08891dcf3dbe608f9880523ecd71b
-Patch-mainline: Queued in a subsystem tree
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
This code is only for CPUs which are affected by MSBDS, but are *not*
diff --git a/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
index cfcda3d0e4..90fe611911 100644
--- a/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
+++ b/patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
@@ -1,9 +1,8 @@
From: Josh Poimboeuf <jpoimboe@redhat.com>
Date: Tue, 2 Apr 2019 10:00:14 -0500
Subject: x86/speculation: Move arch_smt_update() call to after mitigation decisions
-Git-repo: tip/tip
Git-commit: 7c3658b20194a5b3209a143f63bc9c643c6a3ae2
-Patch-mainline: Queued in a subsystem tree
+Patch-mainline: v5.2-rc1
References: bsc#1111331, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130
arch_smt_update() now has a dependency on both Spectre v2 and MDS
diff --git a/patches.drivers/PCI-Mark-AMD-Stoney-Radeon-R7-GPU-ATS-as-broken.patch b/patches.drivers/PCI-Mark-AMD-Stoney-Radeon-R7-GPU-ATS-as-broken.patch
new file mode 100644
index 0000000000..1cf26f86de
--- /dev/null
+++ b/patches.drivers/PCI-Mark-AMD-Stoney-Radeon-R7-GPU-ATS-as-broken.patch
@@ -0,0 +1,43 @@
+From d28ca864c493637f3c957f4ed9348a94fca6de60 Mon Sep 17 00:00:00 2001
+From: Nikolai Kostrigin <nickel@altlinux.org>
+Date: Mon, 8 Apr 2019 13:37:25 +0300
+Subject: [PATCH] PCI: Mark AMD Stoney Radeon R7 GPU ATS as broken
+Git-commit: d28ca864c493637f3c957f4ed9348a94fca6de60
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+ATS is broken on the Radeon R7 GPU (at least for Stoney Ridge based laptop)
+and causes IOMMU stalls and system failure. Disable ATS on these devices
+to make them usable again with IOMMU enabled.
+
+Thanks to Joerg Roedel <jroedel@suse.de> for help.
+
+[bhelgaas: In the email thread mentioned below, Alex suspects the real
+problem is in sbios or iommu, so it may affect only certain systems, and it
+may affect other devices in those systems as well. However, per Joerg we
+lack the ability to debug further, so this quirk is the best we can do for
+now.]
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=194521
+Link: https://lore.kernel.org/lkml/20190408103725.30426-1-nickel@altlinux.org
+Fixes: 9b44b0b09dec ("PCI: Mark AMD Stoney GPU ATS as broken")
+Signed-off-by: Nikolai Kostrigin <nickel@altlinux.org>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Joerg Roedel <jroedel@suse.de>
+Cc: stable@vger.kernel.org
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/pci/quirks.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4983,6 +4983,7 @@ static void quirk_no_ats(struct pci_dev
+
+ /* AMD Stoney platform GPU */
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats);
+ #endif /* CONFIG_PCI_ATS */
+
+ static void quirk_no_ext_tags(struct pci_dev *pdev)
diff --git a/patches.drivers/PCI-Mark-Atheros-AR9462-to-avoid-bus-reset.patch b/patches.drivers/PCI-Mark-Atheros-AR9462-to-avoid-bus-reset.patch
new file mode 100644
index 0000000000..39d5025202
--- /dev/null
+++ b/patches.drivers/PCI-Mark-Atheros-AR9462-to-avoid-bus-reset.patch
@@ -0,0 +1,38 @@
+From 6afb7e26978da5e86e57e540fdce65c8b04f398a Mon Sep 17 00:00:00 2001
+From: James Prestwood <james.prestwood@linux.intel.com>
+Date: Mon, 7 Jan 2019 13:32:48 -0800
+Subject: [PATCH] PCI: Mark Atheros AR9462 to avoid bus reset
+Git-commit: 6afb7e26978da5e86e57e540fdce65c8b04f398a
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+When using PCI passthrough with this device, the host machine locks up
+completely when starting the VM, requiring a hard reboot. Add a quirk to
+avoid bus resets on this device.
+
+Fixes: c3e59ee4e766 ("PCI: Mark Atheros AR93xx to avoid bus reset")
+Link: https://lore.kernel.org/linux-pci/20190107213248.3034-1-james.prestwood@linux.intel.com
+Signed-off-by: James Prestwood <james.prestwood@linux.intel.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org # v3.14+
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/pci/quirks.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 68bee35fcafa..9b9e28854a58 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -3408,6 +3408,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
+ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
++DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
+
+ /*
+ * Root port on some Cavium CN8xxx chips do not successfully complete a bus
+--
+2.16.4
+
diff --git a/patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch b/patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch
new file mode 100644
index 0000000000..9ad535a87b
--- /dev/null
+++ b/patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch
@@ -0,0 +1,50 @@
+From d3f48ec0954c6aac736ab21c34a35d7554409112 Mon Sep 17 00:00:00 2001
+From: Brian Masney <masneyb@onstation.org>
+Date: Wed, 24 Apr 2019 05:25:03 -0400
+Subject: [PATCH] backlight: lm3630a: Return 0 on success in update_status functions
+Git-commit: d3f48ec0954c6aac736ab21c34a35d7554409112
+Patch-mainline: v5.2-rc1
+References: bsc#1051510
+
+lm3630a_bank_a_update_status() and lm3630a_bank_b_update_status()
+both return the brightness value if the brightness was successfully
+updated. Writing to these attributes via sysfs would cause a 'Bad
+address' error to be returned. These functions should return 0 on
+success, so let's change it to correct that error.
+
+Fixes: 28e64a68a2ef ("backlight: lm3630: apply chip revision")
+Signed-off-by: Brian Masney <masneyb@onstation.org>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Acked-by: Daniel Thompson <daniel.thompson@linaro.org>
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Acked-by: Takashi Iwai <tiwai@suse.de>
+
+---
+ drivers/video/backlight/lm3630a_bl.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index 2030a6b77a09..ef2553f452ca 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -201,7 +201,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+ LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+- return bl->props.brightness;
++ return 0;
+
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access\n");
+@@ -278,7 +278,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+ LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+- return bl->props.brightness;
++ return 0;
+
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
+--
+2.16.4
+
diff --git a/patches.drivers/ipmi-ssif-compare-block-number-correctly-for-multi-p.patch b/patches.drivers/ipmi-ssif-compare-block-number-correctly-for-multi-p.patch
index 60159f93ca..db21f5422c 100644
--- a/patches.drivers/ipmi-ssif-compare-block-number-correctly-for-multi-p.patch
+++ b/patches.drivers/ipmi-ssif-compare-block-number-correctly-for-multi-p.patch
@@ -4,7 +4,7 @@ Date: Wed, 24 Apr 2019 11:50:43 +0000
Subject: [PATCH] ipmi:ssif: compare block number correctly for multi-part return messages
Git-commit: 55be8658c7e2feb11a5b5b33ee031791dbd23a69
Patch-mainline: v5.2-rc1
-References: bsc#1051510
+References: bsc#1051510, bsc#1135120
According to ipmi spec, block number is a number that is incremented,
starting with 0, for each new block of message data returned using the
diff --git a/patches.drm/0001-drm-i915-gvt-Fix-mmap-range-check.patch b/patches.drm/0001-drm-i915-gvt-Fix-mmap-range-check.patch
index b92d55bb53..643bb857f6 100644
--- a/patches.drm/0001-drm-i915-gvt-Fix-mmap-range-check.patch
+++ b/patches.drm/0001-drm-i915-gvt-Fix-mmap-range-check.patch
@@ -4,7 +4,7 @@ Date: Fri, 11 Jan 2019 13:58:53 +0800
Subject: drm/i915/gvt: Fix mmap range check
Git-commit: 51b00d8509dc69c98740da2ad07308b630d3eb7d
Patch-mainline: v5.0-rc3
-References: bsc#1120902
+References: bsc#1120902, CVE-2019-11085, bsc#1135278
This is to fix missed mmap range check on vGPU bar2 region
and only allow to map vGPU allocated GMADDR range, which means
diff --git a/patches.fixes/CIFS-keep-FileInfo-handle-live-during-oplock-break.patch b/patches.fixes/CIFS-keep-FileInfo-handle-live-during-oplock-break.patch
new file mode 100644
index 0000000000..39873058a0
--- /dev/null
+++ b/patches.fixes/CIFS-keep-FileInfo-handle-live-during-oplock-break.patch
@@ -0,0 +1,186 @@
+From: Aurelien Aptel <aaptel@suse.com>
+Date: Fri, 29 Mar 2019 10:49:12 +0100
+Subject: [PATCH] CIFS: keep FileInfo handle live during oplock break
+Git-commit: b98749cac4a695f084a5ff076f4510b23e353ecd
+References: bsc#1106284, bsc#1131565
+Patch-mainline: v5.1-rc6
+
+In the oplock break handler, writing pending changes from pages puts
+the FileInfo handle. If the refcount reaches zero it closes the handle
+and waits for any oplock break handler to return, thus causing a deadlock.
+
+To prevent this situation:
+
+* We add a wait flag to cifsFileInfo_put() to decide whether we should
+ wait for running/pending oplock break handlers
+
+* We keep an additionnal reference of the SMB FileInfo handle so that
+ for the rest of the handler putting the handle won't close it.
+ - The ref is bumped everytime we queue the handler via the
+ cifs_queue_oplock_break() helper.
+ - The ref is decremented at the end of the handler
+
+This bug was triggered by xfstest 464.
+
+Also important fix to address the various reports of
+oops in smb2_push_mandatory_locks
+
+Signed-off-by: Aurelien Aptel <aaptel@suse.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+CC: Stable <stable@vger.kernel.org>
+---
+ fs/cifs/cifsglob.h | 2 ++
+ fs/cifs/file.c | 30 +++++++++++++++++++++++++-----
+ fs/cifs/misc.c | 25 +++++++++++++++++++++++--
+ fs/cifs/smb2misc.c | 6 +++---
+ 4 files changed, 53 insertions(+), 10 deletions(-)
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 5b18d4585740..585ad3207cb1 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -1333,6 +1333,7 @@ cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+ }
+
+ struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_hdlr);
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+
+ #define CIFS_CACHE_READ_FLG 1
+@@ -1855,6 +1856,7 @@ GLOBAL_EXTERN spinlock_t gidsidlock;
+ #endif /* CONFIG_CIFS_ACL */
+
+ void cifs_oplock_break(struct work_struct *work);
++void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
+
+ extern const struct slow_work_ops cifs_oplock_break_ops;
+ extern struct workqueue_struct *cifsiod_wq;
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 89006e044973..9c0ccc06d172 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -360,12 +360,30 @@ cifsFileInfo_get(struct cifsFileInfo *cifs_file)
+ return cifs_file;
+ }
+
+-/*
+- * Release a reference on the file private data. This may involve closing
+- * the filehandle out on the server. Must be called without holding
+- * tcon->open_file_lock and cifs_file->file_info_lock.
++/**
++ * cifsFileInfo_put - release a reference of file priv data
++ *
++ * Always potentially wait for oplock handler. See _cifsFileInfo_put().
+ */
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
++{
++ _cifsFileInfo_put(cifs_file, true);
++}
++
++/**
++ * _cifsFileInfo_put - release a reference of file priv data
++ *
++ * This may involve closing the filehandle @cifs_file out on the
++ * server. Must be called without holding tcon->open_file_lock and
++ * cifs_file->file_info_lock.
++ *
++ * If @wait_for_oplock_handler is true and we are releasing the last
++ * reference, wait for any running oplock break handler of the file
++ * and cancel any pending one. If calling this function from the
++ * oplock break handler, you need to pass false.
++ *
++ */
++void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
+ {
+ struct inode *inode = d_inode(cifs_file->dentry);
+ struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
+@@ -414,7 +432,8 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
+
+ spin_unlock(&tcon->open_file_lock);
+
+- oplock_break_cancelled = cancel_work_sync(&cifs_file->oplock_break);
++ oplock_break_cancelled = wait_oplock_handler ?
++ cancel_work_sync(&cifs_file->oplock_break) : false;
+
+ if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+@@ -4603,6 +4622,7 @@ void cifs_oplock_break(struct work_struct *work)
+ cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ }
++ _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
+ cifs_done_oplock_break(cinode);
+ }
+
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index bee203055b30..1e1626a2cfc3 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -501,8 +501,7 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &pCifsInode->flags);
+
+- queue_work(cifsoplockd_wq,
+- &netfile->oplock_break);
++ cifs_queue_oplock_break(netfile);
+ netfile->oplock_break_cancelled = false;
+
+ spin_unlock(&tcon->open_file_lock);
+@@ -607,6 +606,28 @@ void cifs_put_writer(struct cifsInodeInfo *cinode)
+ spin_unlock(&cinode->writers_lock);
+ }
+
++/**
++ * cifs_queue_oplock_break - queue the oplock break handler for cfile
++ *
++ * This function is called from the demultiplex thread when it
++ * receives an oplock break for @cfile.
++ *
++ * Assumes the tcon->open_file_lock is held.
++ * Assumes cfile->file_info_lock is NOT held.
++ */
++void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
++{
++ /*
++ * Bump the handle refcount now while we hold the
++ * open_file_lock to enforce the validity of it for the oplock
++ * break handler. The matching put is done at the end of the
++ * handler.
++ */
++ cifsFileInfo_get(cfile);
++
++ queue_work(cifsoplockd_wq, &cfile->oplock_break);
++}
++
+ void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
+ {
+ clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 0e3570e40ff8..e311f58dc1c8 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -555,7 +555,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
+ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+
+- queue_work(cifsoplockd_wq, &cfile->oplock_break);
++ cifs_queue_oplock_break(cfile);
+ kfree(lw);
+ return true;
+ }
+@@ -712,8 +712,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+ &cinode->flags);
+ spin_unlock(&cfile->file_info_lock);
+- queue_work(cifsoplockd_wq,
+- &cfile->oplock_break);
++
++ cifs_queue_oplock_break(cfile);
+
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+--
+2.16.4
+
+
diff --git a/patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch b/patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
index 67fa11cf5e..da6a698744 100644
--- a/patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
+++ b/patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
@@ -3,8 +3,7 @@ Date: Tue, 30 Apr 2019 18:57:09 +0200
Subject: [PATCH] nvme-multipath: split bios with the ns_head bio_set before
submitting
Git-commit: 525aa5a705d86e193726ee465d1a975265fabf19
-Git-repo: git://git.infradead.org/nvme.git
-Patch-Mainline: queued in subsystem maintainer tree
+Patch-Mainline: v5.2-rc1
References: bsc#1103259, bsc#1131673
If the bio is moved to a different queue via blk_steal_bios() and
diff --git a/patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch b/patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch
new file mode 100644
index 0000000000..6eac1cfa99
--- /dev/null
+++ b/patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch
@@ -0,0 +1,87 @@
+From 965fe8a6e29ede784cb38b97bb894aac1e8337a6 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Fri, 10 May 2019 12:45:05 +0800
+Patch-mainline: Submitted, 10 May 2019
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Subject: [PATCH] btrfs: extent-tree: Fix a bug that btrfs is unable to add
+ pinned bytes
+
+Commit ddf30cf03fb5 ("btrfs: extent-tree: Use btrfs_ref to refactor
+add_pinned_bytes()") refactored add_pinned_bytes(), but during that
+refactor, there are two callers which add the pinned bytes instead
+of subtracting.
+
+That refactor misses those two caller, causing incorrect pinned bytes
+calculation and resulting unexpected ENOSPC error.
+
+Fix it by refactoring add_pinned_bytes() to add_pinned_bytes() and
+sub_pinned_bytes() to explicitly show what we're doing.
+
+Reported-by: kernel test robot <rong.a.chen@intel.com>
+Fixes: ddf30cf03fb5 ("btrfs: extent-tree: Use btrfs_ref to refactor add_pinned_bytes()")
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/extent-tree.c | 36 ++++++++++++++++++++++++------------
+ 1 file changed, 24 insertions(+), 12 deletions(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -767,25 +767,37 @@ static struct btrfs_space_info *__find_s
+ return NULL;
+ }
+
++static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
++{
++ if (ref->type == BTRFS_REF_METADATA) {
++ if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
++ return BTRFS_BLOCK_GROUP_SYSTEM;
++ else
++ return BTRFS_BLOCK_GROUP_METADATA;
++ }
++ return BTRFS_BLOCK_GROUP_DATA;
++}
++
+ static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_ref *ref)
+ {
+ struct btrfs_space_info *space_info;
+- s64 num_bytes = -ref->len;
+- u64 flags;
++ u64 flags = generic_ref_to_space_flags(ref);
+
+- if (ref->type == BTRFS_REF_METADATA) {
+- if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
+- flags = BTRFS_BLOCK_GROUP_SYSTEM;
+- else
+- flags = BTRFS_BLOCK_GROUP_METADATA;
+- } else {
+- flags = BTRFS_BLOCK_GROUP_DATA;
+- }
++ space_info = __find_space_info(fs_info, flags);
++ ASSERT(space_info);
++ percpu_counter_add(&space_info->total_bytes_pinned, ref->len);
++}
++
++static void sub_pinned_bytes(struct btrfs_fs_info *fs_info,
++ struct btrfs_ref *ref)
++{
++ struct btrfs_space_info *space_info;
++ u64 flags = generic_ref_to_space_flags(ref);
+
+ space_info = __find_space_info(fs_info, flags);
+ ASSERT(space_info);
+- percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
++ percpu_counter_add(&space_info->total_bytes_pinned, -ref->len);
+ }
+
+ /*
+@@ -2129,7 +2141,7 @@ int btrfs_inc_extent_ref(struct btrfs_tr
+ &old_ref_mod, &new_ref_mod);
+
+ if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+- add_pinned_bytes(fs_info, generic_ref);
++ sub_pinned_bytes(fs_info, generic_ref);
+
+ return ret;
+ }
diff --git a/patches.suse/0003-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch b/patches.suse/0003-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
new file mode 100644
index 0000000000..7b7a9005e9
--- /dev/null
+++ b/patches.suse/0003-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
@@ -0,0 +1,213 @@
+From ed4f255b9bacb774c99ded17647f138c3f61546d Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:31 +0800
+Git-commit: ed4f255b9bacb774c99ded17647f138c3f61546d
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 3/9] btrfs: delayed-ref: Use btrfs_ref to refactor
+ btrfs_add_delayed_tree_ref()
+
+btrfs_add_delayed_tree_ref() has a longer and longer parameter list, and
+some callers like btrfs_inc_extent_ref() are using @owner as level for
+delayed tree ref.
+
+Instead of making the parameter list longer, use btrfs_ref to refactor
+it, so each parameter assignment should be self-explaining without dirty
+level/owner trick, and provides the basis for later refactoring.
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 24 +++++++++++++++++-------
+ fs/btrfs/delayed-ref.h | 3 +--
+ fs/btrfs/extent-tree.c | 48 ++++++++++++++++++++++++++++--------------------
+ 3 files changed, 46 insertions(+), 29 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -723,8 +723,7 @@ static void init_delayed_ref_common(stru
+ */
+ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+- u64 bytenr, u64 num_bytes, u64 parent,
+- u64 ref_root, int level, int action,
++ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op,
+ int *old_ref_mod, int *new_ref_mod)
+ {
+@@ -733,10 +732,18 @@ int btrfs_add_delayed_tree_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
+- bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
++ bool is_system;
++ int action = generic_ref->action;
++ int level = generic_ref->tree_ref.level;
+ int ret;
++ u64 bytenr = generic_ref->bytenr;
++ u64 num_bytes = generic_ref->len;
++ u64 parent = generic_ref->parent;
+ u8 ref_type;
+
++ is_system = (generic_ref->real_root == BTRFS_CHUNK_TREE_OBJECTID);
++
++ ASSERT(generic_ref->type == BTRFS_REF_METADATA && generic_ref->action);
+ BUG_ON(extent_op && extent_op->is_data);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
+ if (!ref)
+@@ -747,8 +754,8 @@ int btrfs_add_delayed_tree_ref(struct bt
+ else
+ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
+- ref_root, action, ref_type);
+- ref->root = ref_root;
++ generic_ref->tree_ref.root, action, ref_type);
++ ref->root = generic_ref->tree_ref.root;
+ ref->parent = parent;
+ ref->level = level;
+
+@@ -757,14 +764,17 @@ int btrfs_add_delayed_tree_ref(struct bt
+ goto free_ref;
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+- is_fstree(ref_root)) {
++ is_fstree(generic_ref->real_root) &&
++ is_fstree(generic_ref->tree_ref.root) &&
++ !generic_ref->skip_qgroup) {
+ record = kzalloc(sizeof(*record), GFP_NOFS);
+ if (!record)
+ goto free_head_ref;
+ }
+
+ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+- ref_root, 0, action, false, is_system);
++ generic_ref->tree_ref.root, 0, action, false,
++ is_system);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -346,8 +346,7 @@ static inline void btrfs_put_delayed_ref
+
+ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+- u64 bytenr, u64 num_bytes, u64 parent,
+- u64 ref_root, int level, int action,
++ struct btrfs_ref *generic_ref,
+ struct btrfs_delayed_extent_op *extent_op,
+ int *old_ref_mod, int *new_ref_mod);
+ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2113,18 +2113,20 @@ int btrfs_inc_extent_ref(struct btrfs_tr
+ u64 bytenr, u64 num_bytes, u64 parent,
+ u64 root_objectid, u64 owner, u64 offset)
+ {
++ struct btrfs_ref generic_ref = { 0 };
+ int old_ref_mod, new_ref_mod;
+ int ret;
+
+ BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
+ root_objectid == BTRFS_TREE_LOG_OBJECTID);
+
++ btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF, bytenr,
++ num_bytes, parent);
++ generic_ref.real_root = root_objectid;
+ if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+- num_bytes, parent,
+- root_objectid, (int)owner,
+- BTRFS_ADD_DELAYED_REF, NULL,
+- &old_ref_mod, &new_ref_mod);
++ btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
++ NULL, &old_ref_mod, &new_ref_mod);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes, parent,
+@@ -7121,18 +7123,20 @@ void btrfs_free_tree_block(struct btrfs_
+ u64 parent, int last_ref)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
++ struct btrfs_ref generic_ref = { 0 };
+ int pin = 1;
+ int ret;
+
++ btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF,
++ buf->start, buf->len, parent);
++ btrfs_init_tree_ref(&generic_ref, btrfs_header_level(buf),
++ root->root_key.objectid);
++
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
+ int old_ref_mod, new_ref_mod;
+
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, buf->start,
+- buf->len, parent,
+- root->root_key.objectid,
+- btrfs_header_level(buf),
+- BTRFS_DROP_DELAYED_REF, NULL,
+- &old_ref_mod, &new_ref_mod);
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref, NULL,
++ &old_ref_mod, &new_ref_mod);
+ BUG_ON(ret); /* -ENOMEM */
+ pin = old_ref_mod >= 0 && new_ref_mod < 0;
+ }
+@@ -7183,6 +7187,7 @@ int btrfs_free_extent(struct btrfs_trans
+ u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+ u64 owner, u64 offset)
+ {
++ struct btrfs_ref generic_ref = { 0 };
+ int old_ref_mod, new_ref_mod;
+ int ret;
+
+@@ -7190,6 +7195,9 @@ int btrfs_free_extent(struct btrfs_trans
+ return 0;
+
+
++ btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, bytenr,
++ num_bytes, parent);
++ generic_ref.real_root = root_objectid;
+ /*
+ * tree log blocks never actually go into the extent allocation
+ * tree, just update pinning info and exit early.
+@@ -7201,11 +7209,9 @@ int btrfs_free_extent(struct btrfs_trans
+ old_ref_mod = new_ref_mod = 0;
+ ret = 0;
+ } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+- num_bytes, parent,
+- root_objectid, (int)owner,
+- BTRFS_DROP_DELAYED_REF, NULL,
+- &old_ref_mod, &new_ref_mod);
++ btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
++ NULL, &old_ref_mod, &new_ref_mod);
+ } else {
+ ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+ num_bytes, parent,
+@@ -8335,6 +8341,7 @@ struct extent_buffer *btrfs_alloc_tree_b
+ struct btrfs_block_rsv *block_rsv;
+ struct extent_buffer *buf;
+ struct btrfs_delayed_extent_op *extent_op;
++ struct btrfs_ref generic_ref = { 0 };
+ u64 flags = 0;
+ int ret;
+ u32 blocksize = fs_info->nodesize;
+@@ -8388,10 +8395,11 @@ struct extent_buffer *btrfs_alloc_tree_b
+ extent_op->is_data = false;
+ extent_op->level = level;
+
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, ins.objectid,
+- ins.offset, parent,
+- root_objectid, level,
+- BTRFS_ADD_DELAYED_EXTENT,
++ btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
++ ins.objectid, ins.offset, parent);
++ generic_ref.real_root = root->root_key.objectid;
++ btrfs_init_tree_ref(&generic_ref, level, root_objectid);
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
+ extent_op, NULL, NULL);
+ if (ret)
+ goto out_free_delayed;
diff --git a/patches.suse/0004-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch b/patches.suse/0004-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
new file mode 100644
index 0000000000..cefdb23914
--- /dev/null
+++ b/patches.suse/0004-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
@@ -0,0 +1,124 @@
+From 76675593b69f2fcd57e24d9dd2a9b278f0130d0b Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:32 +0800
+Git-commit: 76675593b69f2fcd57e24d9dd2a9b278f0130d0b
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 4/9] btrfs: delayed-ref: Use btrfs_ref to refactor
+ btrfs_add_delayed_data_ref()
+
+Just like btrfs_add_delayed_tree_ref(), use btrfs_ref to refactor
+btrfs_add_delayed_data_ref().
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/delayed-ref.c | 16 ++++++++++++----
+ fs/btrfs/delayed-ref.h | 4 +---
+ fs/btrfs/extent-tree.c | 22 ++++++++++------------
+ 3 files changed, 23 insertions(+), 19 deletions(-)
+
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -815,9 +815,7 @@ free_ref:
+ */
+ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+- u64 bytenr, u64 num_bytes,
+- u64 parent, u64 ref_root,
+- u64 owner, u64 offset, u64 reserved, int action,
++ struct btrfs_ref *generic_ref, u64 reserved,
+ int *old_ref_mod, int *new_ref_mod)
+ {
+ struct btrfs_delayed_data_ref *ref;
+@@ -825,9 +823,17 @@ int btrfs_add_delayed_data_ref(struct bt
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_qgroup_extent_record *record = NULL;
+ int qrecord_inserted;
++ int action = generic_ref->action;
+ int ret;
++ u64 bytenr = generic_ref->bytenr;
++ u64 num_bytes = generic_ref->len;
++ u64 parent = generic_ref->parent;
++ u64 ref_root = generic_ref->data_ref.ref_root;
++ u64 owner = generic_ref->data_ref.ino;
++ u64 offset = generic_ref->data_ref.offset;
+ u8 ref_type;
+
++ ASSERT(generic_ref->type == BTRFS_REF_DATA && action);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
+ if (!ref)
+ return -ENOMEM;
+@@ -851,7 +857,9 @@ int btrfs_add_delayed_data_ref(struct bt
+ }
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+- is_fstree(ref_root)) {
++ is_fstree(ref_root) &&
++ is_fstree(generic_ref->real_root) &&
++ !generic_ref->skip_qgroup) {
+ record = kzalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -351,9 +351,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ int *old_ref_mod, int *new_ref_mod);
+ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+- u64 bytenr, u64 num_bytes,
+- u64 parent, u64 ref_root,
+- u64 owner, u64 offset, u64 reserved, int action,
++ struct btrfs_ref *generic_ref, u64 reserved,
+ int *old_ref_mod, int *new_ref_mod);
+ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+ struct btrfs_trans_handle *trans,
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2128,10 +2128,8 @@ int btrfs_inc_extent_ref(struct btrfs_tr
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
+ NULL, &old_ref_mod, &new_ref_mod);
+ } else {
+- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+- num_bytes, parent,
+- root_objectid, owner, offset,
+- 0, BTRFS_ADD_DELAYED_REF,
++ btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
++ ret = btrfs_add_delayed_data_ref(fs_info, trans, &generic_ref, 0,
+ &old_ref_mod, &new_ref_mod);
+ }
+
+@@ -7213,10 +7211,8 @@ int btrfs_free_extent(struct btrfs_trans
+ ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
+ NULL, &old_ref_mod, &new_ref_mod);
+ } else {
+- ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+- num_bytes, parent,
+- root_objectid, owner, offset,
+- 0, BTRFS_DROP_DELAYED_REF,
++ btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
++ ret = btrfs_add_delayed_data_ref(fs_info, trans, &generic_ref, 0,
+ &old_ref_mod, &new_ref_mod);
+ }
+
+@@ -8167,14 +8163,16 @@ int btrfs_alloc_reserved_file_extent(str
+ struct btrfs_key *ins)
+ {
+ struct btrfs_fs_info *fs_info = trans->fs_info;
++ struct btrfs_ref generic_ref = { 0 };
+ int ret;
+
+ BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
+
+- ret = btrfs_add_delayed_data_ref(fs_info, trans, ins->objectid,
+- ins->offset, 0, root_objectid, owner,
+- offset, ram_bytes,
+- BTRFS_ADD_DELAYED_EXTENT, NULL, NULL);
++ btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
++ ins->objectid, ins->offset, 0);
++ btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
++ ret = btrfs_add_delayed_data_ref(fs_info, trans, &generic_ref,
++ ram_bytes, NULL, NULL);
+ return ret;
+ }
+
diff --git a/patches.suse/0006-btrfs-extent-tree-Use-btrfs_ref-to-refactor-add_pinn.patch b/patches.suse/0006-btrfs-extent-tree-Use-btrfs_ref-to-refactor-add_pinn.patch
new file mode 100644
index 0000000000..7eaad9adcd
--- /dev/null
+++ b/patches.suse/0006-btrfs-extent-tree-Use-btrfs_ref-to-refactor-add_pinn.patch
@@ -0,0 +1,70 @@
+From ddf30cf03fb53b9a0ad0f355a69dbedf416edde9 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:34 +0800
+Git-commit: ddf30cf03fb53b9a0ad0f355a69dbedf416edde9
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 6/9] btrfs: extent-tree: Use btrfs_ref to refactor
+ add_pinned_bytes()
+
+Since add_pinned_bytes() only needs to know if the extent is metadata
+and if it's a chunk tree extent, btrfs_ref is a perfect match for it, as
+we don't need various owner/level trick to determine extent type.
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/extent-tree.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -767,14 +767,15 @@ static struct btrfs_space_info *__find_s
+ return NULL;
+ }
+
+-static void add_pinned_bytes(struct btrfs_fs_info *fs_info, s64 num_bytes,
+- u64 owner, u64 root_objectid)
++static void add_pinned_bytes(struct btrfs_fs_info *fs_info,
++ struct btrfs_ref *ref)
+ {
+ struct btrfs_space_info *space_info;
++ s64 num_bytes = -ref->len;
+ u64 flags;
+
+- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+- if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
++ if (ref->type == BTRFS_REF_METADATA) {
++ if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID)
+ flags = BTRFS_BLOCK_GROUP_SYSTEM;
+ else
+ flags = BTRFS_BLOCK_GROUP_METADATA;
+@@ -2134,7 +2135,7 @@ int btrfs_inc_extent_ref(struct btrfs_tr
+ }
+
+ if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+- add_pinned_bytes(fs_info, -num_bytes, owner, root_objectid);
++ add_pinned_bytes(fs_info, &generic_ref);
+
+ return ret;
+ }
+@@ -7167,8 +7168,7 @@ void btrfs_free_tree_block(struct btrfs_
+ }
+ out:
+ if (pin)
+- add_pinned_bytes(fs_info, buf->len, btrfs_header_level(buf),
+- root->root_key.objectid);
++ add_pinned_bytes(fs_info, &generic_ref);
+
+ if (last_ref) {
+ /*
+@@ -7217,7 +7217,7 @@ int btrfs_free_extent(struct btrfs_trans
+ }
+
+ if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+- add_pinned_bytes(fs_info, num_bytes, owner, root_objectid);
++ add_pinned_bytes(fs_info, &generic_ref);
+
+ return ret;
+ }
diff --git a/patches.suse/0007-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_in.patch b/patches.suse/0007-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_in.patch
new file mode 100644
index 0000000000..4920713e43
--- /dev/null
+++ b/patches.suse/0007-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_in.patch
@@ -0,0 +1,370 @@
+From 82fa113fccc41fe5204b4ce35341d69ebde0020f Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:35 +0800
+Git-commit: 82fa113fccc41fe5204b4ce35341d69ebde0020f
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 7/9] btrfs: extent-tree: Use btrfs_ref to refactor
+ btrfs_inc_extent_ref()
+
+Use the new btrfs_ref structure and replace parameter list to clean up
+the usage of owner and level to distinguish the extent types.
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/ctree.h | 4 +--
+ fs/btrfs/extent-tree.c | 54 +++++++++++++++++++++++++++++--------------------
+ fs/btrfs/file.c | 18 +++++++++++-----
+ fs/btrfs/inode.c | 10 +++++----
+ fs/btrfs/ioctl.c | 15 +++++++++----
+ fs/btrfs/relocation.c | 41 ++++++++++++++++++++++++-------------
+ fs/btrfs/tree-log.c | 12 ++++++++--
+ 7 files changed, 99 insertions(+), 55 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -53,6 +53,7 @@ extern struct kmem_cache *btrfs_bit_radi
+ extern struct kmem_cache *btrfs_path_cachep;
+ extern struct kmem_cache *btrfs_free_space_cachep;
+ struct btrfs_ordered_sum;
++struct btrfs_ref;
+
+ #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ #define STATIC noinline
+@@ -2731,8 +2732,7 @@ int btrfs_finish_extent_commit(struct bt
+ struct btrfs_fs_info *fs_info);
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+- u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset);
++ struct btrfs_ref *generic_ref);
+
+ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info);
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2111,31 +2111,25 @@ int btrfs_discard_extent(struct btrfs_fs
+ /* Can return -ENOMEM */
+ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+- u64 bytenr, u64 num_bytes, u64 parent,
+- u64 root_objectid, u64 owner, u64 offset)
++ struct btrfs_ref *generic_ref)
+ {
+- struct btrfs_ref generic_ref = { 0 };
+ int old_ref_mod, new_ref_mod;
+ int ret;
+
+- BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
+- root_objectid == BTRFS_TREE_LOG_OBJECTID);
++ ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
++ generic_ref->action);
++ BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
++ generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
+
+- btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_REF, bytenr,
+- num_bytes, parent);
+- generic_ref.real_root = root_objectid;
+- if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+- btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
++ if (generic_ref->type == BTRFS_REF_METADATA)
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, generic_ref,
+ NULL, &old_ref_mod, &new_ref_mod);
+- } else {
+- btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
+- ret = btrfs_add_delayed_data_ref(fs_info, trans, &generic_ref, 0,
++ else
++ ret = btrfs_add_delayed_data_ref(fs_info, trans, generic_ref, 0,
+ &old_ref_mod, &new_ref_mod);
+- }
+
+ if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0)
+- add_pinned_bytes(fs_info, &generic_ref);
++ add_pinned_bytes(fs_info, generic_ref);
+
+ return ret;
+ }
+@@ -3254,7 +3248,10 @@ static int __btrfs_mod_ref(struct btrfs_
+ u32 nritems;
+ struct btrfs_key key;
+ struct btrfs_file_extent_item *fi;
++ struct btrfs_ref generic_ref = { 0 };
++ bool for_reloc = btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC);
+ int i;
++ int action;
+ int level;
+ int ret = 0;
+
+@@ -3272,6 +3269,10 @@ static int __btrfs_mod_ref(struct btrfs_
+ parent = buf->start;
+ else
+ parent = 0;
++ if (inc)
++ action = BTRFS_ADD_DELAYED_REF;
++ else
++ action = BTRFS_DROP_DELAYED_REF;
+
+ for (i = 0; i < nritems; i++) {
+ if (level == 0) {
+@@ -3289,10 +3290,15 @@ static int __btrfs_mod_ref(struct btrfs_
+
+ num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
+ key.offset -= btrfs_file_extent_offset(buf, fi);
++ btrfs_init_generic_ref(&generic_ref, action, bytenr,
++ num_bytes, parent);
++ btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
++ key.offset);
++ generic_ref.real_root = root->root_key.objectid;
++ generic_ref.skip_qgroup = for_reloc;
+ if (inc)
+- ret = btrfs_inc_extent_ref(trans, fs_info, bytenr,
+- num_bytes, parent, ref_root,
+- key.objectid, key.offset);
++ ret = btrfs_inc_extent_ref(trans, fs_info,
++ &generic_ref);
+ else
+ ret = btrfs_free_extent(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+@@ -3302,10 +3308,14 @@ static int __btrfs_mod_ref(struct btrfs_
+ } else {
+ bytenr = btrfs_node_blockptr(buf, i);
+ num_bytes = fs_info->nodesize;
++ btrfs_init_generic_ref(&generic_ref, action, bytenr,
++ num_bytes, parent);
++ generic_ref.real_root = root->root_key.objectid;
++ btrfs_init_tree_ref(&generic_ref, level - 1, ref_root);
++ generic_ref.skip_qgroup = for_reloc;
+ if (inc)
+- ret = btrfs_inc_extent_ref(trans, fs_info, bytenr,
+- num_bytes, parent, ref_root,
+- level - 1, 0);
++ ret = btrfs_inc_extent_ref(trans, fs_info,
++ &generic_ref);
+ else
+ ret = btrfs_free_extent(trans, fs_info, bytenr,
+ num_bytes, parent, ref_root,
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -762,6 +762,7 @@ int __btrfs_drop_extents(struct btrfs_tr
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct extent_buffer *leaf;
+ struct btrfs_file_extent_item *fi;
++ struct btrfs_ref ref = { 0 };
+ struct btrfs_key key;
+ struct btrfs_key new_key;
+ u64 ino = btrfs_ino(BTRFS_I(inode));
+@@ -918,11 +919,15 @@ next_slot:
+ btrfs_mark_buffer_dirty(leaf);
+
+ if (update_refs && disk_bytenr > 0) {
+- ret = btrfs_inc_extent_ref(trans, fs_info,
+- disk_bytenr, num_bytes, 0,
++ btrfs_init_generic_ref(&ref,
++ BTRFS_ADD_DELAYED_REF,
++ disk_bytenr, num_bytes, 0);
++ btrfs_init_data_ref(&ref,
+ root->root_key.objectid,
+ new_key.objectid,
+ start - extent_offset);
++ ret = btrfs_inc_extent_ref(trans, fs_info,
++ &ref);
+ BUG_ON(ret); /* -ENOMEM */
+ }
+ key.offset = start;
+@@ -1151,6 +1156,7 @@ int btrfs_mark_extent_written(struct btr
+ struct extent_buffer *leaf;
+ struct btrfs_path *path;
+ struct btrfs_file_extent_item *fi;
++ struct btrfs_ref ref = { 0 };
+ struct btrfs_key key;
+ struct btrfs_key new_key;
+ u64 bytenr;
+@@ -1296,9 +1302,11 @@ again:
+ extent_end - split);
+ btrfs_mark_buffer_dirty(leaf);
+
+- ret = btrfs_inc_extent_ref(trans, fs_info, bytenr, num_bytes,
+- 0, root->root_key.objectid,
+- ino, orig_offset);
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, bytenr,
++ num_bytes, 0);
++ btrfs_init_data_ref(&ref, root->root_key.objectid, ino,
++ orig_offset);
++ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -2519,6 +2519,7 @@ static noinline int relink_extent_backre
+ struct btrfs_file_extent_item *item;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_trans_handle *trans;
++ struct btrfs_ref ref = { 0 };
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+@@ -2689,10 +2690,11 @@ again:
+ inode_add_bytes(inode, len);
+ btrfs_release_path(path);
+
+- ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
+- new->disk_len, 0,
+- backref->root_id, backref->inum,
+- new->file_pos); /* start - extent_offset */
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new->bytenr,
++ new->disk_len, 0);
++ btrfs_init_data_ref(&ref, backref->root_id, backref->inum,
++ new->file_pos); /* start - extent_offset */
++ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out_free_path;
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3774,13 +3774,18 @@ process_slot:
+ datal);
+
+ if (disko) {
++ struct btrfs_ref ref = { 0 };
++
+ inode_add_bytes(inode, datal);
++ btrfs_init_generic_ref(&ref,
++ BTRFS_ADD_DELAYED_REF, disko,
++ diskl, 0);
++ btrfs_init_data_ref(&ref,
++ root->root_key.objectid,
++ btrfs_ino(BTRFS_I(inode)),
++ new_key.offset - datao);
+ ret = btrfs_inc_extent_ref(trans,
+- fs_info,
+- disko, diskl, 0,
+- root->root_key.objectid,
+- btrfs_ino(BTRFS_I(inode)),
+- new_key.offset - datao);
++ fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans,
+ ret);
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1679,6 +1679,8 @@ int replace_file_extents(struct btrfs_tr
+
+ nritems = btrfs_header_nritems(leaf);
+ for (i = 0; i < nritems; i++) {
++ struct btrfs_ref ref = { 0 };
++
+ cond_resched();
+ btrfs_item_key_to_cpu(leaf, &key, i);
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+@@ -1739,10 +1741,12 @@ int replace_file_extents(struct btrfs_tr
+ dirty = 1;
+
+ key.offset -= btrfs_file_extent_offset(leaf, fi);
+- ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+- num_bytes, parent,
+- btrfs_header_owner(leaf),
+- key.objectid, key.offset);
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
++ num_bytes, parent);
++ ref.real_root = root->root_key.objectid;
++ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
++ key.objectid, key.offset);
++ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+@@ -1792,6 +1796,7 @@ int replace_path(struct btrfs_trans_hand
+ struct btrfs_fs_info *fs_info = dest->fs_info;
+ struct extent_buffer *eb;
+ struct extent_buffer *parent;
++ struct btrfs_ref ref = { 0 };
+ struct btrfs_key key;
+ u64 old_bytenr;
+ u64 new_bytenr;
+@@ -1950,13 +1955,17 @@ again:
+ path->slots[level], old_ptr_gen);
+ btrfs_mark_buffer_dirty(path->nodes[level]);
+
+- ret = btrfs_inc_extent_ref(trans, fs_info, old_bytenr,
+- blocksize, path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0);
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
++ blocksize, path->nodes[level]->start);
++ ref.skip_qgroup = true;
++ btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
++ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ BUG_ON(ret);
+- ret = btrfs_inc_extent_ref(trans, fs_info, new_bytenr,
+- blocksize, 0, dest->root_key.objectid,
+- level - 1, 0);
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
++ blocksize, 0);
++ ref.skip_qgroup = true;
++ btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
++ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ BUG_ON(ret);
+
+ ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
+@@ -2756,6 +2765,7 @@ static int do_relocation(struct btrfs_tr
+ rc->backref_cache.path[node->level] = node;
+ list_for_each_entry(edge, &node->upper, list[LOWER]) {
+ struct btrfs_key first_key;
++ struct btrfs_ref ref = { 0 };
+
+ cond_resched();
+
+@@ -2853,11 +2863,14 @@ static int do_relocation(struct btrfs_tr
+ trans->transid);
+ btrfs_mark_buffer_dirty(upper->eb);
+
++ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
++ node->eb->start, blocksize,
++ upper->eb->start);
++ ref.real_root = root->root_key.objectid;
++ btrfs_init_tree_ref(&ref, node->level,
++ btrfs_header_owner(upper->eb));
+ ret = btrfs_inc_extent_ref(trans, root->fs_info,
+- node->eb->start, blocksize,
+- upper->eb->start,
+- btrfs_header_owner(upper->eb),
+- node->level, 0);
++ &ref);
+ BUG_ON(ret);
+
+ ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -717,9 +717,11 @@ static noinline int replay_one_extent(st
+ goto out;
+
+ if (ins.objectid > 0) {
++ struct btrfs_ref ref = { 0 };
+ u64 csum_start;
+ u64 csum_end;
+ LIST_HEAD(ordered_sums);
++
+ /*
+ * is this extent already allocated in the extent
+ * allocation tree? If so, just add a reference
+@@ -727,10 +729,14 @@ static noinline int replay_one_extent(st
+ ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
+ ins.offset);
+ if (ret == 0) {
+- ret = btrfs_inc_extent_ref(trans, fs_info,
+- ins.objectid, ins.offset,
+- 0, root->root_key.objectid,
++ btrfs_init_generic_ref(&ref,
++ BTRFS_ADD_DELAYED_REF,
++ ins.objectid, ins.offset, 0);
++ btrfs_init_data_ref(&ref,
++ root->root_key.objectid,
+ key->objectid, offset);
++ ret = btrfs_inc_extent_ref(trans, fs_info,
++ &ref);
+ if (ret)
+ goto out;
+ } else {
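
The refactor above boils down to a common C pattern: replace a long positional parameter list with a descriptor struct that callers initialize field by field. A minimal stand-alone sketch of that pattern follows; the names and fields are hypothetical and far simpler than the real struct btrfs_ref.

#include <stdio.h>

/*
 * Sketch of the "descriptor struct instead of a long parameter list"
 * pattern. Hypothetical types; not the btrfs API.
 */
enum ref_type { REF_NOT_SET, REF_DATA, REF_METADATA };

struct generic_ref {
	enum ref_type type;
	unsigned long long bytenr;
	unsigned long long len;
	unsigned long long root;
	int level;                  /* metadata refs only */
	unsigned long long owner;   /* data refs only */
	unsigned long long offset;  /* data refs only */
};

static void init_tree_ref(struct generic_ref *ref, int level,
			  unsigned long long root)
{
	ref->type = REF_METADATA;
	ref->level = level;
	ref->root = root;
}

/* One argument now carries what the old multi-argument call spelled out. */
static int inc_extent_ref(const struct generic_ref *ref)
{
	printf("add ref: type=%d bytenr=%llu len=%llu root=%llu\n",
	       ref->type, ref->bytenr, ref->len, ref->root);
	return 0;
}

int main(void)
{
	struct generic_ref ref = { 0 };

	ref.bytenr = 30572544;
	ref.len = 16384;
	init_tree_ref(&ref, 1, 5);
	return inc_extent_ref(&ref);
}

The payoff, visible in the hunks above, is that call sites only touch the fields that matter for their reference type instead of threading zeroes through unused positional arguments.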
diff --git a/patches.suse/0008-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_fr.patch b/patches.suse/0008-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_fr.patch
new file mode 100644
index 0000000000..26e00ced78
--- /dev/null
+++ b/patches.suse/0008-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_fr.patch
@@ -0,0 +1,257 @@
+From ffd4bb2a19cd29681f5b70a200654ab92619de8a Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:36 +0800
+Git-commit: ffd4bb2a19cd29681f5b70a200654ab92619de8a
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 8/9] btrfs: extent-tree: Use btrfs_ref to refactor
+ btrfs_free_extent()
+
+Similar to btrfs_inc_extent_ref(), use btrfs_ref to replace the long
+parameter list and the confusing @owner parameter.
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/ctree.h | 3 +--
+ fs/btrfs/extent-tree.c | 44 +++++++++++++++++++-------------------------
+ fs/btrfs/file.c | 23 +++++++++++++----------
+ fs/btrfs/inode.c | 13 +++++++++----
+ fs/btrfs/relocation.c | 25 ++++++++++++++++---------
+ 5 files changed, 58 insertions(+), 50 deletions(-)
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -2720,8 +2720,7 @@ int btrfs_set_disk_extent_flags(struct b
+ int level, int is_data);
+ int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+- u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset);
++ struct btrfs_ref *ref);
+
+ int btrfs_free_reserved_extent(struct btrfs_fs_info *fs_info,
+ u64 start, u64 len, int delalloc);
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -3298,9 +3298,8 @@ static int __btrfs_mod_ref(struct btrfs_
+ ret = btrfs_inc_extent_ref(trans, fs_info,
+ &generic_ref);
+ else
+- ret = btrfs_free_extent(trans, fs_info, bytenr,
+- num_bytes, parent, ref_root,
+- key.objectid, key.offset);
++ ret = btrfs_free_extent(trans, fs_info,
++ &generic_ref);
+ if (ret)
+ goto fail;
+ } else {
+@@ -3315,9 +3314,8 @@ static int __btrfs_mod_ref(struct btrfs_
+ ret = btrfs_inc_extent_ref(trans, fs_info,
+ &generic_ref);
+ else
+- ret = btrfs_free_extent(trans, fs_info, bytenr,
+- num_bytes, parent, ref_root,
+- level - 1, 0);
++ ret = btrfs_free_extent(trans, fs_info,
++ &generic_ref);
+ if (ret)
+ goto fail;
+ }
+@@ -7190,42 +7188,36 @@ out:
+ /* Can return -ENOMEM */
+ int btrfs_free_extent(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info,
+- u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+- u64 owner, u64 offset)
++ struct btrfs_ref *ref)
+ {
+- struct btrfs_ref generic_ref = { 0 };
+ int old_ref_mod, new_ref_mod;
+ int ret;
+
+ if (btrfs_is_testing(fs_info))
+ return 0;
+
+-
+- btrfs_init_generic_ref(&generic_ref, BTRFS_DROP_DELAYED_REF, bytenr,
+- num_bytes, parent);
+- generic_ref.real_root = root_objectid;
+ /*
+ * tree log blocks never actually go into the extent allocation
+ * tree, just update pinning info and exit early.
+ */
+- if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
+- WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
++ if ((ref->type == BTRFS_REF_METADATA &&
++ ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
++ (ref->type == BTRFS_REF_DATA &&
++ ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
+ /* unlocks the pinned mutex */
+- btrfs_pin_extent(fs_info, bytenr, num_bytes, 1);
++ btrfs_pin_extent(fs_info, ref->bytenr, ref->len, 1);
+ old_ref_mod = new_ref_mod = 0;
+ ret = 0;
+- } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
+- btrfs_init_tree_ref(&generic_ref, (int)owner, root_objectid);
+- ret = btrfs_add_delayed_tree_ref(fs_info, trans, &generic_ref,
++ } else if (ref->type == BTRFS_REF_METADATA) {
++ ret = btrfs_add_delayed_tree_ref(fs_info, trans, ref,
+ NULL, &old_ref_mod, &new_ref_mod);
+ } else {
+- btrfs_init_data_ref(&generic_ref, root_objectid, owner, offset);
+- ret = btrfs_add_delayed_data_ref(fs_info, trans, &generic_ref, 0,
++ ret = btrfs_add_delayed_data_ref(fs_info, trans, ref, 0,
+ &old_ref_mod, &new_ref_mod);
+ }
+
+ if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0)
+- add_pinned_bytes(fs_info, &generic_ref);
++ add_pinned_bytes(fs_info, ref);
+
+ return ret;
+ }
+@@ -8652,6 +8644,7 @@ static noinline int do_walk_down(struct
+ u32 blocksize;
+ struct btrfs_key key;
+ struct btrfs_key first_key;
++ struct btrfs_ref ref = { 0 };
+ struct extent_buffer *next;
+ int level = wc->level;
+ int reada = 0;
+@@ -8825,9 +8818,10 @@ skip:
+ wc->drop_level = level;
+ find_next_key(path, level, &wc->drop_progress);
+
+- ret = btrfs_free_extent(trans, fs_info, bytenr, blocksize,
+- parent, root->root_key.objectid,
+- level - 1, 0);
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
++ fs_info->nodesize, parent);
++ btrfs_init_tree_ref(&ref, level - 1, root->root_key.objectid);
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ if (ret)
+ goto out_unlock;
+ }
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1007,11 +1007,15 @@ delete_extent_item:
+ extent_end = ALIGN(extent_end,
+ fs_info->sectorsize);
+ } else if (update_refs && disk_bytenr > 0) {
+- ret = btrfs_free_extent(trans, fs_info,
+- disk_bytenr, num_bytes, 0,
++ btrfs_init_generic_ref(&ref,
++ BTRFS_DROP_DELAYED_REF,
++ disk_bytenr, num_bytes, 0);
++ btrfs_init_data_ref(&ref,
+ root->root_key.objectid,
+- key.objectid, key.offset -
+- extent_offset);
++ key.objectid,
++ key.offset - extent_offset);
++ ret = btrfs_free_extent(trans, fs_info,
++ &ref);
+ BUG_ON(ret); /* -ENOMEM */
+ inode_sub_bytes(inode,
+ extent_end - key.offset);
+@@ -1328,6 +1332,9 @@ again:
+
+ other_start = end;
+ other_end = 0;
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
++ num_bytes, 0);
++ btrfs_init_data_ref(&ref, root->root_key.objectid, ino, orig_offset);
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+@@ -1338,9 +1345,7 @@ again:
+ extent_end = other_end;
+ del_slot = path->slots[0] + 1;
+ del_nr++;
+- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+- 0, root->root_key.objectid,
+- ino, orig_offset);
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+@@ -1358,9 +1363,7 @@ again:
+ key.offset = other_start;
+ del_slot = path->slots[0];
+ del_nr++;
+- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+- 0, root->root_key.objectid,
+- ino, orig_offset);
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4476,12 +4476,17 @@ delete:
+ if (found_extent &&
+ (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
+ root == fs_info->tree_root)) {
++ struct btrfs_ref ref = { 0 };
++
+ btrfs_set_path_blocking(path);
+ bytes_deleted += extent_num_bytes;
+- ret = btrfs_free_extent(trans, fs_info, extent_start,
+- extent_num_bytes, 0,
+- btrfs_header_owner(leaf),
+- ino, extent_offset);
++
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
++ extent_start, extent_num_bytes, 0);
++ ref.real_root = root->root_key.objectid;
++ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
++ ino, extent_offset);
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1752,9 +1752,12 @@ int replace_file_extents(struct btrfs_tr
+ break;
+ }
+
+- ret = btrfs_free_extent(trans, fs_info, bytenr, num_bytes,
+- parent, btrfs_header_owner(leaf),
+- key.objectid, key.offset);
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
++ num_bytes, parent);
++ ref.real_root = root->root_key.objectid;
++ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
++ key.objectid, key.offset);
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ break;
+@@ -1968,14 +1971,18 @@ again:
+ ret = btrfs_inc_extent_ref(trans, fs_info, &ref);
+ BUG_ON(ret);
+
+- ret = btrfs_free_extent(trans, fs_info, new_bytenr, blocksize,
+- path->nodes[level]->start,
+- src->root_key.objectid, level - 1, 0);
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, new_bytenr,
++ blocksize, path->nodes[level]->start);
++ btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid);
++ ref.skip_qgroup = true;
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ BUG_ON(ret);
+
+- ret = btrfs_free_extent(trans, fs_info, old_bytenr, blocksize,
+- 0, dest->root_key.objectid, level - 1,
+- 0);
++ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, old_bytenr,
++ blocksize, 0);
++ btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid);
++ ref.skip_qgroup = true;
++ ret = btrfs_free_extent(trans, fs_info, &ref);
+ BUG_ON(ret);
+
+ btrfs_unlock_up_safe(path, 0);
diff --git a/patches.suse/0009-btrfs-qgroup-Don-t-scan-leaf-if-we-re-modifying-relo.patch b/patches.suse/0009-btrfs-qgroup-Don-t-scan-leaf-if-we-re-modifying-relo.patch
new file mode 100644
index 0000000000..5caaa438ea
--- /dev/null
+++ b/patches.suse/0009-btrfs-qgroup-Don-t-scan-leaf-if-we-re-modifying-relo.patch
@@ -0,0 +1,68 @@
+From c4140cbf35b90422be6589024f47e132eb2298b1 Mon Sep 17 00:00:00 2001
+From: Qu Wenruo <wqu@suse.com>
+Date: Thu, 4 Apr 2019 14:45:37 +0800
+Git-commit: c4140cbf35b90422be6589024f47e132eb2298b1
+References: bsc#1063638 bsc#1128052 bsc#1108838
+Patch-mainline: v5.2-rc1
+Subject: [PATCH 9/9] btrfs: qgroup: Don't scan leaf if we're modifying reloc
+ tree
+
+Since reloc trees don't contribute to qgroup numbers, just skip them.
+
+This should catch the final cause of unnecessary data ref processing
+when running a metadata balance with qgroups on.
+
+The 4G data 16 snapshots test (*) should explain it pretty well:
+
+              | delayed subtree | refactor delayed ref | this patch
+--------------+-----------------+----------------------+-----------
+ relocated    |           22653 |                22673 |      22744
+ qgroup dirty |          122792 |                48360 |         70
+ time         |          24.494 |               11.606 |      3.944
+
+Finally, we're at the stage where qgroup + metadata balance costs no
+obvious overhead.
+
+Test environment:
+
+Test VM:
+- vRAM 8G
+- vCPU 8
+- block dev virtio-blk, 'unsafe' cache mode
+- host block 850evo
+
+Test workload:
+- Copy 4G data from /usr/ to one subvolume
+- Create 16 snapshots of that subvolume, and modify 3 files in each
+ snapshot
+- Enable quota, rescan
+- Time "btrfs balance start -m"
+
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+---
+ fs/btrfs/extent-tree.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -8911,11 +8911,14 @@ static noinline int walk_up_proc(struct
+ else
+ ret = btrfs_dec_ref(trans, root, eb, 0);
+ BUG_ON(ret); /* -ENOMEM */
+- ret = btrfs_qgroup_trace_leaf_items(trans, fs_info, eb);
+- if (ret) {
+- btrfs_err_rl(fs_info,
+- "error %d accounting leaf items. Quota is out of sync, rescan required.",
+- ret);
++ if (is_fstree(root->root_key.objectid)) {
++ ret = btrfs_qgroup_trace_leaf_items(trans,
++ fs_info, eb);
++ if (ret) {
++ btrfs_err_rl(fs_info,
++"error %d accounting leaf items. Quota is out of sync, rescan required.",
++ ret);
++ }
+ }
+ }
+ /* make block locked assertion in clean_tree_block happy */
diff --git a/patches.suse/dccp-do-not-use-ipv6-header-for-ipv4-flow.patch b/patches.suse/dccp-do-not-use-ipv6-header-for-ipv4-flow.patch
new file mode 100644
index 0000000000..1affb168a2
--- /dev/null
+++ b/patches.suse/dccp-do-not-use-ipv6-header-for-ipv4-flow.patch
@@ -0,0 +1,37 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Mar 2019 05:46:18 -0700
+Subject: dccp: do not use ipv6 header for ipv4 flow
+Git-commit: e0aa67709f89d08c8d8e5bdd9e0b649df61d0090
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+When a dual stack dccp listener accepts an ipv4 flow,
+it should not attempt to use an ipv6 header or
+inet6_iif() helper.
+
+Fixes: 3df80d9320bc ("[DCCP]: Introduce DCCPv6")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/dccp/ipv6.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
+index d5740bad5b18..57d84e9b7b6f 100644
+--- a/net/dccp/ipv6.c
++++ b/net/dccp/ipv6.c
+@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
+- newnp->mcast_oif = inet6_iif(skb);
+- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
++ newnp->mcast_oif = inet_iif(skb);
++ newnp->mcast_hops = ip_hdr(skb)->ttl;
+
+ /*
+ * No need to charge this sock to the relevant IPv6 refcnt debug socks count
+--
+2.21.0
+
diff --git a/patches.suse/genetlink-Fix-a-memory-leak-on-error-path.patch b/patches.suse/genetlink-Fix-a-memory-leak-on-error-path.patch
new file mode 100644
index 0000000000..0df45b8fe5
--- /dev/null
+++ b/patches.suse/genetlink-Fix-a-memory-leak-on-error-path.patch
@@ -0,0 +1,45 @@
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 21 Mar 2019 15:02:50 +0800
+Subject: genetlink: Fix a memory leak on error path
+Git-commit: ceabee6c59943bdd5e1da1a6a20dc7ee5f8113a2
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+In genl_register_family(), when idr_alloc() fails,
+we forget to free the memory we possibly allocate for
+family->attrbuf.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Fixes: 2ae0f17df1cd ("genetlink: use idr to track families")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Reviewed-by: Kirill Tkhai <ktkhai@virtuozzo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/netlink/genetlink.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index 25eeb6d2a75a..f0ec068e1d02 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
+ start, end + 1, GFP_KERNEL);
+ if (family->id < 0) {
+ err = family->id;
+- goto errout_locked;
++ goto errout_free;
+ }
+
+ err = genl_validate_assign_mc_groups(family);
+@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
+
+ errout_remove:
+ idr_remove(&genl_fam_idr, family->id);
++errout_free:
+ kfree(family->attrbuf);
+ errout_locked:
+ genl_unlock_all();
+--
+2.21.0
+
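
The fix follows the usual C error-unwinding idiom: cleanup labels laid out in reverse order of the setup steps, so each failure point jumps to the label that undoes everything done so far and nothing more. A rough user-space sketch of the idiom, with hypothetical names rather than the genetlink code:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Goto-based error unwinding: the bug fixed above jumped to a label that
 * only unlocked and skipped freeing a buffer allocated earlier. Jumping
 * to the right label releases exactly what was already set up.
 */
static int register_family(int fail_id_alloc)
{
	char *attrbuf;
	int id, err;

	attrbuf = malloc(64);                /* step 1: allocate a buffer */
	if (!attrbuf)
		return -ENOMEM;

	id = fail_id_alloc ? -ENOSPC : 1;    /* step 2: allocate an id */
	if (id < 0) {
		err = id;
		goto errout_free;            /* must undo step 1 */
	}

	/* step 3 would go here; its failure path would undo step 2 first */
	return 0;

errout_free:
	free(attrbuf);                       /* undo step 1 */
	return err;
}

int main(void)
{
	printf("ok path:   %d\n", register_family(0));
	printf("fail path: %d\n", register_family(1));
	return 0;
}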
diff --git a/patches.suse/net-aquantia-fix-rx-checksum-offload-for-UDP-TCP-ove.patch b/patches.suse/net-aquantia-fix-rx-checksum-offload-for-UDP-TCP-ove.patch
new file mode 100644
index 0000000000..628a6af5e2
--- /dev/null
+++ b/patches.suse/net-aquantia-fix-rx-checksum-offload-for-UDP-TCP-ove.patch
@@ -0,0 +1,39 @@
+From: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
+Date: Sat, 16 Mar 2019 08:28:18 +0000
+Subject: net: aquantia: fix rx checksum offload for UDP/TCP over IPv6
+Git-commit: a7faaa0c5dc7d091cc9f72b870d7edcdd6f43f12
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+TCP/UDP checksum validity was propagated to the skb
+only if the IP checksum was valid.
+But for IPv6 there is no such validity, because IPv6 has no header checksum.
+This patch propagates TCP/UDP checksum validity regardless of the IP checksum.
+
+Fixes: 018423e90bee ("net: ethernet: aquantia: Add ring support code")
+Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
+Signed-off-by: Nikita Danilov <nikita.danilov@aquantia.com>
+Signed-off-by: Dmitry Bogdanov <dmitry.bogdanov@aquantia.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+@@ -231,11 +231,12 @@ int aq_ring_rx_clean(struct aq_ring_s *s
+ } else {
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+- if (buff->is_udp_cso || buff->is_tcp_cso)
+- __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
++
++ if (buff->is_udp_cso || buff->is_tcp_cso)
++ __skb_incr_checksum_unnecessary(skb);
+ }
+
+ skb_set_hash(skb, buff->rss_hash,
diff --git a/patches.suse/net-rose-fix-a-possible-stack-overflow.patch b/patches.suse/net-rose-fix-a-possible-stack-overflow.patch
new file mode 100644
index 0000000000..ef02b355ce
--- /dev/null
+++ b/patches.suse/net-rose-fix-a-possible-stack-overflow.patch
@@ -0,0 +1,129 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 15 Mar 2019 10:41:14 -0700
+Subject: net: rose: fix a possible stack overflow
+Git-commit: e5dcc0c3223c45c94100f05f28d8ef814db3d82c
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+rose_write_internal() uses a temp buffer of 100 bytes, but a manual
+inspection showed that given arbitrary input, rose_create_facilities()
+can fill up to 110 bytes.
+
+Let's use a tailroom of 256 bytes for peace of mind, and remove
+the bounce buffer: we can simply allocate a big enough skb
+and adjust its length as needed.
+
+syzbot report :
+
+BUG: KASAN: stack-out-of-bounds in memcpy include/linux/string.h:352 [inline]
+BUG: KASAN: stack-out-of-bounds in rose_create_facilities net/rose/rose_subr.c:521 [inline]
+BUG: KASAN: stack-out-of-bounds in rose_write_internal+0x597/0x15d0 net/rose/rose_subr.c:116
+Write of size 7 at addr ffff88808b1ffbef by task syz-executor.0/24854
+
+CPU: 0 PID: 24854 Comm: syz-executor.0 Not tainted 5.0.0+ #97
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+Call Trace:
+ __dump_stack lib/dump_stack.c:77 [inline]
+ dump_stack+0x172/0x1f0 lib/dump_stack.c:113
+ print_address_description.cold+0x7c/0x20d mm/kasan/report.c:187
+ kasan_report.cold+0x1b/0x40 mm/kasan/report.c:317
+ check_memory_region_inline mm/kasan/generic.c:185 [inline]
+ check_memory_region+0x123/0x190 mm/kasan/generic.c:191
+ memcpy+0x38/0x50 mm/kasan/common.c:131
+ memcpy include/linux/string.h:352 [inline]
+ rose_create_facilities net/rose/rose_subr.c:521 [inline]
+ rose_write_internal+0x597/0x15d0 net/rose/rose_subr.c:116
+ rose_connect+0x7cb/0x1510 net/rose/af_rose.c:826
+ __sys_connect+0x266/0x330 net/socket.c:1685
+ __do_sys_connect net/socket.c:1696 [inline]
+ __se_sys_connect net/socket.c:1693 [inline]
+ __x64_sys_connect+0x73/0xb0 net/socket.c:1693
+ do_syscall_64+0x103/0x610 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x458079
+Code: ad b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b8 fb ff c3 66 2e 0f 1f 84 00 00 00 00
+RSP: 002b:00007f47b8d9dc78 EFLAGS: 00000246 ORIG_RAX: 000000000000002a
+RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 0000000000458079
+RDX: 000000000000001c RSI: 0000000020000040 RDI: 0000000000000004
+RBP: 000000000073bf00 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 00007f47b8d9e6d4
+R13: 00000000004be4a4 R14: 00000000004ceca8 R15: 00000000ffffffff
+
+The buggy address belongs to the page:
+page:ffffea00022c7fc0 count:0 mapcount:0 mapping:0000000000000000 index:0x0
+flags: 0x1fffc0000000000()
+raw: 01fffc0000000000 0000000000000000 ffffffff022c0101 0000000000000000
+raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff88808b1ffa80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff88808b1ffb00: 00 00 00 00 00 00 00 00 f1 f1 f1 f1 00 00 00 03
+>ffff88808b1ffb80: f2 f2 00 00 00 00 00 00 00 00 00 00 00 00 04 f3
+ ^
+ ffff88808b1ffc00: f3 f3 f3 f3 00 00 00 00 00 00 00 00 00 00 00 00
+ ffff88808b1ffc80: 00 00 00 00 00 00 00 f1 f1 f1 f1 f1 f1 01 f2 01
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/rose/rose_subr.c | 21 ++++++++++++---------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+--- a/net/rose/rose_subr.c
++++ b/net/rose/rose_subr.c
+@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk
+ struct sk_buff *skb;
+ unsigned char *dptr;
+ unsigned char lci1, lci2;
+- char buffer[100];
+- int len, faclen = 0;
++ int maxfaclen = 0;
++ int len, faclen;
++ int reserve;
+
+- len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
++ reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
++ len = ROSE_MIN_LEN;
+
+ switch (frametype) {
+ case ROSE_CALL_REQUEST:
+ len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
+- faclen = rose_create_facilities(buffer, rose);
+- len += faclen;
++ maxfaclen = 256;
+ break;
+ case ROSE_CALL_ACCEPTED:
+ case ROSE_CLEAR_REQUEST:
+@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk
+ break;
+ }
+
+- if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
++ skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
++ if (!skb)
+ return;
+
+ /*
+ * Space for AX.25 header and PID.
+ */
+- skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
++ skb_reserve(skb, reserve);
+
+- dptr = skb_put(skb, skb_tailroom(skb));
++ dptr = skb_put(skb, len);
+
+ lci1 = (rose->lci >> 8) & 0x0F;
+ lci2 = (rose->lci >> 0) & 0xFF;
+@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk
+ dptr += ROSE_ADDR_LEN;
+ memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
+ dptr += ROSE_ADDR_LEN;
+- memcpy(dptr, buffer, faclen);
++ faclen = rose_create_facilities(dptr, rose);
++ skb_put(skb, faclen);
+ dptr += faclen;
+ break;
+
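
The size arithmetic in the changelog (a 100-byte stack buffer vs. roughly 110 bytes of facilities) comes down to one pattern: size the allocation for the worst case up front, then append only the bytes actually produced. A small user-space sketch of that pattern, with made-up sizes rather than the ROSE code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RESERVE      50   /* headroom for lower-layer headers */
#define MIN_LEN       3   /* fixed part of the frame */
#define MAX_FACLEN  256   /* deliberately generous worst case */

/*
 * Stand-in for rose_create_facilities(): writes a variable-length blob
 * straight into its destination and reports how much it wrote. Writing
 * into a fixed 100-byte stack buffer could overflow; here the destination
 * is already sized for the worst case.
 */
static int create_facilities(unsigned char *dst)
{
	int len = 110;

	memset(dst, 0xfa, len);
	return len;
}

int main(void)
{
	unsigned char *frame = malloc(RESERVE + MIN_LEN + MAX_FACLEN);
	int len = MIN_LEN;

	if (!frame)
		return 1;

	memset(frame + RESERVE, 0, MIN_LEN);             /* fixed header */
	len += create_facilities(frame + RESERVE + len); /* grow by what was written */

	printf("frame length %d, room for %d\n", len, MIN_LEN + MAX_FACLEN);
	free(frame);
	return 0;
}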
diff --git a/patches.suse/net-stmmac-fix-memory-corruption-with-large-MTUs.patch b/patches.suse/net-stmmac-fix-memory-corruption-with-large-MTUs.patch
new file mode 100644
index 0000000000..7d35824b58
--- /dev/null
+++ b/patches.suse/net-stmmac-fix-memory-corruption-with-large-MTUs.patch
@@ -0,0 +1,62 @@
+From: Aaro Koskinen <aaro.koskinen@nokia.com>
+Date: Mon, 18 Mar 2019 23:36:08 +0200
+Subject: net: stmmac: fix memory corruption with large MTUs
+Git-commit: 223a960c01227e4dbcb6f9fa06b47d73bda21274
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+When using 16K DMA buffers and ring mode, the DES3 refill is not working
+correctly as the function is using a bogus pointer for checking the
+private data. As a result stale pointers will remain in the RX descriptor
+ring, so DMA will now likely overwrite/corrupt some already freed memory.
+
+As a simple reproducer, just receive some UDP traffic:
+
+ # ifconfig eth0 down; ifconfig eth0 mtu 9000; ifconfig eth0 up
+ # iperf3 -c 192.168.253.40 -u -b 0 -R
+
+If you didn't crash by now, check the RX descriptors to find non-contiguous
+RX buffers:
+
+ cat /sys/kernel/debug/stmmaceth/eth0/descriptors_status
+ [...]
+ 1 [0x2be5020]: 0xa3220321 0x9ffc1ffc 0x72d70082 0x130e207e
+ ^^^^^^^^^^^^^^^^^^^^^
+ 2 [0x2be5040]: 0xa3220321 0x9ffc1ffc 0x72998082 0x1311a07e
+ ^^^^^^^^^^^^^^^^^^^^^
+
+A simple ping test will now report bad data:
+
+ # ping -s 8200 192.168.253.40
+ PING 192.168.253.40 (192.168.253.40) 8200(8228) bytes of data.
+ 8208 bytes from 192.168.253.40: icmp_seq=1 ttl=64 time=1.00 ms
+ wrong data byte #8144 should be 0xd0 but was 0x88
+
+Fix the wrong pointer. Also we must refill DES3 only if the DMA buffer
+size is 16K.
+
+Fixes: 54139cf3bb33 ("net: stmmac: adding multiple buffers for rx")
+Signed-off-by: Aaro Koskinen <aaro.koskinen@nokia.com>
+Acked-by: Jose Abreu <joabreu@synopsys.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -114,10 +114,11 @@ static unsigned int stmmac_is_jumbo_frm(
+
+ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
+ {
+- struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
++ struct stmmac_rx_queue *rx_q = priv_ptr;
++ struct stmmac_priv *priv = rx_q->priv_data;
+
+ /* Fill DES3 in case of RING mode */
+- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
++ if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+ }
+
diff --git a/patches.suse/packets-Always-register-packet-sk-in-the-same-order.patch b/patches.suse/packets-Always-register-packet-sk-in-the-same-order.patch
new file mode 100644
index 0000000000..458c4bbc7b
--- /dev/null
+++ b/patches.suse/packets-Always-register-packet-sk-in-the-same-order.patch
@@ -0,0 +1,69 @@
+From: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Date: Sat, 16 Mar 2019 14:41:30 +0100
+Subject: packets: Always register packet sk in the same order
+Git-commit: a4dc6a49156b1f8d6e17251ffda17c9e6a5db78a
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+When using fanouts with AF_PACKET, the demux functions such as
+fanout_demux_cpu will return an index in the fanout socket array, which
+corresponds to the selected socket.
+
+The ordering of this array depends on the order the sockets were added
+to a given fanout group, so for FANOUT_CPU this means sockets are bound
+to cpus in the order they are configured, which is OK.
+
+However, when stopping then restarting the interface these sockets are
+bound to, the sockets are reassigned to the fanout group in the reverse
+order, due to the fact that they were inserted at the head of the
+interface's AF_PACKET socket list.
+
+This means that traffic that was directed to the first socket in the
+fanout group is now directed to the last one after an interface restart.
+
+In the case of FANOUT_CPU, traffic from CPU0 will be directed to the
+socket that used to receive traffic from the last CPU after an interface
+restart.
+
+This commit introduces a helper to add a socket at the tail of a list,
+then uses it to register AF_PACKET sockets.
+
+Note that this changes the order in which sockets are listed in /proc and
+with sock_diag.
+
+Fixes: dc99f600698d ("packet: Add fanout support")
+Signed-off-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/net/sock.h | 6 ++++++
+ net/packet/af_packet.c | 2 +-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -677,6 +677,12 @@ static inline void sk_add_node_rcu(struc
+ hlist_add_head_rcu(&sk->sk_node, list);
+ }
+
++static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
++{
++ sock_hold(sk);
++ hlist_add_tail_rcu(&sk->sk_node, list);
++}
++
+ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
+ {
+ hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -3282,7 +3282,7 @@ static int packet_create(struct net *net
+ }
+
+ mutex_lock(&net->packet.sklist_lock);
+- sk_add_node_rcu(sk, &net->packet.sklist);
++ sk_add_node_tail_rcu(sk, &net->packet.sklist);
+ mutex_unlock(&net->packet.sklist_lock);
+
+ preempt_disable();
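
The reordering described above is a general property of head insertion rather than anything specific to AF_PACKET. A self-contained C sketch with plain pointers (no hlist or RCU) shows how re-adding entries at the head reverses their order while tail insertion preserves it:

#include <stdio.h>

#define N 3

struct node { int idx; struct node *next; };

static void add_head(struct node **list, struct node *n)
{
	n->next = *list;
	*list = n;
}

static void add_tail(struct node **list, struct node *n)
{
	struct node **p = list;

	while (*p)
		p = &(*p)->next;
	n->next = NULL;
	*p = n;
}

static void print(const char *tag, const struct node *list)
{
	printf("%s:", tag);
	for (; list; list = list->next)
		printf(" %d", list->idx);
	printf("\n");
}

int main(void)
{
	struct node a[N] = { {0}, {1}, {2} }, b[N] = { {0}, {1}, {2} };
	struct node *head_list = NULL, *tail_list = NULL;
	int i;

	for (i = 0; i < N; i++) {
		add_head(&head_list, &a[i]);   /* ends up as 2 1 0 */
		add_tail(&tail_list, &b[i]);   /* stays 0 1 2 */
	}
	print("head-insert", head_list);
	print("tail-insert", tail_list);
	return 0;
}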
diff --git a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
index 1ff8b31a3b..48b920d0b5 100644
--- a/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
+++ b/patches.suse/revert-btrfs-qgroup-move-half-of-the-qgroup-accounting-time-out-of-commit-trans.patch
@@ -80,15 +80,15 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
if (new_ref_mod)
*new_ref_mod = head_ref->total_ref_mod;
-@@ -732,7 +726,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -731,7 +725,6 @@ int btrfs_add_delayed_tree_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
- bool is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID);
- int ret;
- u8 ref_type;
-@@ -775,8 +768,7 @@ int btrfs_add_delayed_tree_ref(struct bt
+ bool is_system;
+ int action = generic_ref->action;
+ int level = generic_ref->tree_ref.level;
+@@ -785,8 +778,7 @@ int btrfs_add_delayed_tree_ref(struct bt
* the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
@@ -98,7 +98,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
-@@ -788,8 +780,6 @@ int btrfs_add_delayed_tree_ref(struct bt
+@@ -798,8 +790,6 @@ int btrfs_add_delayed_tree_ref(struct bt
if (ret > 0)
kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
@@ -107,15 +107,15 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
return 0;
free_head_ref:
-@@ -814,7 +804,6 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -822,7 +812,6 @@ int btrfs_add_delayed_data_ref(struct bt
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
struct btrfs_qgroup_extent_record *record = NULL;
- int qrecord_inserted;
+ int action = generic_ref->action;
int ret;
- u8 ref_type;
-
-@@ -863,8 +852,7 @@ int btrfs_add_delayed_data_ref(struct bt
+ u64 bytenr = generic_ref->bytenr;
+@@ -881,8 +870,7 @@ int btrfs_add_delayed_data_ref(struct bt
* the spin lock
*/
head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record,
@@ -125,7 +125,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
ret = insert_delayed_ref(trans, delayed_refs, head_ref, &ref->node);
spin_unlock(&delayed_refs->lock);
-@@ -875,8 +863,6 @@ int btrfs_add_delayed_data_ref(struct bt
+@@ -893,8 +881,6 @@ int btrfs_add_delayed_data_ref(struct bt
if (ret > 0)
kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
@@ -134,7 +134,7 @@ Acked-by: Jeff Mahoney <jeffm@suse.com>
return 0;
}
-@@ -902,7 +888,7 @@ int btrfs_add_delayed_extent_op(struct b
+@@ -920,7 +906,7 @@ int btrfs_add_delayed_extent_op(struct b
add_delayed_ref_head(fs_info, trans, head_ref, NULL,
BTRFS_UPDATE_DELAYED_HEAD,
diff --git a/patches.suse/sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch b/patches.suse/sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
new file mode 100644
index 0000000000..3e8c3e1b9b
--- /dev/null
+++ b/patches.suse/sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
@@ -0,0 +1,38 @@
+From: Xin Long <lucien.xin@gmail.com>
+Date: Mon, 18 Mar 2019 19:47:00 +0800
+Subject: sctp: get sctphdr by offset in sctp_compute_cksum
+Git-commit: 273160ffc6b993c7c91627f5a84799c66dfe4dee
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+sctp_hdr(skb) only works when skb->transport_header is set properly.
+
+But in Netfilter, skb->transport_header for ipv6 is not guaranteed
+to be the right value for the sctphdr, which can cause the checksum
+check for sctp packets to fail.
+
+So fix it by using the offset, which is correct in all places.
+
+v1->v2:
+ - Fix the changelog.
+
+Fixes: e6d8b64b34aa ("net: sctp: fix and consolidate SCTP checksumming code")
+Reported-by: Li Shuang <shuali@redhat.com>
+Signed-off-by: Xin Long <lucien.xin@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ include/net/sctp/checksum.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/sctp/checksum.h
++++ b/include/net/sctp/checksum.h
+@@ -60,7 +60,7 @@ static inline __wsum sctp_csum_combine(_
+ static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+ {
+- struct sctphdr *sh = sctp_hdr(skb);
++ struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
+ __le32 ret, old = sh->checksum;
+ const struct skb_checksum_ops ops = {
+ .update = sctp_csum_update,
diff --git a/patches.suse/tcp-do-not-use-ipv6-header-for-ipv4-flow.patch b/patches.suse/tcp-do-not-use-ipv6-header-for-ipv4-flow.patch
new file mode 100644
index 0000000000..5905124295
--- /dev/null
+++ b/patches.suse/tcp-do-not-use-ipv6-header-for-ipv4-flow.patch
@@ -0,0 +1,43 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 19 Mar 2019 05:45:35 -0700
+Subject: tcp: do not use ipv6 header for ipv4 flow
+Git-commit: 89e4130939a20304f4059ab72179da81f5347528
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+When a dual stack tcp listener accepts an ipv4 flow,
+it should not attempt to use an ipv6 header or tcp_v6_iif() helper.
+
+Fixes: 1397ed35f22d ("ipv6: add flowinfo for tcp6 pkt_options for all cases")
+Fixes: df3687ffc665 ("ipv6: add the IPV6_FL_F_REFLECT flag to IPV6_FL_A_GET")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ net/ipv6/tcp_ipv6.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 57ef69a10889..44d431849d39 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
+ newnp->ipv6_fl_list = NULL;
+ newnp->pktoptions = NULL;
+ newnp->opt = NULL;
+- newnp->mcast_oif = tcp_v6_iif(skb);
+- newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
+- newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
++ newnp->mcast_oif = inet_iif(skb);
++ newnp->mcast_hops = ip_hdr(skb)->ttl;
++ newnp->rcv_flowinfo = 0;
+ if (np->repflow)
+- newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
++ newnp->flow_label = 0;
+
+ /*
+ * No need to charge this sock to the relevant IPv6 refcnt debug socks count
+--
+2.21.0
+
diff --git a/patches.suse/thunderx-eliminate-extra-calls-to-put_page-for-pages.patch b/patches.suse/thunderx-eliminate-extra-calls-to-put_page-for-pages.patch
new file mode 100644
index 0000000000..b0a1ab4525
--- /dev/null
+++ b/patches.suse/thunderx-eliminate-extra-calls-to-put_page-for-pages.patch
@@ -0,0 +1,62 @@
+From: Dean Nelson <dnelson@redhat.com>
+Date: Tue, 26 Mar 2019 11:53:26 -0400
+Subject: thunderx: eliminate extra calls to put_page() for pages held for
+ recycling
+Git-commit: cd35ef91490ad8049dd180bb060aff7ee192eda9
+Patch-mainline: v5.1-rc4
+References: networking-stable-19_03_28
+
+For the non-XDP case, commit 773225388dae15e72790 ("net: thunderx: Optimize
+page recycling for XDP") added code to nicvf_free_rbdr() that, when releasing
+the additional receive buffer page reference held for recycling, repeatedly
+calls put_page() until the page's _refcount goes to zero, which results in
+the page being freed.
+
+This is not okay if the page's _refcount was greater than 1 (in the non-XDP
+case), because nicvf_free_rbdr() should not be subtracting more than what
+nicvf_alloc_page() had previously added to the page's _refcount, which was
+only 1 (in the non-XDP case).
+
+This can arise if a received packet is still being processed and the receive
+buffer (i.e., skb->head) has not yet been freed via skb_free_head() when
+nicvf_free_rbdr() is spinning through the aforementioned put_page() loop.
+
+If this should occur, when the received packet finishes processing and
+skb_free_head() is called, various problems can ensue. Exactly what happens depends on
+whether the page has already been reallocated or not, anything from "BUG: Bad
+page state ... ", to "Unable to handle kernel NULL pointer dereference ..." or
+"Unable to handle kernel paging request...".
+
+So this patch changes nicvf_free_rbdr() to only call put_page() once for pages
+held for recycling (in the non-XDP case).
+
+Fixes: 773225388dae ("net: thunderx: Optimize page recycling for XDP")
+Signed-off-by: Dean Nelson <dnelson@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 55dbf02c42af..e246f9733bb8 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -364,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+ while (head < rbdr->pgcnt) {
+ pgcache = &rbdr->pgcache[head];
+ if (pgcache->page && page_ref_count(pgcache->page) != 0) {
+- if (!rbdr->is_xdp) {
+- put_page(pgcache->page);
+- continue;
++ if (rbdr->is_xdp) {
++ page_ref_sub(pgcache->page,
++ pgcache->ref_count - 1);
+ }
+- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
+ put_page(pgcache->page);
+ }
+ head++;
+--
+2.21.0
+
diff --git a/patches.suse/thunderx-enable-page-recycling-for-non-XDP-case.patch b/patches.suse/thunderx-enable-page-recycling-for-non-XDP-case.patch
new file mode 100644
index 0000000000..c3f6240220
--- /dev/null
+++ b/patches.suse/thunderx-enable-page-recycling-for-non-XDP-case.patch
@@ -0,0 +1,62 @@
+From: Dean Nelson <dnelson@redhat.com>
+Date: Tue, 26 Mar 2019 11:53:19 -0400
+Subject: thunderx: enable page recycling for non-XDP case
+Git-commit: b3e208069477588c06f4d5d986164b435bb06e6d
+Patch-mainline: v5.1-rc4
+References: networking-stable-19_03_28
+
+Commit 773225388dae15e72790 ("net: thunderx: Optimize page recycling for XDP")
+added code to nicvf_alloc_page() that inadvertently disables receive buffer
+page recycling for the non-XDP case by always NULL'ng the page pointer.
+
+This patch corrects two if-conditionals to allow for the recycling of non-XDP
+mode pages by only setting the page pointer to NULL when the page is not ready
+for recycling.
+
+Fixes: 773225388dae ("net: thunderx: Optimize page recycling for XDP")
+Signed-off-by: Dean Nelson <dnelson@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ .../ethernet/cavium/thunder/nicvf_queues.c | 23 +++++++++----------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+index 5b4d3badcb73..55dbf02c42af 100644
+--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
++++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
+ /* Check if page can be recycled */
+ if (page) {
+ ref_count = page_ref_count(page);
+- /* Check if this page has been used once i.e 'put_page'
+- * called after packet transmission i.e internal ref_count
+- * and page's ref_count are equal i.e page can be recycled.
++ /* This page can be recycled if internal ref_count and page's
++ * ref_count are equal, indicating that the page has been used
++ * once for packet transmission. For non-XDP mode, internal
++ * ref_count is always '1'.
+ */
+- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
+- pgcache->ref_count--;
+- else
+- page = NULL;
+-
+- /* In non-XDP mode, page's ref_count needs to be '1' for it
+- * to be recycled.
+- */
+- if (!rbdr->is_xdp && (ref_count != 1))
++ if (rbdr->is_xdp) {
++ if (ref_count == pgcache->ref_count)
++ pgcache->ref_count--;
++ else
++ page = NULL;
++ } else if (ref_count != 1) {
+ page = NULL;
++ }
+ }
+
+ if (!page) {
+--
+2.21.0
+
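
The recycling rule spelled out in the new comment above is easy to state as a predicate; a hedged stand-alone sketch of the decision, not the nicvf driver code:

#include <stdbool.h>
#include <stdio.h>

/*
 * Recycling decision as described in the changelog:
 * - XDP mode:     recycle when the page's refcount matches the count the
 *                 driver tracks internally.
 * - non-XDP mode: recycle only when the refcount is exactly 1, i.e. every
 *                 other user has already dropped its reference.
 */
static bool can_recycle(bool is_xdp, int page_refcount, int internal_refcount)
{
	if (is_xdp)
		return page_refcount == internal_refcount;
	return page_refcount == 1;
}

int main(void)
{
	printf("non-XDP, refcount 1:    %d\n", can_recycle(false, 1, 1)); /* 1 */
	printf("non-XDP, refcount 2:    %d\n", can_recycle(false, 2, 1)); /* 0 */
	printf("XDP,     refcounts 3/3: %d\n", can_recycle(true, 3, 3));  /* 1 */
	return 0;
}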
diff --git a/patches.suse/tun-add-a-missing-rcu_read_unlock-in-error-path.patch b/patches.suse/tun-add-a-missing-rcu_read_unlock-in-error-path.patch
new file mode 100644
index 0000000000..915d0e431c
--- /dev/null
+++ b/patches.suse/tun-add-a-missing-rcu_read_unlock-in-error-path.patch
@@ -0,0 +1,29 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 16 Mar 2019 13:09:53 -0700
+Subject: tun: add a missing rcu_read_unlock() in error path
+Git-commit: 9180bb4f046064dfa4541488102703b402bb04e1
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+In my latest patch I missed one rcu_read_unlock() in the case where
+the device is down.
+
+Fixes: 4477138fa0ae ("tun: properly test for IFF_UP")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/tun.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1353,6 +1353,7 @@ drop:
+ rcu_read_lock();
+ if (unlikely(!(tun->dev->flags & IFF_UP))) {
+ err = -EIO;
++ rcu_read_unlock();
+ goto drop;
+ }
+
diff --git a/patches.suse/tun-properly-test-for-IFF_UP.patch b/patches.suse/tun-properly-test-for-IFF_UP.patch
new file mode 100644
index 0000000000..c5aa0c9610
--- /dev/null
+++ b/patches.suse/tun-properly-test-for-IFF_UP.patch
@@ -0,0 +1,80 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 14 Mar 2019 20:19:47 -0700
+Subject: tun: properly test for IFF_UP
+Git-commit: 4477138fa0ae4e1b699786ef0600863ea6e6c61c
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+Same reasons than the ones explained in commit 4179cb5a4c92
+("vxlan: test dev->flags & IFF_UP before calling netif_rx()")
+
+netif_rx_ni() or napi_gro_frags() must be called under a strict contract.
+
+At device dismantle phase, core networking clears IFF_UP
+and flush_all_backlogs() is called after rcu grace period
+to make sure no incoming packet might be in a cpu backlog
+and still referencing the device.
+
+A similar protocol is used for gro layer.
+
+Most drivers call netif_rx() from their interrupt handler,
+and since the interrupts are disabled at device dismantle,
+netif_rx() does not have to check dev->flags & IFF_UP
+
+Virtual drivers do not have this guarantee, and must
+therefore make the check themselves.
+
+Fixes: 1bd4978a88ac ("tun: honor IFF_UP in tun_get_user()")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/tun.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1220,9 +1220,6 @@ static ssize_t tun_get_user(struct tun_s
+ int err;
+ u32 rxhash;
+
+- if (!(tun->dev->flags & IFF_UP))
+- return -EIO;
+-
+ if (!(tun->flags & IFF_NO_PI)) {
+ if (len < sizeof(pi))
+ return -EINVAL;
+@@ -1297,9 +1294,11 @@ static ssize_t tun_get_user(struct tun_s
+ err = skb_copy_datagram_from_iter(skb, 0, from, len);
+
+ if (err) {
++ err = -EFAULT;
++drop:
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
+ kfree_skb(skb);
+- return -EFAULT;
++ return err;
+ }
+
+ if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
+@@ -1350,11 +1349,19 @@ static ssize_t tun_get_user(struct tun_s
+ skb_probe_transport_header(skb, 0);
+
+ rxhash = skb_get_hash(skb);
++
++ rcu_read_lock();
++ if (unlikely(!(tun->dev->flags & IFF_UP))) {
++ err = -EIO;
++ goto drop;
++ }
++
+ #ifndef CONFIG_4KSTACKS
+ tun_rx_batched(tun, tfile, skb, more);
+ #else
+ netif_rx_ni(skb);
+ #endif
++ rcu_read_unlock();
+
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
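
The contract described above (the IFF_UP test and the packet delivery must sit inside the same read-side critical section that the teardown path synchronizes with) can be sketched in user space with a pthread rwlock standing in for the RCU read side. This shows only the locking shape, not the tun driver code:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * rwlock as a stand-in for the RCU read side: the flag test and the
 * delivery happen in one read-side section, and the teardown path takes
 * the write lock, so it cannot complete while a delivery that saw the
 * device "up" is still in flight.
 */
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static bool dev_up = true;

static int deliver(int pkt)
{
	int err = 0;

	pthread_rwlock_rdlock(&lock);
	if (!dev_up)
		err = -EIO;                   /* drop: device already down */
	else
		printf("delivered packet %d\n", pkt);
	pthread_rwlock_unlock(&lock);
	return err;
}

static void bring_down(void)
{
	pthread_rwlock_wrlock(&lock);         /* waits for in-flight deliveries */
	dev_up = false;
	pthread_rwlock_unlock(&lock);
}

int main(void)
{
	deliver(1);
	bring_down();
	printf("after down: %d\n", deliver(2));
	return 0;
}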
diff --git a/patches.suse/vxlan-Don-t-call-gro_cells_destroy-before-device-is-.patch b/patches.suse/vxlan-Don-t-call-gro_cells_destroy-before-device-is-.patch
new file mode 100644
index 0000000000..a98a44811f
--- /dev/null
+++ b/patches.suse/vxlan-Don-t-call-gro_cells_destroy-before-device-is-.patch
@@ -0,0 +1,45 @@
+From: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Date: Sat, 16 Mar 2019 17:02:54 +0800
+Subject: vxlan: Don't call gro_cells_destroy() before device is unregistered
+Git-commit: cc4807bb609230d8959fd732b0bf3bd4c2de8eac
+Patch-mainline: v5.1-rc3
+References: networking-stable-19_03_28
+
+Commit ad6c9986bcb62 ("vxlan: Fix GRO cells race condition between
+receive and link delete") fixed a race condition for the typical case a vxlan
+device is dismantled from the current netns. But if a netns is dismantled,
+vxlan_destroy_tunnels() is called to schedule a unregister_netdevice_queue()
+of all the vxlan tunnels that are related to this netns.
+
+In vxlan_destroy_tunnels(), gro_cells_destroy() is called and finished before
+unregister_netdevice_queue(). This means that the gro_cells_destroy() call is
+done too soon, for the same reasons explained in the above commit.
+
+So we need to fully respect the RCU rules, and thus must remove the
+gro_cells_destroy() call or risk a use-after-free.
+
+Fixes: 58ce31cca1ff ("vxlan: GRO support at tunnel layer")
+Signed-off-by: Suanming.Mou <mousuanming@huawei.com>
+Suggested-by: Eric Dumazet <eric.dumazet@gmail.com>
+Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
+Reviewed-by: Zhiqiang Liu <liuzhiqiang26@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+---
+ drivers/net/vxlan.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3645,10 +3645,8 @@ static void __net_exit vxlan_exit_net(st
+ /* If vxlan->dev is in the same netns, it has already been added
+ * to the list by the previous loop.
+ */
+- if (!net_eq(dev_net(vxlan->dev), net)) {
+- gro_cells_destroy(&vxlan->gro_cells);
++ if (!net_eq(dev_net(vxlan->dev), net))
+ unregister_netdevice_queue(vxlan->dev, &list);
+- }
+ }
+
+ unregister_netdevice_many(&list);
diff --git a/series.conf b/series.conf
index ad19ebdd4b..09994eadf7 100644
--- a/series.conf
+++ b/series.conf
@@ -21697,8 +21697,19 @@
patches.fixes/NFS-fix-mount-umount-race-in-nlmclnt.patch
patches.fixes/NFSv4.1-don-t-free-interrupted-slot-on-open.patch
patches.fixes/NFS-Fix-a-typo-in-nfs_init_timeout_values.patch
+ patches.suse/tun-properly-test-for-IFF_UP.patch
+ patches.suse/tun-add-a-missing-rcu_read_unlock-in-error-path.patch
+ patches.suse/net-rose-fix-a-possible-stack-overflow.patch
+ patches.suse/net-aquantia-fix-rx-checksum-offload-for-UDP-TCP-ove.patch
+ patches.suse/vxlan-Don-t-call-gro_cells_destroy-before-device-is-.patch
+ patches.suse/packets-Always-register-packet-sk-in-the-same-order.patch
+ patches.suse/sctp-get-sctphdr-by-offset-in-sctp_compute_cksum.patch
patches.drivers/mISDN-hfcpci-Test-both-vendor-device-ID-for-Digium-H.patch
+ patches.suse/net-stmmac-fix-memory-corruption-with-large-MTUs.patch
+ patches.suse/tcp-do-not-use-ipv6-header-for-ipv4-flow.patch
+ patches.suse/dccp-do-not-use-ipv6-header-for-ipv4-flow.patch
patches.suse/net-packet-Set-__GFP_NOWARN-upon-allocation-in-alloc.patch
+ patches.suse/genetlink-Fix-a-memory-leak-on-error-path.patch
patches.fixes/0001-netfilter-bridge-set-skb-transport_header-before-ent.patch
patches.fixes/rhashtable-Still-do-rehash-when-we-get-EEXIST.patch
patches.fixes/bpf-do-not-restore-dst_reg-when-cur_state-is-freed.patch
@@ -21767,6 +21778,8 @@
patches.drivers/HID-debug-fix-race-condition-with-between-rdesc_show.patch
patches.drivers/HID-input-add-mapping-for-Assistant-key.patch
patches.fixes/0001-net-datagram-fix-unbounded-loop-in-__skb_try_recv_da.patch
+ patches.suse/thunderx-enable-page-recycling-for-non-XDP-case.patch
+ patches.suse/thunderx-eliminate-extra-calls-to-put_page-for-pages.patch
patches.fixes/batman-adv-Reduce-claim-hash-refcnt-only-for-removed.patch
patches.fixes/batman-adv-Reduce-tt_local-hash-refcnt-only-for-remo.patch
patches.fixes/batman-adv-Reduce-tt_global-hash-refcnt-only-for-rem.patch
@@ -21827,6 +21840,7 @@
patches.fixes/0001-net-bridge-multicast-use-rcu-to-access-port-list-fro.patch
patches.drivers/rt2x00-do-not-increment-sequence-number-while-re-tra.patch
patches.fixes/0001-net-bridge-fix-per-port-af_packet-sockets.patch
+ patches.fixes/CIFS-keep-FileInfo-handle-live-during-oplock-break.patch
patches.fixes/crypto-x86-poly1305-fix-overflow-during-partial-redu.patch
patches.drivers/Input-snvs_pwrkey-initialize-necessary-driver-data-b.patch
patches.drivers/iio-gyro-bmg160-Use-millidegrees-for-temperature-sca.patch
@@ -21906,6 +21920,12 @@
patches.fixes/0001-btrfs-reloc-Fix-NULL-pointer-dereference-due-to-expa.patch
patches.suse/0001-btrfs-delayed-ref-Introduce-better-documented-delaye.patch
patches.suse/0002-btrfs-extent-tree-Open-code-process_func-in-__btrfs_.patch
+ patches.suse/0003-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
+ patches.suse/0004-btrfs-delayed-ref-Use-btrfs_ref-to-refactor-btrfs_ad.patch
+ patches.suse/0006-btrfs-extent-tree-Use-btrfs_ref-to-refactor-add_pinn.patch
+ patches.suse/0007-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_in.patch
+ patches.suse/0008-btrfs-extent-tree-Use-btrfs_ref-to-refactor-btrfs_fr.patch
+ patches.suse/0009-btrfs-qgroup-Don-t-scan-leaf-if-we-re-modifying-relo.patch
patches.drivers/mmc-core-fix-possible-use-after-free-of-host.patch
patches.drivers/phy-sun4i-usb-Make-sure-to-disable-PHY0-passby-for-p.patch
patches.drivers/stm-class-Fix-channel-free-in-stm-output-free-path.patch
@@ -21918,6 +21938,7 @@
patches.fixes/Revert-ide-unexport-DISK_EVENT_MEDIA_CHANGE-for-ide-.patch
patches.suse/Revert-block-unexport-DISK_EVENT_MEDIA_CHANGE-for.patch
patches.suse/block-check_events-don-t-bother-with-events-if-un.patch
+ patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
patches.fixes/audit-fix-a-memleak-caused-by-auditing-load-module.patch
patches.drivers/ibmvnic-Report-actual-backing-device-speed-and-duple.patch
patches.fixes/openvswitch-add-seqadj-extension-when-NAT-is-used.patch
@@ -21995,6 +22016,8 @@
patches.drivers/dmaengine-tegra210-dma-free-dma-controller-in-remove.patch
patches.drivers/clk-rockchip-fix-wrong-clock-definitions-for-rk3328.patch
patches.drivers/clk-rockchip-Fix-video-codec-clocks-on-rk3288.patch
+ patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
+ patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
patches.arch/powerpc-pseries-Track-LMB-nid-instead-of-using-devic.patch
patches.arch/powerpc-tm-Avoid-machine-crash-on-rt_sigreturn.patch
patches.drivers/pwm-Fix-deadlock-warning-when-removing-PWM-device.patch
@@ -22018,14 +22041,30 @@
patches.drivers/iommu-vt-d-make-kernel-parameter-igfx_off-work-with-viommu
patches.drivers/net-ibmvnic-Update-MAC-address-settings-after-adapte.patch
patches.drivers/net-ibmvnic-Update-carrier-state-after-link-state-ch.patch
+ patches.arch/x86-msr-index-cleanup-bit-defines.patch
+ patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
+ patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
+ patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
+ patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
+ patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
+ patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
+ patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
+ patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
+ patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
+ patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
+ patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
+ patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
+ patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
+ patches.arch/x86-speculation-mds-add-smt-warning-message.patch
+ patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
+ patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
+ patches.drivers/PCI-Mark-AMD-Stoney-Radeon-R7-GPU-ATS-as-broken.patch
+ patches.drivers/PCI-Mark-Atheros-AR9462-to-avoid-bus-reset.patch
+ patches.drivers/backlight-lm3630a-Return-0-on-success-in-update_stat.patch
# davem/net-next
patches.suse/msft-hv-1766-hv_netvsc-fix-vf-serial-matching-with-pci-slot-info.patch
- # powerpc/linux next
- patches.arch/powerpc-numa-improve-control-of-topology-updates.patch
- patches.arch/powerpc-numa-document-topology_updates_enabled-disab.patch
-
# dhowells/linux-fs keys-uefi
patches.suse/0001-KEYS-Allow-unrestricted-boot-time-addition-of-keys-t.patch
patches.suse/0002-efi-Add-EFI-signature-data-types.patch
@@ -22046,33 +22085,12 @@
patches.fixes/qla2xxx-always-allocate-qla_tgt_wq.patch
patches.suse/lpfc-validate-command-in-lpfc_sli4_scmd_to_wqidx_dis.patch
patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
+ patches.suse/0001-btrfs-extent-tree-Fix-a-bug-that-btrfs-is-unable-to-.patch
########################################################
# end of sorted patches
########################################################
- # git://git.infradead.org/nvme.git nvme-5.2
- patches.fixes/nvme-multipath-split-bios-with-the-ns_head-bio_set-b.patch
-
- # MDS
- patches.arch/x86-msr-index-cleanup-bit-defines.patch
- patches.arch/x86-speculation-consolidate-cpu-whitelists.patch
- patches.arch/x86-speculation-mds-add-basic-bug-infrastructure-for-mds.patch
- patches.arch/x86-speculation-mds-add-bug_msbds_only.patch
- patches.arch/x86-kvm-expose-x86_feature_md_clear-to-guests.patch
- patches.arch/x86-speculation-mds-add-mds_clear_cpu_buffers.patch
- patches.arch/x86-speculation-mds-clear-cpu-buffers-on-exit-to-user.patch
- patches.arch/x86-kvm-vmx-add-mds-protection-when-l1d-flush-is-not-active.patch
- patches.arch/x86-speculation-mds-conditionally-clear-cpu-buffers-on-idle-entry.patch
- patches.arch/x86-speculation-mds-add-mitigation-control-for-mds.patch
- patches.arch/x86-speculation-mds-add-sysfs-reporting-for-mds.patch
- patches.arch/x86-speculation-mds-add-mitigation-mode-vmwerv.patch
- patches.arch/x86-speculation-mds-add-mds-full-nosmt-cmdline-option.patch
- patches.arch/x86-speculation-move-arch_smt_update-call-to-after-mitigation-decisions.patch
- patches.arch/x86-speculation-mds-add-smt-warning-message.patch
- patches.arch/x86-speculation-mds-print-smt-vulnerable-on-msbds-with-mitigations-off.patch
- patches.arch/x86-speculation-mds-add-mitigations-support-for-mds.patch
-
########################################################
#
# packaging-specific patches (tweaks for autobuild,