Home Home > GIT Browse > openSUSE-42.3
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorTakashi Iwai <tiwai@suse.de>2019-06-17 15:11:23 +0200
committerTakashi Iwai <tiwai@suse.de>2019-06-17 15:11:23 +0200
commit7cfa20a40fc88dc4448d38a08571779ef2987b7d (patch)
treeb5d3ec4f2e0603911eef01028f949cf7ca5f3c53
parentf25b7b201bccdeb9bb7da10b80d6e2e111ffaf53 (diff)
parent6843df5b47b2fe5132558bc9a6cc464c9b1a9706 (diff)
Merge branch 'users/tiwai/SLE12-SP3/emu' into openSUSE-42.3openSUSE-42.3
-rw-r--r--patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch145
-rw-r--r--patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch46
-rw-r--r--patches.arch/powerpc-cacheinfo-add-cacheinfo_teardown-cacheinfo_r.patch72
-rw-r--r--patches.arch/powerpc-pseries-mobility-prevent-cpu-hotplug-during-.patch61
-rw-r--r--patches.arch/powerpc-pseries-mobility-rebuild-cacheinfo-hierarchy.patch83
-rw-r--r--patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch121
-rw-r--r--patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch34
-rw-r--r--patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch50
-rw-r--r--patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch142
-rw-r--r--patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch68
-rw-r--r--patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch52
-rw-r--r--patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch90
-rw-r--r--series.conf14
13 files changed, 978 insertions, 0 deletions
diff --git a/patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch b/patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
new file mode 100644
index 0000000000..050b562b20
--- /dev/null
+++ b/patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
@@ -0,0 +1,145 @@
+From 62cf0d0251c8d74c63b589942f517b5c3093c00f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:12 +0200
+Subject: [PATCH] cpu/hotplug: Provide cpus_read|write_[un]lock()
+
+References: bsc#1138374, LTC#178199
+Patch-mainline: v4.13-rc1
+Git-commit: 8f553c498e1772cccb39a114da4a498d22992758
+
+The counting 'rwsem' hackery of get|put_online_cpus() is going to be
+replaced by percpu rwsem.
+
+Rename the functions to make it clear that it's locking and not some
+refcount style interface. These new functions will be used for the
+preparatory patches which make the code ready for the percpu rwsem
+conversion.
+
+Rename all instances in the cpu hotplug code while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.080397752@linutronix.de
+[only add new names, kABI: preserve old name of exported functions]
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ include/linux/cpu.h | 30 ++++++++++++++++++------------
+ kernel/cpu.c | 12 ++++++------
+ 2 files changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 544120a24494..fc5fd5292adb 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -229,13 +229,15 @@ static inline void smpboot_thread_init(void)
+ #endif /* CONFIG_SMP */
+ extern struct bus_type cpu_subsys;
+
+-#ifdef CONFIG_HOTPLUG_CPU
+-/* Stop CPUs going up and down. */
++/* kABI - rename exported functions back to old name */
++#define cpus_read_lock get_online_cpus
++#define cpus_read_unlock put_online_cpus
+
+-extern void cpu_hotplug_begin(void);
+-extern void cpu_hotplug_done(void);
+-extern void get_online_cpus(void);
+-extern void put_online_cpus(void);
++#ifdef CONFIG_HOTPLUG_CPU
++extern void cpus_write_lock(void);
++extern void cpus_write_unlock(void);
++extern void cpus_read_lock(void);
++extern void cpus_read_unlock(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+@@ -249,12 +251,12 @@ int cpu_down(unsigned int cpu);
+
+ #else /* CONFIG_HOTPLUG_CPU */
+
+-static inline void cpu_hotplug_begin(void) {}
+-static inline void cpu_hotplug_done(void) {}
+-#define get_online_cpus() do { } while (0)
+-#define put_online_cpus() do { } while (0)
+-#define cpu_hotplug_disable() do { } while (0)
+-#define cpu_hotplug_enable() do { } while (0)
++static inline void cpus_write_lock(void) { }
++static inline void cpus_write_unlock(void) { }
++static inline void cpus_read_lock(void) { }
++static inline void cpus_read_unlock(void) { }
++static inline void cpu_hotplug_disable(void) { }
++static inline void cpu_hotplug_enable(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+ /* These aren't inline functions due to a GCC bug. */
+@@ -264,6 +266,10 @@ static inline void cpu_hotplug_done(void) {}
+ #define __unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+ #endif /* CONFIG_HOTPLUG_CPU */
+
++/* Wrappers which go away once all code is converted */
++static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
++static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
++
+ #ifdef CONFIG_PM_SLEEP_SMP
+ extern int disable_nonboot_cpus(void);
+ extern void enable_nonboot_cpus(void);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index a1145266c71b..5072bfa657d1 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -104,7 +104,7 @@ static DEFINE_MUTEX(cpuhp_state_mutex);
+ #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+
+
+-void get_online_cpus(void)
++void cpus_read_lock(void)
+ {
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+@@ -114,9 +114,9 @@ void get_online_cpus(void)
+ atomic_inc(&cpu_hotplug.refcount);
+ mutex_unlock(&cpu_hotplug.lock);
+ }
+-EXPORT_SYMBOL_GPL(get_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_lock);
+
+-void put_online_cpus(void)
++void cpus_read_unlock(void)
+ {
+ int refcount;
+
+@@ -133,7 +133,7 @@ void put_online_cpus(void)
+ cpuhp_lock_release();
+
+ }
+-EXPORT_SYMBOL_GPL(put_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_unlock);
+
+ /*
+ * This ensures that the hotplug operation can begin only when the
+@@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
+ * get_online_cpus() not an api which is called all that often.
+ *
+ */
+-void cpu_hotplug_begin(void)
++void cpus_write_lock(void)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -175,7 +175,7 @@ void cpu_hotplug_begin(void)
+ finish_wait(&cpu_hotplug.wq, &wait);
+ }
+
+-void cpu_hotplug_done(void)
++void cpus_write_unlock(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+ mutex_unlock(&cpu_hotplug.lock);
+--
+2.21.0
+
diff --git a/patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch b/patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
new file mode 100644
index 0000000000..36034d2c0d
--- /dev/null
+++ b/patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
@@ -0,0 +1,46 @@
+From ade3f680a76b474d9f5375a9b1d100ee787bf469 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:13 +0200
+Subject: [PATCH] cpu/hotplug: Provide lockdep_assert_cpus_held()
+
+References: bsc#1138374, LTC#178199
+Patch-mainline: v4.13-rc1
+Git-commit: ade3f680a76b474d9f5375a9b1d100ee787bf469
+
+Provide a stub function which can be used in places where existing
+get_online_cpus() calls are moved to call sites.
+
+This stub is going to be filled by the final conversion of the hotplug
+locking mechanism to a percpu rwsem.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.161282442@linutronix.de
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ include/linux/cpu.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -237,6 +237,7 @@ extern void cpus_write_lock(void);
+ extern void cpus_write_unlock(void);
+ extern void cpus_read_lock(void);
+ extern void cpus_read_unlock(void);
++static inline void lockdep_assert_cpus_held(void) { }
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
+@@ -254,6 +255,7 @@ static inline void cpus_write_lock(void)
+ static inline void cpus_write_unlock(void) { }
+ static inline void cpus_read_lock(void) { }
+ static inline void cpus_read_unlock(void) { }
++static inline void lockdep_assert_cpus_held(void) { }
+ static inline void cpu_hotplug_disable(void) { }
+ static inline void cpu_hotplug_enable(void) { }
+ #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
diff --git a/patches.arch/powerpc-cacheinfo-add-cacheinfo_teardown-cacheinfo_r.patch b/patches.arch/powerpc-cacheinfo-add-cacheinfo_teardown-cacheinfo_r.patch
new file mode 100644
index 0000000000..05aa8da393
--- /dev/null
+++ b/patches.arch/powerpc-cacheinfo-add-cacheinfo_teardown-cacheinfo_r.patch
@@ -0,0 +1,72 @@
+From d4aa219a074a5abaf95a756b9f0d190b5c03a945 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathanl@linux.ibm.com>
+Date: Tue, 11 Jun 2019 23:45:04 -0500
+Subject: [PATCH] powerpc/cacheinfo: add cacheinfo_teardown, cacheinfo_rebuild
+
+References: bsc#1138374, LTC#178199
+Patch-mainline: queued
+Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Git-commit: d4aa219a074a5abaf95a756b9f0d190b5c03a945
+
+Allow external callers to force the cacheinfo code to release all its
+references to cache nodes, e.g. before processing device tree updates
+post-migration, and to rebuild the hierarchy afterward.
+
+CPU online/offline must be blocked by callers; enforce this.
+
+Fixes: 410bccf97881 ("powerpc/pseries: Partition migration in the kernel")
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/kernel/cacheinfo.c | 21 +++++++++++++++++++++
+ arch/powerpc/kernel/cacheinfo.h | 4 ++++
+ 2 files changed, 25 insertions(+)
+
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index 862e2890bd3d..42c559efe060 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -896,4 +896,25 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
+ if (cache)
+ cache_cpu_clear(cache, cpu_id);
+ }
++
++void cacheinfo_teardown(void)
++{
++ unsigned int cpu;
++
++ lockdep_assert_cpus_held();
++
++ for_each_online_cpu(cpu)
++ cacheinfo_cpu_offline(cpu);
++}
++
++void cacheinfo_rebuild(void)
++{
++ unsigned int cpu;
++
++ lockdep_assert_cpus_held();
++
++ for_each_online_cpu(cpu)
++ cacheinfo_cpu_online(cpu);
++}
++
+ #endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
+diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
+index 955f5e999f1b..52bd3fc6642d 100644
+--- a/arch/powerpc/kernel/cacheinfo.h
++++ b/arch/powerpc/kernel/cacheinfo.h
+@@ -6,4 +6,8 @@
+ extern void cacheinfo_cpu_online(unsigned int cpu_id);
+ extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+
++/* Allow migration/suspend to tear down and rebuild the hierarchy. */
++extern void cacheinfo_teardown(void);
++extern void cacheinfo_rebuild(void);
++
+ #endif /* _PPC_CACHEINFO_H */
+--
+2.21.0
+
diff --git a/patches.arch/powerpc-pseries-mobility-prevent-cpu-hotplug-during-.patch b/patches.arch/powerpc-pseries-mobility-prevent-cpu-hotplug-during-.patch
new file mode 100644
index 0000000000..b6b768e89a
--- /dev/null
+++ b/patches.arch/powerpc-pseries-mobility-prevent-cpu-hotplug-during-.patch
@@ -0,0 +1,61 @@
+From e59a175faa8df9d674247946f2a5a9c29c835725 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathanl@linux.ibm.com>
+Date: Tue, 11 Jun 2019 23:45:05 -0500
+Subject: [PATCH] powerpc/pseries/mobility: prevent cpu hotplug during DT
+ update
+
+References: bsc#1138374, LTC#178199
+Patch-mainline: queued
+Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Git-commit: e59a175faa8df9d674247946f2a5a9c29c835725
+
+CPU online/offline code paths are sensitive to parts of the device
+tree (various cpu node properties, cache nodes) that can be changed as
+a result of a migration.
+
+Prevent CPU hotplug while the device tree potentially is inconsistent.
+
+Fixes: 410bccf97881 ("powerpc/pseries: Partition migration in the kernel")
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/platforms/pseries/mobility.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index 88925f8ca8a0..edc1ec408589 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -9,6 +9,7 @@
+ * 2 as published by the Free Software Foundation.
+ */
+
++#include <linux/cpu.h>
+ #include <linux/kernel.h>
+ #include <linux/kobject.h>
+ #include <linux/smp.h>
+@@ -338,11 +339,19 @@ void post_mobility_fixup(void)
+ if (rc)
+ printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+
++ /*
++ * We don't want CPUs to go online/offline while the device
++ * tree is being updated.
++ */
++ cpus_read_lock();
++
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ cpus_read_unlock();
++
+ /* Possibly switch to a new RFI flush type */
+ pseries_setup_rfi_flush();
+
+--
+2.21.0
+
diff --git a/patches.arch/powerpc-pseries-mobility-rebuild-cacheinfo-hierarchy.patch b/patches.arch/powerpc-pseries-mobility-rebuild-cacheinfo-hierarchy.patch
new file mode 100644
index 0000000000..5ca26871a7
--- /dev/null
+++ b/patches.arch/powerpc-pseries-mobility-rebuild-cacheinfo-hierarchy.patch
@@ -0,0 +1,83 @@
+From e610a466d16a086e321f0bd421e2fc75cff28605 Mon Sep 17 00:00:00 2001
+From: Nathan Lynch <nathanl@linux.ibm.com>
+Date: Tue, 11 Jun 2019 23:45:06 -0500
+Subject: [PATCH] powerpc/pseries/mobility: rebuild cacheinfo hierarchy
+ post-migration
+
+References: bsc#1138374, LTC#178199
+Patch-mainline: queued
+Git-repo: https://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux.git
+Git-commit: e610a466d16a086e321f0bd421e2fc75cff28605
+
+It's common for the platform to replace the cache device nodes after a
+migration. Since the cacheinfo code is never informed about this, it
+never drops its references to the source system's cache nodes, causing
+it to wind up in an inconsistent state resulting in warnings and oopses
+as soon as CPU online/offline occurs after the migration, e.g.
+
+ cache for /cpus/l3-cache@3113(Unified) refers to cache for /cpus/l2-cache@200d(Unified)
+ WARNING: CPU: 15 PID: 86 at arch/powerpc/kernel/cacheinfo.c:176 release_cache+0x1bc/0x1d0
+ [...]
+ NIP release_cache+0x1bc/0x1d0
+ LR release_cache+0x1b8/0x1d0
+ Call Trace:
+ release_cache+0x1b8/0x1d0 (unreliable)
+ cacheinfo_cpu_offline+0x1c4/0x2c0
+ unregister_cpu_online+0x1b8/0x260
+ cpuhp_invoke_callback+0x114/0xf40
+ cpuhp_thread_fun+0x270/0x310
+ smpboot_thread_fn+0x2c8/0x390
+ kthread+0x1b8/0x1c0
+ ret_from_kernel_thread+0x5c/0x68
+
+Using device tree notifiers won't work since we want to rebuild the
+hierarchy only after all the removals and additions have occurred and
+the device tree is in a consistent state. Call cacheinfo_teardown()
+before processing device tree updates, and rebuild the hierarchy
+afterward.
+
+Fixes: 410bccf97881 ("powerpc/pseries: Partition migration in the kernel")
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Reviewed-by: Gautham R. Shenoy <ego@linux.vnet.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Acked-by: Michal Suchanek <msuchanek@suse.de>
+---
+ arch/powerpc/platforms/pseries/mobility.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index edc1ec408589..b8c8096907d4 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -23,6 +23,7 @@
+ #include <asm/machdep.h>
+ #include <asm/rtas.h>
+ #include "pseries.h"
++#include "../../kernel/cacheinfo.h"
+
+ static struct kobject *mobility_kobj;
+
+@@ -345,11 +346,20 @@ void post_mobility_fixup(void)
+ */
+ cpus_read_lock();
+
++ /*
++ * It's common for the destination firmware to replace cache
++ * nodes. Release all of the cacheinfo hierarchy's references
++ * before updating the device tree.
++ */
++ cacheinfo_teardown();
++
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ cacheinfo_rebuild();
++
+ cpus_read_unlock();
+
+ /* Possibly switch to a new RFI flush type */
+--
+2.21.0
+
diff --git a/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch b/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
new file mode 100644
index 0000000000..9100e4aba2
--- /dev/null
+++ b/patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
@@ -0,0 +1,121 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 6 Jun 2019 09:38:47 -0700
+Subject: tcp: add tcp_min_snd_mss sysctl
+Patch-mainline: Not yet, embargoed
+References: bsc#1137586 CVE-2019-11479
+
+Some TCP peers announce a very small MSS option in their SYN and/or
+SYN/ACK messages.
+
+This forces the stack to send packets with a very high network/cpu
+overhead.
+
+Linux has enforced a minimal value of 48. Since this value includes
+the size of TCP options, and that the options can consume up to 40
+bytes, this means that each segment can include only 8 bytes of payload.
+
+In some cases, it can be useful to increase the minimal value
+to a saner value.
+
+We still let the default to 48 (TCP_MIN_SND_MSS), for compatibility
+reasons.
+
+Note that TCP_MAXSEG socket option enforces a minimal value
+of (TCP_MIN_MSS). David Miller increased this minimal value
+in commit c39508d6f118 ("tcp: Make TCP_MAXSEG minimum more correct.")
+from 64 to 88.
+
+We might in the future merge TCP_MIN_SND_MSS and TCP_MIN_MSS.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Suggested-by: Jonathan Looney <jtl@netflix.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Cc: Bruce Curtis <brucec@netflix.com>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ Documentation/networking/ip-sysctl.txt | 8 ++++++++
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/sysctl_net_ipv4.c | 11 +++++++++++
+ net/ipv4/tcp_ipv4.c | 1 +
+ net/ipv4/tcp_output.c | 3 +--
+ 5 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -223,6 +223,14 @@ tcp_base_mss - INTEGER
+ Path MTU discovery (MTU probing). If MTU probing is enabled,
+ this is the initial MSS used by the connection.
+
++tcp_min_snd_mss - INTEGER
++ TCP SYN and SYNACK messages usually advertise an ADVMSS option,
++ as described in RFC 1122 and RFC 6691.
++ If this ADVMSS option is smaller than tcp_min_snd_mss,
++ it is silently capped to tcp_min_snd_mss.
++
++ Default : 48 (at least 8 bytes of payload per segment)
++
+ tcp_congestion_control - STRING
+ Set the congestion control algorithm to be used for new
+ connections. The algorithm "reno" is always available, but
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -88,6 +88,7 @@ struct netns_ipv4 {
+ int sysctl_tcp_fwmark_accept;
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
++ int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -36,6 +36,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
+ static int ip_local_port_range_max[] = { 65535, 65535 };
+ static int tcp_adv_win_scale_min = -31;
+ static int tcp_adv_win_scale_max = 31;
++static int tcp_min_snd_mss_min = TCP_MIN_SND_MSS;
++static int tcp_min_snd_mss_max = 65535;
+ static int ip_ttl_min = 1;
+ static int ip_ttl_max = 255;
+ static int tcp_syn_retries_min = 1;
+@@ -929,6 +931,15 @@ static struct ctl_table ipv4_net_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
++ {
++ .procname = "tcp_min_snd_mss",
++ .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &tcp_min_snd_mss_min,
++ .extra2 = &tcp_min_snd_mss_max,
++ },
+ {
+ .procname = "tcp_probe_threshold",
+ .data = &init_net.ipv4.sysctl_tcp_probe_threshold,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2410,6 +2410,7 @@ static int __net_init tcp_sk_init(struct net *net)
+ net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
+ net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
++ net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
+ net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+ net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1318,8 +1318,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- if (mss_now < TCP_MIN_SND_MSS)
+- mss_now = TCP_MIN_SND_MSS;
++ mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
+ return mss_now;
+ }
+
diff --git a/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch b/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
new file mode 100644
index 0000000000..df1d1b17c1
--- /dev/null
+++ b/patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
@@ -0,0 +1,34 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 8 Jun 2019 10:38:08 -0700
+Subject: tcp: enforce tcp_min_snd_mss in tcp_mtu_probing()
+Patch-mainline: Not yet, embargo
+References: bsc#1137586 CVE-2019-11479
+
+If mtu probing is enabled tcp_mtu_probing() could very well end up
+with a too small MSS.
+
+Use the new sysctl tcp_min_snd_mss to make sure MSS search
+is performed in an acceptable range.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jonathan Lemon <jonathan.lemon@gmail.com>
+Cc: Jonathan Looney <jtl@netflix.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Cc: Bruce Curtis <brucec@netflix.com>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+---
+ net/ipv4/tcp_timer.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -117,6 +117,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
+ mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
+ mss = max(mss, 68 - tp->tcp_header_len);
++ mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ }
diff --git a/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch b/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
new file mode 100644
index 0000000000..a768d488e0
--- /dev/null
+++ b/patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
@@ -0,0 +1,50 @@
+From: Joao Martins <joao.m.martins@oracle.com>
+Date: Mon, 10 Jun 2019 10:13:23 -0400
+Subject: tcp: fix fack_count accounting on tcp_shift_skb_data()
+Patch-mainline: Not yet, embargo
+References: CVE-2019-11477 bsc#1137586
+
+v4.15 or since commit 737ff314563 ("tcp: use sequence distance to
+detect reordering") had switched from the packet-based FACK tracking and
+switched to sequence-based.
+
+v4.14 and older still have the old logic and hence on
+tcp_skb_shift_data() needs to retain its original logic and have
+@fack_count in sync. In other words, we keep the increment of pcount with
+tcp_skb_pcount(skb) to later used that to update fack_count. To make it
+more explicit we track the new skb that gets incremented to pcount in
+@next_pcount, and we get to avoid the constant invocation of
+tcp_skb_pcount(skb) all together.
+
+Reported-by: Alexey Kodanev <alexey.kodanev@oracle.com>
+Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+---
+ net/ipv4/tcp_input.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1355,6 +1355,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *prev;
+ int mss;
++ int next_pcount;
+ int pcount = 0;
+ int len;
+ int in_sack;
+@@ -1468,9 +1469,11 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ goto out;
+
+ len = skb->len;
+- pcount = tcp_skb_pcount(skb);
+- if (tcp_skb_shift(prev, skb, pcount, len))
+- tcp_shifted_skb(sk, skb, state, pcount, len, mss, 0);
++ next_pcount = tcp_skb_pcount(skb);
++ if (tcp_skb_shift(prev, skb, next_pcount, len)) {
++ pcount += next_pcount;
++ tcp_shifted_skb(sk, skb, state, next_pcount, len, mss, 0);
++ }
+
+ out:
+ state->fack_count += pcount;
diff --git a/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch b/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
new file mode 100644
index 0000000000..2002c5ce07
--- /dev/null
+++ b/patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
@@ -0,0 +1,142 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 6 Jun 2019 09:38:45 -0700
+Subject: tcp: limit payload size of sacked skbs
+Patch-mainline: Not yet, embargoed
+References: bsc#1137586 CVE-2019-11477
+
+Jonathan Looney reported that TCP can trigger the following crash
+in tcp_shifted_skb() :
+
+ BUG_ON(tcp_skb_pcount(skb) < pcount);
+
+This can happen if the remote peer has advertised the smallest
+MSS that linux TCP accepts : 48
+
+An skb can hold 17 fragments, and each fragment can hold 32KB
+on x86, or 64KB on PowerPC.
+
+This means that the 16-bit width of TCP_SKB_CB(skb)->tcp_gso_segs
+can overflow.
+
+Note that tcp_sendmsg() builds skbs with less than 64KB
+of payload, so this problem needs SACK to be enabled.
+SACK blocks allow TCP to coalesce multiple skbs in the retransmit
+queue, thus filling the 17 fragments to maximal capacity.
+
+Fixes: 832d11c5cd07 ("tcp: Try to restore large SKBs while SACK processing")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jonathan Looney <jtl@netflix.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Bruce Curtis <brucec@netflix.com>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ include/linux/tcp.h | 3 +++
+ include/net/tcp.h | 2 ++
+ net/ipv4/tcp.c | 1 +
+ net/ipv4/tcp_input.c | 26 ++++++++++++++++++++------
+ net/ipv4/tcp_output.c | 4 ++--
+ 5 files changed, 28 insertions(+), 8 deletions(-)
+
+--- a/include/linux/tcp.h
++++ b/include/linux/tcp.h
+@@ -420,4 +420,7 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
+ tp->saved_syn = NULL;
+ }
+
++int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
++ int shiftlen);
++
+ #endif /* _LINUX_TCP_H */
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -54,6 +54,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
+
+ #define MAX_TCP_HEADER (128 + MAX_HEADER)
+ #define MAX_TCP_OPTION_SPACE 40
++#define TCP_MIN_SND_MSS 48
++#define TCP_MIN_GSO_SIZE (TCP_MIN_SND_MSS - MAX_TCP_OPTION_SPACE)
+
+ /*
+ * Never offer a window over 32767 without using window scaling. Some
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3134,6 +3134,7 @@ void __init tcp_init(void)
+ int max_rshare, max_wshare, cnt;
+ unsigned int i;
+
++ BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE);
+ sock_skb_cb_check_size(sizeof(struct tcp_skb_cb));
+
+ percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1267,7 +1267,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
+ TCP_SKB_CB(skb)->seq += shifted;
+
+ tcp_skb_pcount_add(prev, pcount);
+- BUG_ON(tcp_skb_pcount(skb) < pcount);
++ WARN_ON_ONCE(tcp_skb_pcount(skb) < pcount);
+ tcp_skb_pcount_add(skb, -pcount);
+
+ /* When we're adding to gso_segs == 1, gso_size will be zero,
+@@ -1329,6 +1329,21 @@ static int skb_can_shift(const struct sk_buff *skb)
+ return !skb_headlen(skb) && skb_is_nonlinear(skb);
+ }
+
++int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from,
++ int pcount, int shiftlen)
++{
++ /* TCP min gso_size is 8 bytes (TCP_MIN_GSO_SIZE)
++ * Since TCP_SKB_CB(skb)->tcp_gso_segs is 16 bits, we need
++ * to make sure not storing more than 65535 * 8 bytes per skb,
++ * even if current MSS is bigger.
++ */
++ if (unlikely(to->len + shiftlen >= 65535 * TCP_MIN_GSO_SIZE))
++ return 0;
++ if (unlikely(tcp_skb_pcount(to) + pcount > 65535))
++ return 0;
++ return skb_shift(to, from, shiftlen);
++}
++
+ /* Try collapsing SACK blocks spanning across multiple skbs to a single
+ * skb.
+ */
+@@ -1434,7 +1449,7 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+ goto fallback;
+
+- if (!skb_shift(prev, skb, len))
++ if (!tcp_skb_shift(prev, skb, pcount, len))
+ goto fallback;
+ if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
+ goto out;
+@@ -1453,10 +1468,9 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
+ goto out;
+
+ len = skb->len;
+- if (skb_shift(prev, skb, len)) {
+- pcount += tcp_skb_pcount(skb);
+- tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
+- }
++ pcount = tcp_skb_pcount(skb);
++ if (tcp_skb_shift(prev, skb, pcount, len))
++ tcp_shifted_skb(sk, skb, state, pcount, len, mss, 0);
+
+ out:
+ state->fack_count += pcount;
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1315,8 +1315,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- if (mss_now < 48)
+- mss_now = 48;
++ if (mss_now < TCP_MIN_SND_MSS)
++ mss_now = TCP_MIN_SND_MSS;
+ return mss_now;
+ }
+
diff --git a/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch b/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
new file mode 100644
index 0000000000..65cd974118
--- /dev/null
+++ b/patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
@@ -0,0 +1,68 @@
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 6 Jun 2019 09:38:46 -0700
+Subject: tcp: tcp_fragment() should apply sane memory limits
+Patch-mainline: Not yet, embargoed
+References: bsc#1137586 CVE-2019-11478
+
+Jonathan Looney reported that a malicious peer can force a sender
+to fragment its retransmit queue into tiny skbs, inflating memory
+usage and/or overflow 32bit counters.
+
+TCP allows an application to queue up to sk_sndbuf bytes,
+so we need to give some allowance for non malicious splitting
+of retransmit queue.
+
+A new SNMP counter is added to monitor how many times TCP
+did not allow to split an skb if the allowance was exceeded.
+
+Note that this counter might increase in the case applications
+use SO_SNDBUF socket option to lower sk_sndbuf.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Jonathan Looney <jtl@netflix.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Reviewed-by: Tyler Hicks <tyhicks@canonical.com>
+Cc: Bruce Curtis <brucec@netflix.com>
+Acked-by: Michal Kubecek <mkubecek@suse.cz>
+
+---
+ include/uapi/linux/snmp.h | 1 +
+ net/ipv4/proc.c | 1 +
+ net/ipv4/tcp_output.c | 5 +++++
+ 3 files changed, 7 insertions(+)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -280,6 +280,7 @@ enum
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
++ LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
+ __LINUX_MIB_MAX
+ };
+
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -302,6 +302,7 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+ SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+ SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
++ SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
+ SNMP_MIB_SENTINEL
+ };
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1149,6 +1149,11 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
++ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
++ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
++ return -ENOMEM;
++ }
++
+ if (skb_unclone(skb, gfp))
+ return -ENOMEM;
+
diff --git a/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch b/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
new file mode 100644
index 0000000000..02b35dbd03
--- /dev/null
+++ b/patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
@@ -0,0 +1,52 @@
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Fri, 7 Jun 2019 18:05:46 +0200
+Subject: kabi: drop LINUX_MIB_TCPWQUEUETOOBIG snmp counter
+Patch-mainline: Never, kabi workaround
+References: bsc#1137586 CVE-2019-11478
+
+patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch adds
+LINUX_MIB_TCPWQUEUETOOBIG snmp attribute which breaks kABI. As it is only
+a diagnostic aid and is not essential for the actual security fix, drop
+the snmp counter and leave only the check.
+
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+---
+ include/uapi/linux/snmp.h | 1 -
+ net/ipv4/proc.c | 1 -
+ net/ipv4/tcp_output.c | 4 +---
+ 3 files changed, 1 insertion(+), 5 deletions(-)
+
+--- a/include/uapi/linux/snmp.h
++++ b/include/uapi/linux/snmp.h
+@@ -280,7 +280,6 @@ enum
+ LINUX_MIB_TCPKEEPALIVE, /* TCPKeepAlive */
+ LINUX_MIB_TCPMTUPFAIL, /* TCPMTUPFail */
+ LINUX_MIB_TCPMTUPSUCCESS, /* TCPMTUPSuccess */
+- LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
+ __LINUX_MIB_MAX
+ };
+
+--- a/net/ipv4/proc.c
++++ b/net/ipv4/proc.c
+@@ -302,7 +302,6 @@ static const struct snmp_mib snmp4_net_list[] = {
+ SNMP_MIB_ITEM("TCPKeepAlive", LINUX_MIB_TCPKEEPALIVE),
+ SNMP_MIB_ITEM("TCPMTUPFail", LINUX_MIB_TCPMTUPFAIL),
+ SNMP_MIB_ITEM("TCPMTUPSuccess", LINUX_MIB_TCPMTUPSUCCESS),
+- SNMP_MIB_ITEM("TCPWqueueTooBig", LINUX_MIB_TCPWQUEUETOOBIG),
+ SNMP_MIB_SENTINEL
+ };
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1149,10 +1149,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
+- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
++ if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf))
+ return -ENOMEM;
+- }
+
+ if (skb_unclone(skb, gfp))
+ return -ENOMEM;
diff --git a/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch b/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch
new file mode 100644
index 0000000000..a15e2a0b25
--- /dev/null
+++ b/patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch
@@ -0,0 +1,90 @@
+From: Michal Kubecek <mkubecek@suse.cz>
+Date: Sat, 8 Jun 2019 12:30:13 +0200
+Subject: kabi: move sysctl_tcp_min_snd_mss to preserve struct net layout
+Patch-mainline: Never, kabi workaround
+References: bsc#1137586 CVE-2019-11479
+
+Patch patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch adds new member
+sysctl_tcp_min_snd_mss into struct netns_ipv4 which is embedded into struct
+net so that the patch changes its layout in an incompatible way. Move it to
+the end of struct net.
+
+This is safe as struct net is always allocated by in-tree helper and is
+never embedded in another structure or used as an array element.
+
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+---
+ include/net/net_namespace.h | 3 +++
+ include/net/netns/ipv4.h | 1 -
+ net/ipv4/sysctl_net_ipv4.c | 2 +-
+ net/ipv4/tcp_ipv4.c | 2 +-
+ net/ipv4/tcp_output.c | 2 +-
+ net/ipv4/tcp_timer.c | 2 +-
+ 6 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -139,6 +139,9 @@ struct net {
+ #endif
+ struct sock *diag_nlsk;
+ atomic_t fnhe_genid;
++#ifndef __GENKSYMS__
++ int sysctl_tcp_min_snd_mss;
++#endif
+ };
+
+ #include <linux/seq_file_net.h>
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -88,7 +88,6 @@ struct netns_ipv4 {
+ int sysctl_tcp_fwmark_accept;
+ int sysctl_tcp_mtu_probing;
+ int sysctl_tcp_base_mss;
+- int sysctl_tcp_min_snd_mss;
+ int sysctl_tcp_probe_threshold;
+ u32 sysctl_tcp_probe_interval;
+
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -933,7 +933,7 @@ static struct ctl_table ipv4_net_table[] = {
+ },
+ {
+ .procname = "tcp_min_snd_mss",
+- .data = &init_net.ipv4.sysctl_tcp_min_snd_mss,
++ .data = &init_net.sysctl_tcp_min_snd_mss,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -2410,7 +2410,7 @@ static int __net_init tcp_sk_init(struct net *net)
+ net->ipv4.sysctl_tcp_ecn_fallback = 1;
+
+ net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
+- net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
++ net->sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
+ net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
+ net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1318,7 +1318,7 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
+ mss_now -= icsk->icsk_ext_hdr_len;
+
+ /* Then reserve room for full set of TCP options and 8 bytes of data */
+- mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
++ mss_now = max(mss_now, sock_net(sk)->sysctl_tcp_min_snd_mss);
+ return mss_now;
+ }
+
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -117,7 +117,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
+ mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
+ mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
+ mss = max(mss, 68 - tp->tcp_header_len);
+- mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
++ mss = max(mss, net->sysctl_tcp_min_snd_mss);
+ icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
+ tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
+ }
diff --git a/series.conf b/series.conf
index 94671667c9..4ad4c9d062 100644
--- a/series.conf
+++ b/series.conf
@@ -22299,6 +22299,8 @@
patches.arch/s390-sles12sp3-03-05-08-pci-provide-more-debug-information.patch
patches.arch/s390-sles12sp3-03-05-09-pci-recognize-name-clashes-with-uids.patch
patches.drivers/0010-irqchip-gic-v3-its-Add-ACPI-NUMA-node-mapping.patch
+ patches.arch/cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
+ patches.arch/cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
patches.arch/stop_machine-provide-stop_machine_cpuslocked.patch
patches.arch/02-jump_label-reorder-hotplug-lock-and-jump_label_lock.patch
patches.arch/s390-prevent-hotplug-rwsem-recursion.patch
@@ -25660,6 +25662,11 @@
# jejb/scsi for-next
patches.drivers/scsi-mpt3sas_ctl-fix-double-fetch-bug-in-ctl_ioctl_main
+ # powerpc/linux next
+ patches.arch/powerpc-cacheinfo-add-cacheinfo_teardown-cacheinfo_r.patch
+ patches.arch/powerpc-pseries-mobility-prevent-cpu-hotplug-during-.patch
+ patches.arch/powerpc-pseries-mobility-rebuild-cacheinfo-hierarchy.patch
+
# out-of-tree patches
patches.kabi/0001-move-power_up_on_resume-flag-to-end-of-structure-for.patch
patches.suse/0001-drm-ttm-Remove-warning-about-inconsistent-mapping-in.patch
@@ -25667,6 +25674,13 @@
patches.fixes/0001-mwifiex-Fix-heap-overflow-in-mwifiex_uap_parse_tail_.patch
patches.fixes/0001-mwifiex-Abort-at-too-short-BSS-descriptor-element.patch
patches.fixes/0001-mwifiex-Fix-possible-buffer-overflows-at-parsing-bss.patch
+ patches.fixes/tcp-limit-payload-size-of-sacked-skbs.patch
+ patches.fixes/tcp-tcp_fragment-should-apply-sane-memory-limits.patch
+ patches.kabi/kabi-drop-LINUX_MIB_TCPWQUEUETOOBIG-snmp-counter.patch
+ patches.fixes/tcp-add-tcp_min_snd_mss-sysctl.patch
+ patches.fixes/tcp-enforce-tcp_min_snd_mss-in-tcp_mtu_probing.patch
+ patches.kabi/kabi-move-sysctl_tcp_min_snd_mss-to-preserve-struct-.patch
+ patches.fixes/tcp-fix-fack_count-accounting-on-tcp_shift_skb_data.patch
########################################################
# end of sorted patches