author    Michal Kubecek <mkubecek@suse.cz>  2019-10-16 15:35:29 +0200
committer Michal Kubecek <mkubecek@suse.cz>  2019-10-16 15:35:29 +0200
commit    f99dc97bcab8e805c47cd255977cb5ef942e6ccb (patch)
tree      1fd7e8eac50d86e4d1a302a5b299e784ceff5a7a
parent    c2da3eac10411fdc200d3eb6f730d49474f91b43 (diff)
parent    1a8c0199c5857f7c16a908ec2cf20c4a42de64b0 (diff)
Merge branch 'users/vbabka/SLE15-SP2/for-next' into SLE15-SP2
Pull memory management backport from Vlastimil Babka.
-rw-r--r--  patches.suse/mm-compaction-fix-wrong-pfn-handling-in-_reset_isolation_pfn.patch        66
-rw-r--r--  patches.suse/mm-page_owner-debug_pagealloc-save-and-dump-freeing-stack-trace.patch    184
-rw-r--r--  patches.suse/mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch    146
-rw-r--r--  patches.suse/mm-page_owner-fix-off-by-one-error-in-_set_page_owner_handle.patch       195
-rw-r--r--  patches.suse/mm-page_owner-keep-owner-info-when-freeing-the-page.patch                127
-rw-r--r--  patches.suse/mm-page_owner-record-page-owner-for-each-subpage.patch                   131
-rw-r--r--  patches.suse/mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch         94
-rw-r--r--  patches.suse/x86-mm-fix-fast-gup-paravirt.patch                                         79
-rw-r--r--  series.conf                                                                             9
9 files changed, 950 insertions, 81 deletions
diff --git a/patches.suse/mm-compaction-fix-wrong-pfn-handling-in-_reset_isolation_pfn.patch b/patches.suse/mm-compaction-fix-wrong-pfn-handling-in-_reset_isolation_pfn.patch
new file mode 100644
index 0000000000..9fdee72ceb
--- /dev/null
+++ b/patches.suse/mm-compaction-fix-wrong-pfn-handling-in-_reset_isolation_pfn.patch
@@ -0,0 +1,66 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 14 Oct 2019 14:12:07 -0700
+Subject: mm, compaction: fix wrong pfn handling in __reset_isolation_pfn()
+Git-commit: a2e9a5afce080226edbf1882d63d99bf32070e9e
+Patch-mainline: v5.5 or v5.4-rc4 (next release)
+References: git-fixes (mm/compaction)
+
+Florian and Dave reported [1] a NULL pointer dereference in
+__reset_isolation_pfn(). While the exact cause is unclear, staring at
+the code revealed two bugs, which might be related.
+
+One bug is that if a zone starts in the middle of a pageblock, block_page
+might correspond to a different pfn than block_pfn, and then the
+pfn_valid_within() checks will check different pfns than those accessed
+via struct page. This might result in accessing an uninitialized page in
+CONFIG_HOLES_IN_ZONE configs.
+
+The other bug is that end_page refers to the first page of the next
+pageblock and not the last page of the current pageblock. The online and
+valid check is then wrong and, with sections, the while (page < end_page)
+loop might wander off the actual struct page arrays.
+
+[1] https://lore.kernel.org/linux-xfs/87o8z1fvqu.fsf@mid.deneb.enyo.de/
+
+Link: http://lkml.kernel.org/r/20191008152915.24704-1-vbabka@suse.cz
+Fixes: 6b0868c820ff ("mm/compaction.c: correct zone boundary handling when resetting pageblock skip hints")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Florian Weimer <fw@deneb.enyo.de>
+Reported-by: Dave Chinner <david@fromorbit.com>
+Acked-by: Mel Gorman <mgorman@techsingularity.net>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ mm/compaction.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone,
+
+ /* Ensure the start of the pageblock or zone is online and valid */
+ block_pfn = pageblock_start_pfn(pfn);
+- block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
++ block_pfn = max(block_pfn, zone->zone_start_pfn);
++ block_page = pfn_to_online_page(block_pfn);
+ if (block_page) {
+ page = block_page;
+ pfn = block_pfn;
+ }
+
+ /* Ensure the end of the pageblock or zone is online and valid */
+- block_pfn += pageblock_nr_pages;
++ block_pfn = pageblock_end_pfn(pfn) - 1;
+ block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
+ end_page = pfn_to_online_page(block_pfn);
+ if (!end_page)
+@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone,
+
+ page += (1 << PAGE_ALLOC_COSTLY_ORDER);
+ pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
+- } while (page < end_page);
++ } while (page <= end_page);
+
+ return false;
+ }
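A rough userspace sketch of the pfn arithmetic fixed above may help; it assumes 512 pages per pageblock (typical for x86-64, but configuration dependent) and made-up pfn values, so it only illustrates the pfn/page mismatch and the end-of-block off-by-one, not the kernel code itself:

#include <stdio.h>

/* Pageblock helpers mimicking the kernel's; 512 pages per pageblock
 * (order-9) is typical for x86-64 but depends on the configuration. */
#define pageblock_nr_pages	512UL
#define pageblock_start_pfn(pfn)	((pfn) & ~(pageblock_nr_pages - 1))
#define pageblock_end_pfn(pfn)	(pageblock_start_pfn(pfn) + pageblock_nr_pages)

int main(void)
{
	unsigned long zone_start_pfn = 768;	/* zone begins mid-pageblock */
	unsigned long pfn = 800;		/* a pfn inside that pageblock */
	unsigned long block_pfn = pageblock_start_pfn(pfn);	/* 512 */

	/* Before the fix: the page was looked up at max(block_pfn,
	 * zone_start_pfn) = 768, but the loop pfn was set to block_pfn =
	 * 512, so pfn_valid_within() checked different frames than the
	 * struct page that was actually dereferenced. */
	printf("page looked up at pfn %lu, loop pfn starts at %lu\n",
	       block_pfn > zone_start_pfn ? block_pfn : zone_start_pfn,
	       block_pfn);

	/* Before the fix the end was the first pfn of the *next* block;
	 * after the fix it is the last pfn of the current block. */
	printf("old end pfn %lu, new end pfn %lu\n",
	       block_pfn + pageblock_nr_pages, pageblock_end_pfn(pfn) - 1);
	return 0;
}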
diff --git a/patches.suse/mm-page_owner-debug_pagealloc-save-and-dump-freeing-stack-trace.patch b/patches.suse/mm-page_owner-debug_pagealloc-save-and-dump-freeing-stack-trace.patch
new file mode 100644
index 0000000000..a76d86f135
--- /dev/null
+++ b/patches.suse/mm-page_owner-debug_pagealloc-save-and-dump-freeing-stack-trace.patch
@@ -0,0 +1,184 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 23 Sep 2019 15:34:42 -0700
+Subject: mm, page_owner, debug_pagealloc: save and dump freeing stack trace
+Git-commit: 8974558f49a6a41b4a74db672e13bca616eff6d8
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+The debug_pagealloc functionality is useful to catch buggy page allocator
+users that cause e.g. use after free or double free. When page
+inconsistency is detected, debugging is often simpler by knowing the call
+stack of process that last allocated and freed the page. When page_owner
+is also enabled, we record the allocation stack trace, but not freeing.
+
+This patch therefore adds recording of freeing process stack trace to page
+owner info, if both page_owner and debug_pagealloc are configured and
+enabled. With only page_owner enabled, this info is not useful for the
+memory leak debugging use case. dump_page() is adjusted to print the
+info. An example result of calling __free_pages() twice may look like
+this (note the page last free stack trace):
+
+BUG: Bad page state in process bash pfn:13d8f8
+page:ffffc31984f63e00 refcount:-1 mapcount:0 mapping:0000000000000000 index:0x0
+flags: 0x1affff800000000()
+raw: 01affff800000000 dead000000000100 dead000000000122 0000000000000000
+raw: 0000000000000000 0000000000000000 ffffffffffffffff 0000000000000000
+page dumped because: nonzero _refcount
+page_owner tracks the page as freed
+page last allocated via order 0, migratetype Unmovable, gfp_mask 0xcc0(GFP_KERNEL)
+ prep_new_page+0x143/0x150
+ get_page_from_freelist+0x289/0x380
+ __alloc_pages_nodemask+0x13c/0x2d0
+ khugepaged+0x6e/0xc10
+ kthread+0xf9/0x130
+ ret_from_fork+0x3a/0x50
+page last free stack trace:
+ free_pcp_prepare+0x134/0x1e0
+ free_unref_page+0x18/0x90
+ khugepaged+0x7b/0xc10
+ kthread+0xf9/0x130
+ ret_from_fork+0x3a/0x50
+Modules linked in:
+CPU: 3 PID: 271 Comm: bash Not tainted 5.3.0-rc4-2.g07a1a73-default+ #57
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.1-0-ga5cab58-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ dump_stack+0x85/0xc0
+ bad_page.cold+0xba/0xbf
+ rmqueue_pcplist.isra.0+0x6c5/0x6d0
+ rmqueue+0x2d/0x810
+ get_page_from_freelist+0x191/0x380
+ __alloc_pages_nodemask+0x13c/0x2d0
+ __get_free_pages+0xd/0x30
+ __pud_alloc+0x2c/0x110
+ copy_page_range+0x4f9/0x630
+ dup_mmap+0x362/0x480
+ dup_mm+0x68/0x110
+ copy_process+0x19e1/0x1b40
+ _do_fork+0x73/0x310
+ __x64_sys_clone+0x75/0x80
+ do_syscall_64+0x6e/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+RIP: 0033:0x7f10af854a10
+...
+
+Link: http://lkml.kernel.org/r/20190820131828.22684-5-vbabka@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ Documentation/admin-guide/kernel-parameters.txt | 2
+ mm/Kconfig.debug | 4 +
+ mm/page_owner.c | 53 ++++++++++++++++++------
+ 3 files changed, 45 insertions(+), 14 deletions(-)
+
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -809,6 +809,8 @@
+ enables the feature at boot time. By default, it is
+ disabled and the system will work mostly the same as a
+ kernel built without CONFIG_DEBUG_PAGEALLOC.
++ Note: to get most of debug_pagealloc error reports, it's
++ useful to also enable the page_owner functionality.
+ on: enable the feature
+
+ debugpat [X86] Enable PAT debugging
+--- a/mm/Kconfig.debug
++++ b/mm/Kconfig.debug
+@@ -21,7 +21,9 @@ config DEBUG_PAGEALLOC
+ Also, the state of page tracking structures is checked more often as
+ pages are being allocated and freed, as unexpected state changes
+ often happen for same reasons as memory corruption (e.g. double free,
+- use-after-free).
++ use-after-free). The error reports for these checks can be augmented
++ with stack traces of last allocation and freeing of the page, when
++ PAGE_OWNER is also selected and enabled on boot.
+
+ For architectures which don't enable ARCH_SUPPORTS_DEBUG_PAGEALLOC,
+ fill the pages with poison patterns after free_pages() and verify
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -24,6 +24,9 @@ struct page_owner {
+ short last_migrate_reason;
+ gfp_t gfp_mask;
+ depot_stack_handle_t handle;
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ depot_stack_handle_t free_handle;
++#endif
+ };
+
+ static bool page_owner_disabled = true;
+@@ -102,19 +105,6 @@ static inline struct page_owner *get_pag
+ return (void *)page_ext + page_owner_ops.offset;
+ }
+
+-void __reset_page_owner(struct page *page, unsigned int order)
+-{
+- int i;
+- struct page_ext *page_ext;
+-
+- for (i = 0; i < (1 << order); i++) {
+- page_ext = lookup_page_ext(page + i);
+- if (unlikely(!page_ext))
+- continue;
+- __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+- }
+-}
+-
+ static inline bool check_recursive_alloc(unsigned long *entries,
+ unsigned int nr_entries,
+ unsigned long ip)
+@@ -154,6 +144,32 @@ static noinline depot_stack_handle_t sav
+ return handle;
+ }
+
++void __reset_page_owner(struct page *page, unsigned int order)
++{
++ int i;
++ struct page_ext *page_ext;
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ depot_stack_handle_t handle = 0;
++ struct page_owner *page_owner;
++
++ if (debug_pagealloc_enabled())
++ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
++#endif
++
++ for (i = 0; i < (1 << order); i++) {
++ page_ext = lookup_page_ext(page + i);
++ if (unlikely(!page_ext))
++ continue;
++ __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (debug_pagealloc_enabled()) {
++ page_owner = get_page_owner(page_ext);
++ page_owner->free_handle = handle;
++ }
++#endif
++ }
++}
++
+ static inline void __set_page_owner_handle(struct page *page,
+ struct page_ext *page_ext, depot_stack_handle_t handle,
+ unsigned int order, gfp_t gfp_mask)
+@@ -435,6 +451,17 @@ void __dump_page_owner(struct page *page
+ stack_trace_print(entries, nr_entries, 0);
+ }
+
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ handle = READ_ONCE(page_owner->free_handle);
++ if (!handle) {
++ pr_alert("page_owner free stack trace missing\n");
++ } else {
++ nr_entries = stack_depot_fetch(handle, &entries);
++ pr_alert("page last free stack trace:\n");
++ stack_trace_print(entries, nr_entries, 0);
++ }
++#endif
++
+ if (page_owner->last_migrate_reason != -1)
+ pr_alert("page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_owner->last_migrate_reason]);
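The report quoted in the patch description above can be provoked on purpose for testing. A minimal, hypothetical test module (name and structure are illustrative, not part of the patch) that frees the same order-0 page twice on a kernel booted with debug_pagealloc=on page_owner=on:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: double-free an order-0 page so the allocator's
 * bad-page report, including the "page last free stack trace" above,
 * is eventually emitted (boot with debug_pagealloc=on page_owner=on). */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int __init double_free_demo_init(void)
{
	struct page *page = alloc_page(GFP_KERNEL);

	if (!page)
		return -ENOMEM;

	__free_pages(page, 0);
	__free_pages(page, 0);	/* bug on purpose: second free of the page */
	return 0;
}
module_init(double_free_demo_init);

MODULE_LICENSE("GPL");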
diff --git a/patches.suse/mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch b/patches.suse/mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch
new file mode 100644
index 0000000000..02d592f11f
--- /dev/null
+++ b/patches.suse/mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch
@@ -0,0 +1,146 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 14 Oct 2019 14:11:44 -0700
+Subject: mm, page_owner: decouple freeing stack trace from debug_pagealloc
+Git-commit: 0fe9a448a029a11d7211fcc2ebe9023d7fd31792
+Patch-mainline: v5.5 or v5.4-rc4 (next release)
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+Commit 8974558f49a6 ("mm, page_owner, debug_pagealloc: save and dump
+freeing stack trace") enhanced page_owner to also store freeing stack
+trace, when debug_pagealloc is also enabled. KASAN would also like to
+do this [1] to improve error reports to debug e.g. UAF issues.
+
+Kirill has suggested that it should also be possible to enable saving of
+the freeing stack trace separately from KASAN or debug_pagealloc, i.e.
+with an extra boot option. Qian argued that we have enough options
+already, and avoiding the extra overhead is not worth the complications
+in the case of a debugging option. Kirill noted that the extra stack
+handle in struct page_owner requires 0.1% of memory.
+
+This patch therefore enables free stack saving whenever page_owner is
+enabled, regardless of whether debug_pagealloc or KASAN is also enabled.
+KASAN kernels booted with page_owner=on will thus benefit from the
+improved error reports.
+
+[1] https://bugzilla.kernel.org/show_bug.cgi?id=203967
+
+[vbabka@suse.cz: v3]
+ Link: http://lkml.kernel.org/r/20191007091808.7096-3-vbabka@suse.cz
+Link: http://lkml.kernel.org/r/20190930122916.14969-3-vbabka@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reviewed-by: Qian Cai <cai@lca.pw>
+Suggested-by: Dmitry Vyukov <dvyukov@google.com>
+Suggested-by: Walter Wu <walter-zh.wu@mediatek.com>
+Suggested-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Suggested-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ Documentation/dev-tools/kasan.rst | 3 +++
+ mm/page_owner.c | 28 +++++++---------------------
+ 2 files changed, 10 insertions(+), 21 deletions(-)
+
+--- a/Documentation/dev-tools/kasan.rst
++++ b/Documentation/dev-tools/kasan.rst
+@@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 -
+ Both KASAN modes work with both SLUB and SLAB memory allocators.
+ For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
+
++To augment reports with last allocation and freeing stack of the physical page,
++it is recommended to enable also CONFIG_PAGE_OWNER and boot with page_owner=on.
++
+ To disable instrumentation for specific files or directories, add a line
+ similar to the following to the respective kernel Makefile:
+
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -24,12 +24,10 @@ struct page_owner {
+ short last_migrate_reason;
+ gfp_t gfp_mask;
+ depot_stack_handle_t handle;
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ depot_stack_handle_t free_handle;
+-#endif
+ };
+
+-static bool page_owner_disabled = true;
++static bool page_owner_enabled = false;
+ DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+
+ static depot_stack_handle_t dummy_handle;
+@@ -44,7 +42,7 @@ static int __init early_page_owner_param
+ return -EINVAL;
+
+ if (strcmp(buf, "on") == 0)
+- page_owner_disabled = false;
++ page_owner_enabled = true;
+
+ return 0;
+ }
+@@ -52,10 +50,7 @@ early_param("page_owner", early_page_own
+
+ static bool need_page_owner(void)
+ {
+- if (page_owner_disabled)
+- return false;
+-
+- return true;
++ return page_owner_enabled;
+ }
+
+ static __always_inline depot_stack_handle_t create_dummy_stack(void)
+@@ -84,7 +79,7 @@ static noinline void register_early_stac
+
+ static void init_page_owner(void)
+ {
+- if (page_owner_disabled)
++ if (!page_owner_enabled)
+ return;
+
+ register_dummy_stack();
+@@ -148,25 +143,18 @@ void __reset_page_owner(struct page *pag
+ {
+ int i;
+ struct page_ext *page_ext;
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ depot_stack_handle_t handle = 0;
+ struct page_owner *page_owner;
+
+- if (debug_pagealloc_enabled())
+- handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+-#endif
++ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+
+ page_ext = lookup_page_ext(page);
+ if (unlikely(!page_ext))
+ return;
+ for (i = 0; i < (1 << order); i++) {
+ __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+- if (debug_pagealloc_enabled()) {
+- page_owner = get_page_owner(page_ext);
+- page_owner->free_handle = handle;
+- }
+-#endif
++ page_owner = get_page_owner(page_ext);
++ page_owner->free_handle = handle;
+ page_ext = page_ext_next(page_ext);
+ }
+ }
+@@ -450,7 +438,6 @@ void __dump_page_owner(struct page *page
+ stack_trace_print(entries, nr_entries, 0);
+ }
+
+-#ifdef CONFIG_DEBUG_PAGEALLOC
+ handle = READ_ONCE(page_owner->free_handle);
+ if (!handle) {
+ pr_alert("page_owner free stack trace missing\n");
+@@ -459,7 +446,6 @@ void __dump_page_owner(struct page *page
+ pr_alert("page last free stack trace:\n");
+ stack_trace_print(entries, nr_entries, 0);
+ }
+-#endif
+
+ if (page_owner->last_migrate_reason != -1)
+ pr_alert("page has been migrated, last migrate reason: %s\n",
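With the #ifdefs gone, every page_owner=on kernel pays for one extra depot handle per page_ext entry and one stack walk per free. The mechanism behind free_handle is the generic stack depot; a kernel-side sketch of that round trip (illustrative only, not code from this patch):

#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>

/* Capture the current call stack and keep only a compact handle. */
static depot_stack_handle_t record_current_stack(void)
{
	unsigned long entries[16];
	unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 2);

	/* GFP_NOWAIT: the free path this mirrors must not sleep. */
	return stack_depot_save(entries, nr, GFP_NOWAIT | __GFP_NOWARN);
}

/* Expand the handle again when a report needs to be printed. */
static void print_recorded_stack(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr;

	if (!handle)
		return;
	nr = stack_depot_fetch(handle, &entries);
	stack_trace_print(entries, nr, 0);
}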
diff --git a/patches.suse/mm-page_owner-fix-off-by-one-error-in-_set_page_owner_handle.patch b/patches.suse/mm-page_owner-fix-off-by-one-error-in-_set_page_owner_handle.patch
new file mode 100644
index 0000000000..eeb0005d6a
--- /dev/null
+++ b/patches.suse/mm-page_owner-fix-off-by-one-error-in-_set_page_owner_handle.patch
@@ -0,0 +1,195 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 14 Oct 2019 14:11:40 -0700
+Subject: mm, page_owner: fix off-by-one error in __set_page_owner_handle()
+Git-commit: 5556cfe8d994d5e7b4d50fd91597b8dc0b3a82fd
+Patch-mainline: v5.5 or v5.4-rc4 (next release)
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+Patch series "followups to debug_pagealloc improvements through
+page_owner", v3.
+
+These are followups to [1] which made it to Linus meanwhile. Patches 1
+and 3 are based on Kirill's review, patch 2 on KASAN request [2]. It
+would be nice if all of this made it to 5.4 with [1] already there (or
+at least Patch 1).
+
+This patch (of 3):
+
+As noted by Kirill, commit 7e2f2a0cd17c ("mm, page_owner: record page
+owner for each subpage") has introduced an off-by-one error in
+__set_page_owner_handle() when looking up page_ext for subpages. As a
+result, the head page page_owner info is set twice, while for the last
+tail page, it's not set at all.
+
+Fix this and also make the code more efficient by advancing the page_ext
+pointer we already have, instead of calling lookup_page_ext() for each
+subpage. Since the full size of struct page_ext is not known at compile
+time, we can't use a simple page_ext++ statement, so introduce a
+page_ext_next() inline function for that.
+
+Link: http://lkml.kernel.org/r/20190930122916.14969-2-vbabka@suse.cz
+Fixes: 7e2f2a0cd17c ("mm, page_owner: record page owner for each subpage")
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Reported-by: Kirill A. Shutemov <kirill@shutemov.name>
+Reported-by: Miles Chen <miles.chen@mediatek.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Walter Wu <walter-zh.wu@mediatek.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/page_ext.h | 8 ++++++++
+ mm/page_ext.c | 23 +++++++++--------------
+ mm/page_owner.c | 15 +++++++--------
+ 3 files changed, 24 insertions(+), 22 deletions(-)
+
+--- a/include/linux/page_ext.h
++++ b/include/linux/page_ext.h
+@@ -36,6 +36,7 @@ struct page_ext {
+ unsigned long flags;
+ };
+
++extern unsigned long page_ext_size;
+ extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+
+ #ifdef CONFIG_SPARSEMEM
+@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
+
+ struct page_ext *lookup_page_ext(const struct page *page);
+
++static inline struct page_ext *page_ext_next(struct page_ext *curr)
++{
++ void *next = curr;
++ next += page_ext_size;
++ return next;
++}
++
+ #else /* !CONFIG_PAGE_EXTENSION */
+ struct page_ext;
+
+--- a/mm/page_ext.c
++++ b/mm/page_ext.c
+@@ -67,8 +67,9 @@ static struct page_ext_operations *page_
+ #endif
+ };
+
++unsigned long page_ext_size = sizeof(struct page_ext);
++
+ static unsigned long total_usage;
+-static unsigned long extra_mem;
+
+ static bool __init invoke_need_callbacks(void)
+ {
+@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks
+
+ for (i = 0; i < entries; i++) {
+ if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
+- page_ext_ops[i]->offset = sizeof(struct page_ext) +
+- extra_mem;
+- extra_mem += page_ext_ops[i]->size;
++ page_ext_ops[i]->offset = page_ext_size;
++ page_ext_size += page_ext_ops[i]->size;
+ need = true;
+ }
+ }
+@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks
+ }
+ }
+
+-static unsigned long get_entry_size(void)
+-{
+- return sizeof(struct page_ext) + extra_mem;
+-}
+-
+ static inline struct page_ext *get_entry(void *base, unsigned long index)
+ {
+- return base + get_entry_size() * index;
++ return base + page_ext_size * index;
+ }
+
+ #if !defined(CONFIG_SPARSEMEM)
+@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(in
+ !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
+ nr_pages += MAX_ORDER_NR_PAGES;
+
+- table_size = get_entry_size() * nr_pages;
++ table_size = page_ext_size * nr_pages;
+
+ base = memblock_alloc_try_nid(
+ table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
+@@ -234,7 +229,7 @@ static int __meminit init_section_page_e
+ if (section->page_ext)
+ return 0;
+
+- table_size = get_entry_size() * PAGES_PER_SECTION;
++ table_size = page_ext_size * PAGES_PER_SECTION;
+ base = alloc_page_ext(table_size, nid);
+
+ /*
+@@ -254,7 +249,7 @@ static int __meminit init_section_page_e
+ * we need to apply a mask.
+ */
+ pfn &= PAGE_SECTION_MASK;
+- section->page_ext = (void *)base - get_entry_size() * pfn;
++ section->page_ext = (void *)base - page_ext_size * pfn;
+ total_usage += table_size;
+ return 0;
+ }
+@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
+ struct page *page = virt_to_page(addr);
+ size_t table_size;
+
+- table_size = get_entry_size() * PAGES_PER_SECTION;
++ table_size = page_ext_size * PAGES_PER_SECTION;
+
+ BUG_ON(PageReserved(page));
+ kmemleak_free(addr);
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -156,10 +156,10 @@ void __reset_page_owner(struct page *pag
+ handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
+ #endif
+
++ page_ext = lookup_page_ext(page);
++ if (unlikely(!page_ext))
++ return;
+ for (i = 0; i < (1 << order); i++) {
+- page_ext = lookup_page_ext(page + i);
+- if (unlikely(!page_ext))
+- continue;
+ __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+ #ifdef CONFIG_DEBUG_PAGEALLOC
+ if (debug_pagealloc_enabled()) {
+@@ -167,6 +167,7 @@ void __reset_page_owner(struct page *pag
+ page_owner->free_handle = handle;
+ }
+ #endif
++ page_ext = page_ext_next(page_ext);
+ }
+ }
+
+@@ -186,7 +187,7 @@ static inline void __set_page_owner_hand
+ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+ __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+
+- page_ext = lookup_page_ext(page + i);
++ page_ext = page_ext_next(page_ext);
+ }
+ }
+
+@@ -224,12 +225,10 @@ void __split_page_owner(struct page *pag
+ if (unlikely(!page_ext))
+ return;
+
+- page_owner = get_page_owner(page_ext);
+- page_owner->order = 0;
+- for (i = 1; i < (1 << order); i++) {
+- page_ext = lookup_page_ext(page + i);
++ for (i = 0; i < (1 << order); i++) {
+ page_owner = get_page_owner(page_ext);
+ page_owner->order = 0;
++ page_ext = page_ext_next(page_ext);
+ }
+ }
+
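The page_ext_next() trick above is the usual pattern for walking an array of records whose full size is only known at runtime; a small userspace illustration with made-up names:

#include <stdio.h>
#include <stdlib.h>

/* Each entry is a fixed header plus extra space sized at runtime, so
 * the cursor must advance by a byte count, not by header++ . */
struct entry_header {
	unsigned long flags;
};

static size_t entry_size;	/* header + extra data, decided "at boot" */

static struct entry_header *entry_next(struct entry_header *curr)
{
	return (struct entry_header *)((char *)curr + entry_size);
}

int main(void)
{
	size_t extra = 24;
	entry_size = sizeof(struct entry_header) + extra;

	char *table = calloc(4, entry_size);
	struct entry_header *e = (struct entry_header *)table;

	for (int i = 0; i < 4; i++) {
		e->flags = i;
		e = entry_next(e);
	}
	printf("entry size %zu, 4 entries span %zu bytes\n",
	       entry_size, 4 * entry_size);
	free(table);
	return 0;
}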
diff --git a/patches.suse/mm-page_owner-keep-owner-info-when-freeing-the-page.patch b/patches.suse/mm-page_owner-keep-owner-info-when-freeing-the-page.patch
new file mode 100644
index 0000000000..de8257abb9
--- /dev/null
+++ b/patches.suse/mm-page_owner-keep-owner-info-when-freeing-the-page.patch
@@ -0,0 +1,127 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 23 Sep 2019 15:34:39 -0700
+Subject: mm, page_owner: keep owner info when freeing the page
+Git-commit: 37389167a281f3ccb6bc958c32b2e088c7269fe0
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+For debugging purposes it might be useful to keep the owner info even
+after page has been freed, and include it in e.g. dump_page() when
+detecting a bad page state. For that, change the PAGE_EXT_OWNER flag
+meaning to "page owner info has been set at least once" and add new
+PAGE_EXT_OWNER_ACTIVE for tracking whether page is supposed to be
+currently tracked allocated or free. Adjust dump_page() accordingly,
+distinguishing free and allocated pages. In the page_owner debugfs file,
+keep printing only allocated pages so that existing scripts are not
+confused, and also because free pages are irrelevant for the memory
+statistics or leak detection that's the typical use case of the file,
+anyway.
+
+Link: http://lkml.kernel.org/r/20190820131828.22684-4-vbabka@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/page_ext.h | 1 +
+ mm/page_owner.c | 34 ++++++++++++++++++++++++----------
+ 2 files changed, 25 insertions(+), 10 deletions(-)
+
+--- a/include/linux/page_ext.h
++++ b/include/linux/page_ext.h
+@@ -18,6 +18,7 @@ struct page_ext_operations {
+
+ enum page_ext_flags {
+ PAGE_EXT_OWNER,
++ PAGE_EXT_OWNER_ACTIVE,
+ #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_YOUNG,
+ PAGE_EXT_IDLE,
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -111,7 +111,7 @@ void __reset_page_owner(struct page *pag
+ page_ext = lookup_page_ext(page + i);
+ if (unlikely(!page_ext))
+ continue;
+- __clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
++ __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+ }
+ }
+
+@@ -168,6 +168,7 @@ static inline void __set_page_owner_hand
+ page_owner->gfp_mask = gfp_mask;
+ page_owner->last_migrate_reason = -1;
+ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
++ __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+
+ page_ext = lookup_page_ext(page + i);
+ }
+@@ -243,6 +244,7 @@ void __copy_page_owner(struct page *oldp
+ * the new page, which will be freed.
+ */
+ __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
++ __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
+ }
+
+ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+@@ -302,7 +304,7 @@ void pagetypeinfo_showmixedcount_print(s
+ if (unlikely(!page_ext))
+ continue;
+
+- if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
++ if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+ continue;
+
+ page_owner = get_page_owner(page_ext);
+@@ -413,21 +415,26 @@ void __dump_page_owner(struct page *page
+ mt = gfpflags_to_migratetype(gfp_mask);
+
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+- pr_alert("page_owner info is not active (free page?)\n");
++ pr_alert("page_owner info is not present (never set?)\n");
+ return;
+ }
+
++ if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
++ pr_alert("page_owner tracks the page as allocated\n");
++ else
++ pr_alert("page_owner tracks the page as freed\n");
++
++ pr_alert("page last allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
++ page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
++
+ handle = READ_ONCE(page_owner->handle);
+ if (!handle) {
+- pr_alert("page_owner info is not active (free page?)\n");
+- return;
++ pr_alert("page_owner allocation stack trace missing\n");
++ } else {
++ nr_entries = stack_depot_fetch(handle, &entries);
++ stack_trace_print(entries, nr_entries, 0);
+ }
+
+- nr_entries = stack_depot_fetch(handle, &entries);
+- pr_alert("page allocated via order %u, migratetype %s, gfp_mask %#x(%pGg)\n",
+- page_owner->order, migratetype_names[mt], gfp_mask, &gfp_mask);
+- stack_trace_print(entries, nr_entries, 0);
+-
+ if (page_owner->last_migrate_reason != -1)
+ pr_alert("page has been migrated, last migrate reason: %s\n",
+ migrate_reason_names[page_owner->last_migrate_reason]);
+@@ -489,6 +496,13 @@ read_page_owner(struct file *file, char
+ if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
+ continue;
+
++ /*
++ * Although we do have the info about past allocation of free
++ * pages, it's not relevant for current memory usage.
++ */
++ if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
++ continue;
++
+ page_owner = get_page_owner(page_ext);
+
+ /*
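Because the debugfs file above keeps printing only pages tracked as allocated, existing consumers keep working; a userspace sketch of reading it (it assumes debugfs mounted at /sys/kernel/debug and a kernel booted with page_owner=on):

#include <stdio.h>

int main(void)
{
	/* Dump roughly the first 16 KiB of page_owner records; freed
	 * pages are not listed here, they only show up via dump_page(). */
	FILE *f = fopen("/sys/kernel/debug/page_owner", "r");
	char buf[4096];
	size_t n, total = 0;

	if (!f) {
		perror("page_owner");
		return 1;
	}
	while (total < 4 * sizeof(buf) &&
	       (n = fread(buf, 1, sizeof(buf), f)) > 0) {
		fwrite(buf, 1, n, stdout);
		total += n;
	}
	fclose(f);
	return 0;
}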
diff --git a/patches.suse/mm-page_owner-record-page-owner-for-each-subpage.patch b/patches.suse/mm-page_owner-record-page-owner-for-each-subpage.patch
new file mode 100644
index 0000000000..70d5cb55a2
--- /dev/null
+++ b/patches.suse/mm-page_owner-record-page-owner-for-each-subpage.patch
@@ -0,0 +1,131 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 23 Sep 2019 15:34:36 -0700
+Subject: mm, page_owner: record page owner for each subpage
+Git-commit: 7e2f2a0cd17cfc42acb4b6a293d5cb6c7eda9862
+Patch-mainline: v5.4-rc1
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+Patch series "debug_pagealloc improvements through page_owner", v2.
+
+The debug_pagealloc functionality serves a similar purpose on the page
+allocator level that slub_debug does on the kmalloc level, which is to
+detect bad users. One notable feature that slub_debug has is storing
+stack traces of who last allocated and freed the object. On page level we
+track allocations via page_owner, but that info is discarded when freeing,
+and we don't track freeing at all. This series improves those aspects.
+With both debug_pagealloc and page_owner enabled, we can then get bug
+reports such as the example in Patch 4.
+
+SLUB debug tracking additionally stores cpu, pid and timestamp. This could
+be added later, if deemed useful enough to justify the additional page_ext
+structure size.
+
+This patch (of 3):
+
+Currently, page owner info is only recorded for the first page of a
+high-order allocation, and copied to tail pages in the event of a split
+page. With the plan to keep previous owner info after freeing the page,
+it would be beneficial to record page owner for each subpage upon
+allocation. This increases the overhead for high orders, but that should
+be acceptable for a debugging option.
+
+The order stored for each subpage is the order of the whole allocation.
+This makes it possible to calculate the "head" pfn and to recognize "tail"
+pages (quoted because not all high-order allocations are compound pages
+with true head and tail pages). When reading the page_owner debugfs file,
+keep skipping the "tail" pages so that stats gathered by existing scripts
+don't get inflated.
+
+Link: http://lkml.kernel.org/r/20190820131828.22684-3-vbabka@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ mm/page_owner.c | 40 ++++++++++++++++++++++++++++------------
+ 1 file changed, 28 insertions(+), 12 deletions(-)
+
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -154,18 +154,23 @@ static noinline depot_stack_handle_t sav
+ return handle;
+ }
+
+-static inline void __set_page_owner_handle(struct page_ext *page_ext,
+- depot_stack_handle_t handle, unsigned int order, gfp_t gfp_mask)
++static inline void __set_page_owner_handle(struct page *page,
++ struct page_ext *page_ext, depot_stack_handle_t handle,
++ unsigned int order, gfp_t gfp_mask)
+ {
+ struct page_owner *page_owner;
++ int i;
+
+- page_owner = get_page_owner(page_ext);
+- page_owner->handle = handle;
+- page_owner->order = order;
+- page_owner->gfp_mask = gfp_mask;
+- page_owner->last_migrate_reason = -1;
++ for (i = 0; i < (1 << order); i++) {
++ page_owner = get_page_owner(page_ext);
++ page_owner->handle = handle;
++ page_owner->order = order;
++ page_owner->gfp_mask = gfp_mask;
++ page_owner->last_migrate_reason = -1;
++ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+
+- __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
++ page_ext = lookup_page_ext(page + i);
++ }
+ }
+
+ noinline void __set_page_owner(struct page *page, unsigned int order,
+@@ -178,7 +183,7 @@ noinline void __set_page_owner(struct pa
+ return;
+
+ handle = save_stack(gfp_mask);
+- __set_page_owner_handle(page_ext, handle, order, gfp_mask);
++ __set_page_owner_handle(page, page_ext, handle, order, gfp_mask);
+ }
+
+ void __set_page_owner_migrate_reason(struct page *page, int reason)
+@@ -204,8 +209,11 @@ void __split_page_owner(struct page *pag
+
+ page_owner = get_page_owner(page_ext);
+ page_owner->order = 0;
+- for (i = 1; i < (1 << order); i++)
+- __copy_page_owner(page, page + i);
++ for (i = 1; i < (1 << order); i++) {
++ page_ext = lookup_page_ext(page + i);
++ page_owner = get_page_owner(page_ext);
++ page_owner->order = 0;
++ }
+ }
+
+ void __copy_page_owner(struct page *oldpage, struct page *newpage)
+@@ -484,6 +492,13 @@ read_page_owner(struct file *file, char
+ page_owner = get_page_owner(page_ext);
+
+ /*
++ * Don't print "tail" pages of high-order allocations as that
++ * would inflate the stats.
++ */
++ if (!IS_ALIGNED(pfn, 1 << page_owner->order))
++ continue;
++
++ /*
+ * Access to page_ext->handle isn't synchronous so we should
+ * be careful to access it.
+ */
+@@ -562,7 +577,8 @@ static void init_pages_in_zone(pg_data_t
+ continue;
+
+ /* Found early allocated page */
+- __set_page_owner_handle(page_ext, early_handle, 0, 0);
++ __set_page_owner_handle(page, page_ext, early_handle,
++ 0, 0);
+ count++;
+ }
+ cond_resched();
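Since every subpage now records the order of the whole allocation, the "head" of an allocation is simply the pfn aligned to that order, which is the test the read_page_owner() hunk above adds to skip "tail" pages. A tiny userspace illustration of that check, with made-up pfn values:

#include <stdio.h>

#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned int order = 3;			/* an 8-page allocation */
	unsigned long head_pfn = 4096;		/* made-up, order-aligned */

	for (unsigned long pfn = head_pfn; pfn < head_pfn + (1UL << order); pfn++)
		printf("pfn %lu: %s\n", pfn,
		       IS_ALIGNED(pfn, 1UL << order) ?
				"head, printed" : "tail, skipped");
	return 0;
}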
diff --git a/patches.suse/mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch b/patches.suse/mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch
new file mode 100644
index 0000000000..09ccd22d1b
--- /dev/null
+++ b/patches.suse/mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch
@@ -0,0 +1,94 @@
+From: Vlastimil Babka <vbabka@suse.cz>
+Date: Mon, 14 Oct 2019 14:11:47 -0700
+Subject: mm, page_owner: rename flag indicating that page is allocated
+Git-commit: fdf3bf809162592b54c278b9b0e84f3e126f8844
+Patch-mainline: v5.5 or v5.4-rc4 (next release)
+References: jsc#SLE-8959, bsc#1144653, VM Debug Functionality
+
+Commit 37389167a281 ("mm, page_owner: keep owner info when freeing the
+page") has introduced a flag PAGE_EXT_OWNER_ACTIVE to indicate that page
+is tracked as being allocated. Kirill suggested naming it
+PAGE_EXT_OWNER_ALLOCATED to make it more clear, as "active is somewhat
+loaded term for a page".
+
+Link: http://lkml.kernel.org/r/20190930122916.14969-4-vbabka@suse.cz
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Suggested-by: Kirill A. Shutemov <kirill@shutemov.name>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Cc: Walter Wu <walter-zh.wu@mediatek.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+---
+ include/linux/page_ext.h | 2 +-
+ mm/page_owner.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/include/linux/page_ext.h
++++ b/include/linux/page_ext.h
+@@ -18,7 +18,7 @@ struct page_ext_operations {
+
+ enum page_ext_flags {
+ PAGE_EXT_OWNER,
+- PAGE_EXT_OWNER_ACTIVE,
++ PAGE_EXT_OWNER_ALLOCATED,
+ #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
+ PAGE_EXT_YOUNG,
+ PAGE_EXT_IDLE,
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -152,7 +152,7 @@ void __reset_page_owner(struct page *pag
+ if (unlikely(!page_ext))
+ return;
+ for (i = 0; i < (1 << order); i++) {
+- __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
++ __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+ page_owner = get_page_owner(page_ext);
+ page_owner->free_handle = handle;
+ page_ext = page_ext_next(page_ext);
+@@ -173,7 +173,7 @@ static inline void __set_page_owner_hand
+ page_owner->gfp_mask = gfp_mask;
+ page_owner->last_migrate_reason = -1;
+ __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
+- __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
++ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+
+ page_ext = page_ext_next(page_ext);
+ }
+@@ -247,7 +247,7 @@ void __copy_page_owner(struct page *oldp
+ * the new page, which will be freed.
+ */
+ __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
+- __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
++ __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
+ }
+
+ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
+@@ -307,7 +307,7 @@ void pagetypeinfo_showmixedcount_print(s
+ if (unlikely(!page_ext))
+ continue;
+
+- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
++ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+ continue;
+
+ page_owner = get_page_owner(page_ext);
+@@ -422,7 +422,7 @@ void __dump_page_owner(struct page *page
+ return;
+ }
+
+- if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
++ if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+ pr_alert("page_owner tracks the page as allocated\n");
+ else
+ pr_alert("page_owner tracks the page as freed\n");
+@@ -512,7 +512,7 @@ read_page_owner(struct file *file, char
+ * Although we do have the info about past allocation of free
+ * pages, it's not relevant for current memory usage.
+ */
+- if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
++ if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
+ continue;
+
+ page_owner = get_page_owner(page_ext);
diff --git a/patches.suse/x86-mm-fix-fast-gup-paravirt.patch b/patches.suse/x86-mm-fix-fast-gup-paravirt.patch
deleted file mode 100644
index ca54e823e0..0000000000
--- a/patches.suse/x86-mm-fix-fast-gup-paravirt.patch
+++ /dev/null
@@ -1,79 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Subject: x86, mm: fix fast GUP with hyper-based TLB flushing
-Patch-mainline: never, mainline fixed by the way of major rewrite, but stable submission might be possible
-References: VM Functionality, bsc#1140903
-
-The x86 version of get_user_pages_fast() relies on disabled interrupts to
-synchronize gup_pte_range() between gup_get_pte(ptep); and get_page() agains
-a parallel munmap. The munmap side nulls the pte, then flushes TLBs, then
-releases the page. As TLB flush is done synchronously via acked interrupt,
-disabling interrupts blocks the page release, and get_page(), which assumes
-existing reference on page, is thus safe.
-However when TLB flush is done by a hypercall, e.g. in Xen PV or HyperV guests,
-there is no blocking thanks to disabled interrupts, and get_page() can succeed
-on a page that was already freed or even reused.
-
-Fix this by removing the dependency on TLB flush interrupts the same way as the
-generic get_user_pages_fast() code by using page_cache_add_speculative() and
-revalidating the PTE contents after pinning the page.
-
-Reproduced-by: Oscar Salvador <osalvador@suse.de>
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-
----
- arch/x86/mm/gup.c | 27 +++++++++++++++++++++++++--
- 1 file changed, 25 insertions(+), 2 deletions(-)
-
---- a/arch/x86/mm/gup.c
-+++ b/arch/x86/mm/gup.c
-@@ -98,6 +98,20 @@ static inline int pte_allows_gup(unsigne
- }
-
- /*
-+ * Return the compund head page with ref appropriately incremented,
-+ * or NULL if that failed.
-+ */
-+static inline struct page *try_get_compound_head(struct page *page, int refs)
-+{
-+ struct page *head = compound_head(page);
-+ if (WARN_ON_ONCE(page_ref_count(head) < 0))
-+ return NULL;
-+ if (unlikely(!page_cache_add_speculative(head, refs)))
-+ return NULL;
-+ return head;
-+}
-+
-+/*
- * The performance critical leaf functions are made noinline otherwise gcc
- * inlines everything into a single function which results in too much
- * register pressure.
-@@ -117,7 +131,7 @@ static noinline int gup_pte_range(pmd_t
- ptem = ptep = pte_offset_map(&pmd, addr);
- do {
- pte_t pte = gup_get_pte(ptep);
-- struct page *page;
-+ struct page *head, *page;
-
- /* Similar to the PMD case, NUMA hinting must take slow path */
- if (pte_protnone(pte))
-@@ -137,10 +151,19 @@ static noinline int gup_pte_range(pmd_t
-
- VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
- page = pte_page(pte);
-- if (unlikely(!try_get_page(page))) {
-+
-+ head = try_get_compound_head(page, 1);
-+ if (!head) {
- put_dev_pagemap(pgmap);
- break;
- }
-+
-+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
-+ put_page(head);
-+ put_dev_pagemap(pgmap);
-+ break;
-+ }
-+
- put_dev_pagemap(pgmap);
- SetPageReferenced(page);
- pages[*nr] = page;
diff --git a/series.conf b/series.conf
index be4b53763b..59447cea05 100644
--- a/series.conf
+++ b/series.conf
@@ -811,6 +811,9 @@
patches.suse/kbuild-clean-compressed-initramfs-image.patch
patches.suse/ocfs2-wait-for-recovering-done-after-direct-unlock-r.patch
patches.suse/kmemleak-increase-DEBUG_KMEMLEAK_EARLY_LOG_SIZE-defa.patch
+ patches.suse/mm-page_owner-record-page-owner-for-each-subpage.patch
+ patches.suse/mm-page_owner-keep-owner-info-when-freeing-the-page.patch
+ patches.suse/mm-page_owner-debug_pagealloc-save-and-dump-freeing-stack-trace.patch
patches.suse/z3fold-fix-memory-leak-in-kmem-cache.patch
patches.suse/mm-compaction.c-clear-total_-migrate-free-_scanned-b.patch
patches.suse/memcg-oom-don-t-require-__GFP_FS-when-invoking-memcg.patch
@@ -1058,6 +1061,10 @@
patches.suse/xfs-move-local-to-extent-inode-logging-into-bmap-hel.patch
patches.suse/powerpc-pseries-Remove-confusing-warning-message.patch
patches.suse/x86-cpu-add-comet-lake-to-the-intel-cpu-models-header.patch
+ patches.suse/mm-page_owner-fix-off-by-one-error-in-_set_page_owner_handle.patch
+ patches.suse/mm-page_owner-decouple-freeing-stack-trace-from-debug_pagealloc.patch
+ patches.suse/mm-page_owner-rename-flag-indicating-that-page-is-allocated.patch
+ patches.suse/mm-compaction-fix-wrong-pfn-handling-in-_reset_isolation_pfn.patch
patches.suse/scsi-qla2xxx-Remove-WARN_ON_ONCE-in-qla2x00_status_c.patch
# davem/net
@@ -1563,8 +1570,6 @@
+tbogendoerfer patches.suse/s390-sles15-00-04-04-kmsg-add-VNIC-Characteristics-msg-documentation.patch
# trenn
+trenn patches.suse/cpufreq-intel_pstate-use-setpoint-of-10-on-servers.patch
-# vbabka
-+vbabka patches.suse/x86-mm-fix-fast-gup-paravirt.patch
# wqu
+wqu patches.suse/0001-btrfs-qgroup-Make-qgroup-async-transaction-commit-mo.patch
# yousaf.kaukab