author		Takashi Iwai <tiwai@suse.de>	2019-02-22 14:06:25 +0100
committer	Takashi Iwai <tiwai@suse.de>	2019-02-22 14:06:25 +0100
commit		c311db7248f02b71021734b3852b75217e38276d (patch)
tree		6b5186ffe766779ea3a4d66948baa39fccfea670
parent		f75da847decfbd36f657496c1bba62db6f7ff983 (diff)
parent		3d51d2b828321216499f0d9843bcf7109eee26ba (diff)

Merge branch 'users/mgorman/SLE15/for-next' into SLE15

Pull mm fixes from Mel Gorman

suse-commit: 10d91b9b69d979998bf314325d5256ebad2db425
-rw-r--r--	mm/hmm.c	 3
-rw-r--r--	mm/ksm.c	14
-rw-r--r--	mm/mmap.c	32
-rw-r--r--	mm/rmap.c	 8
-rw-r--r--	mm/shmem.c	 6
5 files changed, 55 insertions(+), 8 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 0feb4188f68a..f09dc7f10563 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -277,7 +277,8 @@ static int hmm_pfns_bad(unsigned long addr,
 			unsigned long end,
 			struct mm_walk *walk)
 {
-	struct hmm_range *range = walk->private;
+	struct hmm_vma_walk *hmm_vma_walk = walk->private;
+	struct hmm_range *range = hmm_vma_walk->range;
 	hmm_pfn_t *pfns = range->pfns;
 	unsigned long i;
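
Note on the mm/hmm.c hunk: walk->private is populated with a struct hmm_vma_walk, not with the struct hmm_range directly, so the old code dereferenced the wrong type; the fix unwraps the walk-private pointer first. Below is a minimal standalone model of the corrected pointer chain; the struct layouts are simplified stand-ins for the mm/hmm.c types, not kernel code.

#include <stdio.h>

struct hmm_range    { unsigned long *pfns; };
struct hmm_vma_walk { struct hmm_range *range; };
struct mm_walk      { void *private; };	/* opaque, set by the walk caller */

static unsigned long *range_pfns(struct mm_walk *walk)
{
	/* Unwrap the hmm_vma_walk first, then reach the range it carries. */
	struct hmm_vma_walk *hmm_vma_walk = walk->private;

	return hmm_vma_walk->range->pfns;
}

int main(void)
{
	unsigned long pfns[1] = { 42 };
	struct hmm_range range = { .pfns = pfns };
	struct hmm_vma_walk vma_walk = { .range = &range };
	struct mm_walk walk = { .private = &vma_walk };

	printf("%lu\n", *range_pfns(&walk));	/* prints 42 */
	return 0;
}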
diff --git a/mm/ksm.c b/mm/ksm.c
index 85e8c5fc95e6..c78d14460552 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -183,6 +183,8 @@ struct rmap_item {
 #define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
 #define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
 #define STABLE_FLAG	0x200	/* is listed from the stable tree */
+#define KSM_FLAG_MASK	(SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
+						/* to mask all the flags */
 
 /* The stable and unstable tree heads */
 static struct rb_root one_stable_tree[1] = { RB_ROOT };
@@ -1973,10 +1975,15 @@ again:
 		anon_vma_lock_read(anon_vma);
 		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
 					       0, ULONG_MAX) {
+			unsigned long addr;
+
 			cond_resched();
 			vma = vmac->vma;
-			if (rmap_item->address < vma->vm_start ||
-			    rmap_item->address >= vma->vm_end)
+
+			/* Ignore the stable/unstable/sqnr flags */
+			addr = rmap_item->address & ~KSM_FLAG_MASK;
+
+			if (addr < vma->vm_start || addr >= vma->vm_end)
 				continue;
 			/*
 			 * Initially we examine only the vma which covers this
@@ -1990,8 +1997,7 @@ again:
 			if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
 				continue;
 
-			if (!rwc->rmap_one(page, vma,
-					   rmap_item->address, rwc->arg)) {
+			if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
 				anon_vma_unlock_read(anon_vma);
 				return;
 			}
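
Note on the mm/ksm.c hunks: rmap_item->address is page aligned, so KSM stores its seqnr/unstable/stable bookkeeping flags in the low bits of the same word. Before the word is used as a real virtual address (in the VMA bounds check and in the address passed to rwc->rmap_one), those bits must be stripped with the new KSM_FLAG_MASK. A small standalone demo of the masking follows; the constants are copied from the hunk above, while the address value is made up.

#include <stdio.h>

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */
#define KSM_FLAG_MASK	(SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)

int main(void)
{
	/* A page-aligned address with STABLE_FLAG and a seqnr packed in. */
	unsigned long address = 0x2000UL | STABLE_FLAG | 0x7;
	unsigned long addr = address & ~KSM_FLAG_MASK;

	printf("raw    = %#lx (flag bits set, not a usable address)\n", address);
	printf("masked = %#lx (page aligned again)\n", addr);
	return 0;
}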
diff --git a/mm/mmap.c b/mm/mmap.c
index b62900b4d095..39b871be9a6e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1315,6 +1315,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
 	return 0;
 }
 
+static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
+{
+	if (S_ISREG(inode->i_mode))
+		return inode->i_sb->s_maxbytes;
+
+	if (S_ISBLK(inode->i_mode))
+		return MAX_LFS_FILESIZE;
+
+	/* Special "we do even unsigned file positions" case */
+	if (file->f_mode & FMODE_UNSIGNED_OFFSET)
+		return 0;
+
+	/* Yes, random drivers might want more. But I'm tired of buggy drivers */
+	return ULONG_MAX;
+}
+
+static inline bool file_mmap_ok(struct file *file, struct inode *inode,
+				unsigned long pgoff, unsigned long len)
+{
+	u64 maxsize = file_mmap_size_max(file, inode);
+
+	if (maxsize && len > maxsize)
+		return false;
+	maxsize -= len;
+	if (pgoff > maxsize >> PAGE_SHIFT)
+		return false;
+	return true;
+}
+
 /*
  * The caller must hold down_write(&current->mm->mmap_sem).
  */
@@ -1389,6 +1418,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
 	struct inode *inode = file_inode(file);
 	unsigned long flags_mask;
 
+	if (!file_mmap_ok(file, inode, pgoff, len))
+		return -EOVERFLOW;
+
 	flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
 
 	switch (flags & MAP_TYPE) {
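
Note on the mm/mmap.c hunks: do_mmap() now fails early with -EOVERFLOW when the requested page offset would reach past what the backing object can address, instead of letting later offset arithmetic wrap. The check is plain u64 arithmetic and can be exercised in isolation; here is a userspace sketch with made-up values, where the PAGE_SHIFT of 12 and the 2 GiB maxsize are assumptions standing in for the real page size and for file_mmap_size_max().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

/* Same arithmetic as the kernel hunk; maxsize replaces the file lookup. */
static bool file_mmap_ok(uint64_t maxsize, uint64_t pgoff, uint64_t len)
{
	if (maxsize && len > maxsize)
		return false;
	maxsize -= len;
	if (pgoff > maxsize >> PAGE_SHIFT)
		return false;
	return true;
}

int main(void)
{
	uint64_t maxsize = (uint64_t)1 << 31;	/* e.g. a 2 GiB s_maxbytes */

	printf("%d\n", file_mmap_ok(maxsize, 0, 4096));			/* 1: fits */
	printf("%d\n", file_mmap_ok(maxsize, (uint64_t)1 << 40, 4096));	/* 0: pgoff past EOF */
	return 0;
}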
diff --git a/mm/rmap.c b/mm/rmap.c
index 604fefd499f0..91f9df2567f1 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
 
 #include <asm/tlbflush.h>
@@ -1492,11 +1493,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			set_pte_at(mm, address, pvmw.pte, pteval);
-		} else if (pte_unused(pteval)) {
+		} else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
 			/*
 			 * The guest indicated that the page content is of no
 			 * interest anymore. Simply discard the pte, vmscan
 			 * will take care of the rest.
+			 * A future reference will then fault in a new zero
+			 * page. When userfaultfd is active, we must not drop
+			 * this page though, as its main user (postcopy
+			 * migration) will not expect userfaults on already
+			 * copied pages.
 			 */
 			dec_mm_counter(mm, mm_counter(page));
 		} else if (IS_ENABLED(CONFIG_MIGRATION) &&
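
Note on the mm/rmap.c hunks: a pte the guest marked as unused used to be discarded unconditionally, but when userfaultfd is armed on the VMA the later fault-in of a zero page would surface as a userfault that postcopy migration does not expect, so the page is now kept. The guard reduces to a single predicate; a trivial standalone model, with plain booleans standing in for pte_unused() and userfaultfd_armed():

#include <stdbool.h>
#include <stdio.h>

/* May the pte simply be dropped, leaving a zero-page fault-in later? */
static bool may_discard_pte(bool pte_unused, bool uffd_armed)
{
	return pte_unused && !uffd_armed;
}

int main(void)
{
	printf("%d\n", may_discard_pte(true, false));	/* 1: discard */
	printf("%d\n", may_discard_pte(true, true));	/* 0: keep the page */
	return 0;
}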
diff --git a/mm/shmem.c b/mm/shmem.c
index 3f5af65d76bc..e5a01786f187 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1530,11 +1530,13 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 {
 	struct page *oldpage, *newpage;
 	struct address_space *swap_mapping;
+	swp_entry_t entry;
 	pgoff_t swap_index;
 	int error;
 
 	oldpage = *pagep;
-	swap_index = page_private(oldpage);
+	entry.val = page_private(oldpage);
+	swap_index = swp_offset(entry);
 	swap_mapping = page_mapping(oldpage);
 
 	/*
@@ -1553,7 +1555,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
 	__SetPageLocked(newpage);
 	__SetPageSwapBacked(newpage);
 	SetPageUptodate(newpage);
-	set_page_private(newpage, swap_index);
+	set_page_private(newpage, entry.val);
 	SetPageSwapCache(newpage);
 
 	/*
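
Note on the mm/shmem.c hunks: page_private() of a swap-cache page holds a packed swp_entry_t (swap type plus offset), not the bare swap offset, so shmem_replace_page() must decode the index with swp_offset() and propagate the full entry value to the replacement page. A standalone model of the encoding follows; the bit split is illustrative only, since the kernel's actual layout depends on the configuration.

#include <stdio.h>

#define SWP_TYPE_SHIFT 58	/* illustrative split, not the kernel's exact layout */

typedef struct { unsigned long long val; } swp_entry_t;

static swp_entry_t swp_entry(unsigned long long type, unsigned long long offset)
{
	return (swp_entry_t){ (type << SWP_TYPE_SHIFT) | offset };
}

static unsigned long long swp_offset(swp_entry_t entry)
{
	return entry.val & ((1ULL << SWP_TYPE_SHIFT) - 1);
}

int main(void)
{
	swp_entry_t entry = swp_entry(1, 0x1234);	/* type 1, offset 0x1234 */

	/* The old code used the packed value directly as the swap index: */
	printf("page_private (packed) = %#llx\n", entry.val);
	/* The fix decodes the offset first: */
	printf("swap index (decoded)  = %#llx\n", swp_offset(entry));
	return 0;
}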