Home > GIT Browse > SLE12-SP5-AZURE
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Petr Tesarik <ptesarik@suse.cz> 2019-08-20 15:24:13 +0200
committer: Petr Tesarik <ptesarik@suse.cz> 2019-08-20 15:24:13 +0200
commit4ad1f5fed9a2479f2a1705e10ad0f6f816865c0c (patch)
tree04d0e4d8ff4cb60140392faa78297a2e0ba7e8de
parent498123f5846f022ba57e610b2a412afdd556845d (diff)
parent136a1c2d34c93ebe981999096370e534d7228a69 (diff)
Merge branch 'users/mgorman/SLE15-SP1/for-next' into SLE15-SP1
Pull a mm fix from Mel Gorman.

suse-commit: 62b9ee69b0ce30c825db8cb84d00466aaf7235eb
-rw-r--r--mm/vmscan.c11
1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e32e44c5137d..66934bc43654 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -97,9 +97,6 @@ struct scan_control {
/* Can pages be swapped as part of reclaim? */
unsigned int may_swap:1;
- /* e.g. boosted watermark reclaim leaves slabs alone */
- unsigned int may_shrinkslab:1;
-
/*
* Cgroups are not reclaimed below their configured memory.low,
* unless we threaten to OOM. If any cgroups are skipped due to
@@ -2557,7 +2554,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
shrink_node_memcg(pgdat, memcg, sc, &lru_pages);
node_lru_pages += lru_pages;
- if (memcg && sc->may_shrinkslab)
+ if (memcg)
shrink_slab(sc->gfp_mask, pgdat->node_id,
memcg, sc->nr_scanned - scanned,
lru_pages);
@@ -2588,7 +2585,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
* Shrink the slab caches in the same proportion that
* the eligible LRU pages were scanned.
*/
- if (global_reclaim(sc) && sc->may_shrinkslab)
+ if (global_reclaim(sc))
shrink_slab(sc->gfp_mask, pgdat->node_id, NULL,
sc->nr_scanned - nr_scanned,
node_lru_pages);
@@ -2989,7 +2986,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = 1,
- .may_shrinkslab = 1,
};
/*
@@ -3026,7 +3022,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg,
.may_unmap = 1,
.reclaim_idx = MAX_NR_ZONES - 1,
.may_swap = !noswap,
- .may_shrinkslab = 1,
};
unsigned long lru_pages;
@@ -3072,7 +3067,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
.may_writepage = !laptop_mode,
.may_unmap = 1,
.may_swap = may_swap,
- .may_shrinkslab = 1,
};
/*
@@ -3374,7 +3368,6 @@ restart:
*/
sc.may_writepage = !laptop_mode && !nr_boost_reclaim;
sc.may_swap = !nr_boost_reclaim;
- sc.may_shrinkslab = !nr_boost_reclaim;
/*
* Do some background aging of the anon list, to give