Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	32
1 file changed, 15 insertions, 17 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 725b5d4784f7..39657012e2d8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -853,7 +853,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
* gets returned with a refcount of 0.
*/
static int __remove_mapping(struct address_space *mapping, struct page *page,
- bool reclaimed)
+ bool reclaimed, struct mem_cgroup *target_memcg)
{
unsigned long flags;
int refcount;
@@ -925,7 +925,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
*/
if (reclaimed && page_is_file_cache(page) &&
!mapping_exiting(mapping) && !dax_mapping(mapping))
- shadow = workingset_eviction(page);
+ shadow = workingset_eviction(page, target_memcg);
__delete_from_page_cache(page, shadow);
xa_unlock_irqrestore(&mapping->i_pages, flags);
@@ -948,7 +948,7 @@ cannot_free:
*/
int remove_mapping(struct address_space *mapping, struct page *page)
{
- if (__remove_mapping(mapping, page, false)) {
+ if (__remove_mapping(mapping, page, false, NULL)) {
/*
* Unfreezing the refcount with 1 rather than 2 effectively
* drops the pagecache ref for us without requiring another
@@ -1426,7 +1426,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
count_vm_event(PGLAZYFREED);
count_memcg_page_event(page, PGLAZYFREED);
- } else if (!mapping || !__remove_mapping(mapping, page, true))
+ } else if (!mapping || !__remove_mapping(mapping, page, true,
+ sc->target_mem_cgroup))
goto keep_locked;
unlock_page(page);
@@ -2189,6 +2190,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
enum lru_list inactive_lru = file * LRU_FILE;
unsigned long inactive, active;
unsigned long inactive_ratio;
+ struct lruvec *target_lruvec;
unsigned long refaults;
unsigned long gb;
@@ -2200,8 +2202,9 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
* is being established. Disable active list protection to get
* rid of the stale workingset quickly.
*/
- refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
- if (file && lruvec->refaults != refaults) {
+ target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
+ refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
+ if (file && target_lruvec->refaults != refaults) {
inactive_ratio = 0;
} else {
gb = (inactive + active) >> (30 - PAGE_SHIFT);
@@ -2973,19 +2976,14 @@ static void shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask = orig_mask;
}
-static void snapshot_refaults(struct mem_cgroup *root_memcg, pg_data_t *pgdat)
+static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
{
- struct mem_cgroup *memcg;
-
- memcg = mem_cgroup_iter(root_memcg, NULL, NULL);
- do {
- unsigned long refaults;
- struct lruvec *lruvec;
+ struct lruvec *target_lruvec;
+ unsigned long refaults;
- lruvec = mem_cgroup_lruvec(memcg, pgdat);
- refaults = lruvec_page_state_local(lruvec, WORKINGSET_ACTIVATE);
- lruvec->refaults = refaults;
- } while ((memcg = mem_cgroup_iter(root_memcg, memcg, NULL)));
+ target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
+ refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
+ target_lruvec->refaults = refaults;
}
/*