author		Johannes Weiner <hannes@cmpxchg.org>	2019-11-30 17:55:31 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-12-01 12:59:06 -0800
commit		de3b01506ea494b46aab05dc143b69adbf2aaa9d (patch)
tree		2a616d9b2fe8a989cd1030a4112544cf071db90c /mm
parent		mm/vmscan.c: remove unused scan_control parameter from pageout() (diff)
mm: vmscan: simplify lruvec_lru_size()
Patch series "mm: vmscan: cgroup-related cleanups".

Here are 8 patches that clean up the reclaim code's interaction with
cgroups a bit. They're not supposed to change any behavior, just make
the implementation easier to understand and work with.

This patch (of 8):

This function currently takes the node or lruvec size and subtracts
the zones that are excluded by the classzone index of the allocation.
It uses four different types of counters to do this.

Just add up the eligible zones.

[cai@lca.pw: fix an undefined behavior for zone id]
  Link: http://lkml.kernel.org/r/20191108204407.1435-1-cai@lca.pw
[akpm@linux-foundation.org: deal with the MAX_NR_ZONES special case, per Qian Cai]
  Link: http://lkml.kernel.org/r/64E60F6F-7582-427B-8DD5-EF97B1656F5A@lca.pw
Link: http://lkml.kernel.org/r/20191022144803.302233-2-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
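To make the before/after logic easy to compare outside the kernel tree, here is
a minimal standalone C sketch of the same computation. It is illustrative only:
the zone count, the sample per-zone LRU numbers in zone_lru_pages[], and the
helper names lru_size_old()/lru_size_new() are all invented for this sketch; in
the kernel the counts come from the memcg and per-zone vmstat counters shown in
the diff below.

/* Standalone sketch (illustrative only): compare the old
 * "total minus excluded zones" computation with the new
 * "sum the eligible zones" one.  The per-zone LRU counts
 * are made-up sample data. */
#include <stdio.h>

#define MAX_NR_ZONES 4

static const unsigned long zone_lru_pages[MAX_NR_ZONES] = { 100, 200, 300, 0 };

/* Old approach: add up all zones, then walk the zones above the
 * classzone index and subtract them again, clamping at zero. */
static unsigned long lru_size_old(int zone_idx)
{
	unsigned long lru_size = 0;
	int zid;

	for (zid = 0; zid < MAX_NR_ZONES; zid++)
		lru_size += zone_lru_pages[zid];

	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
		unsigned long size = zone_lru_pages[zid];

		lru_size -= size < lru_size ? size : lru_size;
	}
	return lru_size;
}

/* New approach: one loop over the eligible zones.  The extra
 * zid < MAX_NR_ZONES test keeps the array access in bounds when
 * the caller passes zone_idx == MAX_NR_ZONES ("all zones"). */
static unsigned long lru_size_new(int zone_idx)
{
	unsigned long size = 0;
	int zid;

	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++)
		size += zone_lru_pages[zid];
	return size;
}

int main(void)
{
	int classzone;

	/* Both versions should agree for every classzone index,
	 * including the MAX_NR_ZONES special case. */
	for (classzone = 0; classzone <= MAX_NR_ZONES; classzone++)
		printf("classzone %d: old=%lu new=%lu\n", classzone,
		       lru_size_old(classzone), lru_size_new(classzone));
	return 0;
}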
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	21
1 file changed, 5 insertions(+), 16 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44f5c54d6dd8..266620f7c814 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -351,32 +351,21 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  */
 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
-	unsigned long lru_size = 0;
+	unsigned long size = 0;
 	int zid;
 
-	if (!mem_cgroup_disabled()) {
-		for (zid = 0; zid < MAX_NR_ZONES; zid++)
-			lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
-	} else
-		lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
-
-	for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
+	for (zid = 0; zid <= zone_idx && zid < MAX_NR_ZONES; zid++) {
 		struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid];
-		unsigned long size;
 
 		if (!managed_zone(zone))
 			continue;
 
 		if (!mem_cgroup_disabled())
-			size = mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+			size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
 		else
-			size = zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zid],
-					       NR_ZONE_LRU_BASE + lru);
-		lru_size -= min(size, lru_size);
+			size += zone_page_state(zone, NR_ZONE_LRU_BASE + lru);
 	}
-
-	return lru_size;
-
+	return size;
 }
 
 /*
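A note on the rewritten loop bound: the bracketed fixes in the changelog above
are what turned a plain zid <= zone_idx into zid <= zone_idx && zid <
MAX_NR_ZONES. Callers can pass zone_idx == MAX_NR_ZONES to mean "no classzone
restriction"; without the second clause the loop would read
node_zones[MAX_NR_ZONES], one element past the end of the array, which is
undefined behavior. The old code sidestepped this only incidentally, by
iterating from zone_idx + 1 upward.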