path: root/include/linux/memcontrol.h
author		Greg Thelen <gthelen@google.com>	2011-01-13 15:47:37 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 17:32:50 -0800
commit		2a7106f2cb0768d00fe8c1eb42a754a7d8518f08 (patch)
tree		730bef06e752c1edcb2d475fd193f94bea00bf6a /include/linux/memcontrol.h
parent		memcg: document cgroup dirty memory interfaces (diff)
memcg: create extensible page stat update routines
Replace usage of the mem_cgroup_update_file_mapped() memcg statistic update routine with two new routines:
* mem_cgroup_inc_page_stat()
* mem_cgroup_dec_page_stat()

As before, only the file_mapped statistic is managed.  However, these more general interfaces allow for new statistics to be more easily added.  New statistics are added with memcg dirty page accounting.

Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
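For a caller, the conversion is mechanical: wherever the file_mapped counter was previously adjusted through mem_cgroup_update_file_mapped(), the new wrappers are used instead. A minimal before/after sketch of an accounting site (the call sites shown are illustrative and not part of this patch):

	/* before: explicit +1/-1 through the file_mapped-only helper */
	mem_cgroup_update_file_mapped(page, 1);
	mem_cgroup_update_file_mapped(page, -1);

	/* after: generic helpers keyed by an enum mem_cgroup_page_stat_item */
	mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
	mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);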
Diffstat
-rw-r--r--  include/linux/memcontrol.h  31
1 file changed, 28 insertions(+), 3 deletions(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 159a0762aeaf..067115ce6b3e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -25,6 +25,11 @@ struct page_cgroup;
 struct page;
 struct mm_struct;
 
+/* Stats that can be updated by kernel. */
+enum mem_cgroup_page_stat_item {
+	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
+};
+
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
@@ -121,7 +126,22 @@ static inline bool mem_cgroup_disabled(void)
 	return false;
 }
 
-void mem_cgroup_update_file_mapped(struct page *page, int val);
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx,
+				 int val);
+
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, 1);
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+	mem_cgroup_update_page_stat(page, idx, -1);
+}
+
 unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
 					    gfp_t gfp_mask);
 u64 mem_cgroup_get_limit(struct mem_cgroup *mem);
@@ -293,8 +313,13 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
 }
 
-static inline void mem_cgroup_update_file_mapped(struct page *page,
-						 int val)
+static inline void mem_cgroup_inc_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
+{
+}
+
+static inline void mem_cgroup_dec_page_stat(struct page *page,
+					    enum mem_cgroup_page_stat_item idx)
 {
 }
 
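Because the interface is keyed by enum mem_cgroup_page_stat_item, a new per-memcg page statistic only needs a new enum entry plus inc/dec calls at its accounting sites. A sketch of what the dirty-page follow-up mentioned in the changelog could look like (MEMCG_NR_FILE_DIRTY and the example function below are assumptions for illustration, not part of this commit):

/* Hypothetical extension of the enum introduced above. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED,	/* # of pages charged as file rss */
	MEMCG_NR_FILE_DIRTY,	/* hypothetical: # of dirty file pages */
};

/* Hypothetical accounting site: charge/uncharge a dirty file page. */
static void example_account_file_dirty(struct page *page, bool dirty)
{
	if (dirty)
		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY);
	else
		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_DIRTY);
}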