Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c  47
1 file changed, 39 insertions(+), 8 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 47bd7f13f526..327dcda3ebf6 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -62,6 +62,7 @@
#include <linux/oom.h>
#include <linux/lockdep.h>
#include <linux/file.h>
+#include <linux/tracehook.h>
#include "internal.h"
#include <net/sock.h>
#include <net/ip.h>
@@ -1972,6 +1973,31 @@ static int memcg_cpu_hotplug_callback(struct notifier_block *nb,
return NOTIFY_OK;
}
+/*
+ * Scheduled by try_charge() to be executed from the userland return path
+ * and reclaims memory over the high limit.
+ */
+void mem_cgroup_handle_over_high(void)
+{
+ unsigned int nr_pages = current->memcg_nr_pages_over_high;
+ struct mem_cgroup *memcg, *pos;
+
+ if (likely(!nr_pages))
+ return;
+
+ pos = memcg = get_mem_cgroup_from_mm(current->mm);
+
+ do {
+ if (page_counter_read(&pos->memory) <= pos->high)
+ continue;
+ mem_cgroup_events(pos, MEMCG_HIGH, 1);
+ try_to_free_mem_cgroup_pages(pos, nr_pages, GFP_KERNEL, true);
+ } while ((pos = parent_mem_cgroup(pos)));
+
+ css_put(&memcg->css);
+ current->memcg_nr_pages_over_high = 0;
+}
+
static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
unsigned int nr_pages)
{
@@ -2080,17 +2106,22 @@ done_restock:
css_get_many(&memcg->css, batch);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
- if (!(gfp_mask & __GFP_WAIT))
- goto done;
+
/*
- * If the hierarchy is above the normal consumption range,
- * make the charging task trim their excess contribution.
+ * If the hierarchy is above the normal consumption range, schedule
+ * reclaim on returning to userland. We can perform reclaim here
+ * if __GFP_WAIT but let's always punt for simplicity and so that
+ * GFP_KERNEL can consistently be used during reclaim. @memcg is
+ * not recorded as it most likely matches current's and won't
+ * change in the meantime. As high limit is checked again before
+ * reclaim, the cost of mismatch is negligible.
*/
do {
- if (page_counter_read(&memcg->memory) <= memcg->high)
- continue;
- mem_cgroup_events(memcg, MEMCG_HIGH, 1);
- try_to_free_mem_cgroup_pages(memcg, nr_pages, gfp_mask, true);
+ if (page_counter_read(&memcg->memory) > memcg->high) {
+ current->memcg_nr_pages_over_high += nr_pages;
+ set_notify_resume(current);
+ break;
+ }
} while ((memcg = parent_mem_cgroup(memcg)));
done:
return ret;
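
The hunks above are only half of the mechanism, since this diff is limited to mm/memcontrol.c: the per-task counter and the hook that actually runs the deferred reclaim on return to userland live in other files. The sketch below shows, roughly, how the pieces are expected to fit together. It assumes the conventional TIF_NOTIFY_RESUME/tracehook wiring that set_notify_resume() arms; the file placement and surrounding code are illustrative, not taken from this diff.

/* include/linux/sched.h (sketch): per-task count of pages charged past
 * memory.high, filled in by try_charge() and drained on resume. */
struct task_struct {
	/* ... */
#ifdef CONFIG_MEMCG
	unsigned int memcg_nr_pages_over_high;
#endif
	/* ... */
};

/* include/linux/memcontrol.h (sketch): declaration plus a no-op stub so
 * callers need not be conditional on CONFIG_MEMCG. */
#ifdef CONFIG_MEMCG
void mem_cgroup_handle_over_high(void);
#else
static inline void mem_cgroup_handle_over_high(void)
{
}
#endif

/* include/linux/tracehook.h (sketch): tracehook_notify_resume() runs when
 * TIF_NOTIFY_RESUME is set, which is exactly what set_notify_resume(current)
 * in try_charge() arranges, so the deferred reclaim happens here on the way
 * back to userland. */
static inline void tracehook_notify_resume(struct pt_regs *regs)
{
	/* ... existing notify-resume work ... */
	mem_cgroup_handle_over_high();
}

Deferring to this point means the reclaim always runs in a context where the task may sleep, which is why the second hunk can drop the __GFP_WAIT check and the handler can use GFP_KERNEL unconditionally.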