author	SeongJae Park <sj@kernel.org>	2021-11-05 13:47:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-11-06 13:30:45 -0700
commit	ee801b7dd7822a82fd7663048ad649545fac6df3 (patch)
tree	ae87166f6742d9e516c05bb88548805e3a093ed9 /mm
parent	tools/selftests/damon: update for regions prioritization of schemes (diff)
mm/damon/schemes: activate schemes based on a watermarks mechanism
DAMON-based operation schemes need to be manually turned on and off.  In some
use cases, however, the condition for turning a scheme on and off depends on
the system's situation.  For example, schemes for proactive page reclamation
need to be turned on when some memory pressure is detected, and turned off
when the system has enough free memory.

For easier control of scheme activation based on the system's situation, this
introduces a watermarks-based mechanism.  The client can describe the
watermark metric (e.g., the amount of free memory in the system), the
watermark check interval, and three watermarks, namely high, mid, and low.
While a scheme is deactivated, DAMON only fetches the metric and compares it
against the three watermarks at every check interval.  If the metric is
higher than the high watermark, the scheme is deactivated.  If the metric is
between the mid watermark and the low watermark, the scheme is activated.  If
the metric is lower than the low watermark, the scheme is deactivated again.
This is to allow users to fall back to traditional page-granularity
mechanisms.

Link: https://lkml.kernel.org/r/20211019150731.16699-12-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Cc: Amit Shah <amit@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David Woodhouse <dwmw@amazon.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Leonard Foerster <foersleo@amazon.de>
Cc: Marco Elver <elver@google.com>
Cc: Markus Boehme <markubo@amazon.de>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
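[ Editor's note: as a concrete illustration, the sketch below shows how an
in-kernel client might request a pageout scheme that only runs under memory
pressure via the damon_new_scheme() signature extended by this patch.  It is
not part of the patch itself: the function name, region/age bounds, and
watermark values are hypothetical.  With DAMOS_WMARK_FREE_MEM_RATE, the
metric and the three watermarks are expressed in per-thousand of total
memory, and the check interval is in microseconds. ]

/*
 * Hypothetical usage sketch (not part of this patch): build a DAMOS_PAGEOUT
 * scheme that is active while free memory is between 5% and 10% of total
 * memory, and deactivated above 50% (no pressure) or below 5% (fall back to
 * page-granularity reclaim).
 */
static struct damos *pressure_pageout_scheme(void)
{
	struct damos_watermarks wmarks = {
		.metric = DAMOS_WMARK_FREE_MEM_RATE,
		.interval = 5 * 1000 * 1000,	/* check every 5 seconds */
		.high = 500,	/* > 50% free: deactivate */
		.mid = 100,	/* <= 10% free: activate */
		.low = 50,	/* < 5% free: deactivate */
	};
	struct damos_quota quota = {};	/* no quota limits */

	/* any region size or access count, aged 10+ aggregation intervals */
	return damon_new_scheme(PAGE_SIZE, ULONG_MAX, 0, UINT_MAX,
			10, UINT_MAX, DAMOS_PAGEOUT, &quota, &wmarks);
}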
Diffstat (limited to 'mm')
-rw-r--r--	mm/damon/core.c 	| 97
-rw-r--r--	mm/damon/dbgfs.c	|  5
2 files changed, 100 insertions(+), 2 deletions(-)
diff --git a/mm/damon/core.c b/mm/damon/core.c
index fad25778e2ec..6993c60ae31c 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -10,6 +10,7 @@
#include <linux/damon.h>
#include <linux/delay.h>
#include <linux/kthread.h>
+#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -90,7 +91,8 @@ struct damos *damon_new_scheme(
unsigned long min_sz_region, unsigned long max_sz_region,
unsigned int min_nr_accesses, unsigned int max_nr_accesses,
unsigned int min_age_region, unsigned int max_age_region,
- enum damos_action action, struct damos_quota *quota)
+ enum damos_action action, struct damos_quota *quota,
+ struct damos_watermarks *wmarks)
{
struct damos *scheme;
@@ -122,6 +124,13 @@ struct damos *damon_new_scheme(
scheme->quota.charge_target_from = NULL;
scheme->quota.charge_addr_from = 0;
+ scheme->wmarks.metric = wmarks->metric;
+ scheme->wmarks.interval = wmarks->interval;
+ scheme->wmarks.high = wmarks->high;
+ scheme->wmarks.mid = wmarks->mid;
+ scheme->wmarks.low = wmarks->low;
+ scheme->wmarks.activated = true;
+
return scheme;
}
@@ -582,6 +591,9 @@ static void damon_do_apply_schemes(struct damon_ctx *c,
unsigned long sz = r->ar.end - r->ar.start;
struct timespec64 begin, end;
+ if (!s->wmarks.activated)
+ continue;
+
/* Check the quota */
if (quota->esz && quota->charged_sz >= quota->esz)
continue;
@@ -684,6 +696,9 @@ static void kdamond_apply_schemes(struct damon_ctx *c)
unsigned long cumulated_sz;
unsigned int score, max_score = 0;
+ if (!s->wmarks.activated)
+ continue;
+
if (!quota->ms && !quota->sz)
continue;
@@ -924,6 +939,83 @@ static bool kdamond_need_stop(struct damon_ctx *ctx)
return true;
}
+static unsigned long damos_wmark_metric_value(enum damos_wmark_metric metric)
+{
+ struct sysinfo i;
+
+ switch (metric) {
+ case DAMOS_WMARK_FREE_MEM_RATE:
+ si_meminfo(&i);
+ return i.freeram * 1000 / i.totalram;
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Returns zero if the scheme is active. Else, returns time to wait for next
+ * watermark check in micro-seconds.
+ */
+static unsigned long damos_wmark_wait_us(struct damos *scheme)
+{
+ unsigned long metric;
+
+ if (scheme->wmarks.metric == DAMOS_WMARK_NONE)
+ return 0;
+
+ metric = damos_wmark_metric_value(scheme->wmarks.metric);
+ /* higher than high watermark or lower than low watermark */
+ if (metric > scheme->wmarks.high || scheme->wmarks.low > metric) {
+ if (scheme->wmarks.activated)
+ pr_debug("inactivate a scheme (%d) for %s wmark\n",
+ scheme->action,
+ metric > scheme->wmarks.high ?
+ "high" : "low");
+ scheme->wmarks.activated = false;
+ return scheme->wmarks.interval;
+ }
+
+ /* inactive and higher than middle watermark */
+ if ((scheme->wmarks.high >= metric && metric >= scheme->wmarks.mid) &&
+ !scheme->wmarks.activated)
+ return scheme->wmarks.interval;
+
+ if (!scheme->wmarks.activated)
+ pr_debug("activate a scheme (%d)\n", scheme->action);
+ scheme->wmarks.activated = true;
+ return 0;
+}
+
+static void kdamond_usleep(unsigned long usecs)
+{
+ if (usecs > 100 * 1000)
+ schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+ else
+ usleep_range(usecs, usecs + 1);
+}
+
+/* Returns negative error code if it's not activated but should return */
+static int kdamond_wait_activation(struct damon_ctx *ctx)
+{
+ struct damos *s;
+ unsigned long wait_time;
+ unsigned long min_wait_time = 0;
+
+ while (!kdamond_need_stop(ctx)) {
+ damon_for_each_scheme(s, ctx) {
+ wait_time = damos_wmark_wait_us(s);
+ if (!min_wait_time || wait_time < min_wait_time)
+ min_wait_time = wait_time;
+ }
+ if (!min_wait_time)
+ return 0;
+
+ kdamond_usleep(min_wait_time);
+ }
+ return -EBUSY;
+}
+
static void set_kdamond_stop(struct damon_ctx *ctx)
{
mutex_lock(&ctx->kdamond_lock);
@@ -952,6 +1044,9 @@ static int kdamond_fn(void *data)
sz_limit = damon_region_sz_limit(ctx);
while (!kdamond_need_stop(ctx)) {
+ if (kdamond_wait_activation(ctx))
+ continue;
+
if (ctx->primitive.prepare_access_checks)
ctx->primitive.prepare_access_checks(ctx);
if (ctx->callback.after_sampling &&
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 20c4feb8b918..9f13060d1058 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -195,6 +195,9 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
*nr_schemes = 0;
while (pos < len && *nr_schemes < max_nr_schemes) {
struct damos_quota quota = {};
+ struct damos_watermarks wmarks = {
+ .metric = DAMOS_WMARK_NONE,
+ };
ret = sscanf(&str[pos],
"%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u%n",
@@ -212,7 +215,7 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
pos += parsed;
scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
- min_age, max_age, action, &quota);
+ min_age, max_age, action, &quota, &wmarks);
if (!scheme)
goto fail;
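
[ Editor's note: to see the hysteresis implemented by damos_wmark_wait_us()
in isolation, here is a small standalone userspace model of the same
three-watermark decision logic.  wmark_state() is a hypothetical helper
written only for this illustration, not part of the patch. ]

#include <stdbool.h>
#include <stdio.h>

/*
 * Standalone model of the decision logic in damos_wmark_wait_us(): returns
 * the new activation state for a given metric value and previous state.
 */
static bool wmark_state(unsigned long metric, unsigned long high,
			unsigned long mid, unsigned long low, bool activated)
{
	if (metric > high || metric < low)
		return false;	/* out of range: deactivate */
	if (metric >= mid && !activated)
		return false;	/* not enough pressure yet: stay inactive */
	return true;		/* between low and mid, or already active */
}

int main(void)
{
	/* high/mid/low = 500/100/50 per-thousand free memory */
	unsigned long free_rates[] = { 600, 300, 90, 300, 600, 40 };
	bool active = false;

	for (int i = 0; i < 6; i++) {
		active = wmark_state(free_rates[i], 500, 100, 50, active);
		printf("free rate %lu -> %s\n", free_rates[i],
		       active ? "active" : "inactive");
	}
	return 0;
}

Note how a free-memory rate of 300 keeps the scheme active once it has been
activated at 90, while the same value earlier left it inactive: the mid
watermark only gates activation, and only crossing high or low forces
deactivation.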