Diffstat:
 mm/damon/Kconfig         |   7
 mm/damon/core-test.h     |  30
 mm/damon/core.c          | 113
 mm/damon/dbgfs.c         |  19
 mm/damon/ops-common.c    |  38
 mm/damon/ops-common.h    |   2
 mm/damon/paddr.c         | 163
 mm/damon/reclaim.c       |  19
 mm/damon/sysfs-common.c  |   2
 mm/damon/sysfs-common.h  |   4
 mm/damon/sysfs-schemes.c | 391
 mm/damon/sysfs.c         |  22
 mm/damon/vaddr-test.h    |  20
 mm/damon/vaddr.c         |  68
 14 files changed, 748 insertions(+), 150 deletions(-)
diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig
index 7821fcb3f258..436c6b4cb5ec 100644
--- a/mm/damon/Kconfig
+++ b/mm/damon/Kconfig
@@ -60,7 +60,7 @@ config DAMON_SYSFS
the interface for arbitrary data access monitoring.
config DAMON_DBGFS
- bool "DAMON debugfs interface"
+ bool "DAMON debugfs interface (DEPRECATED!)"
depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS
help
This builds the debugfs interface for DAMON. The user space admins
@@ -68,8 +68,9 @@ config DAMON_DBGFS
If unsure, say N.
- This will be removed after >5.15.y LTS kernel is released, so users
- should move to the sysfs interface (DAMON_SYSFS).
+ This is deprecated, so users should move to the sysfs interface
+ (DAMON_SYSFS). If you depend on this and cannot move, please report
+ your usecase to damon@lists.linux.dev and linux-mm@kvack.org.
config DAMON_DBGFS_KUNIT_TEST
bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS
diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h
index 3db9b7368756..fae64d32b925 100644
--- a/mm/damon/core-test.h
+++ b/mm/damon/core-test.h
@@ -289,6 +289,35 @@ static void damon_test_set_regions(struct kunit *test)
damon_destroy_target(t);
}
+static void damon_test_update_monitoring_result(struct kunit *test)
+{
+ struct damon_attrs old_attrs = {
+ .sample_interval = 10, .aggr_interval = 1000,};
+ struct damon_attrs new_attrs;
+ struct damon_region *r = damon_new_region(3, 7);
+
+ r->nr_accesses = 15;
+ r->age = 20;
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 100, .aggr_interval = 10000,};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 15);
+ KUNIT_EXPECT_EQ(test, r->age, 2);
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 1, .aggr_interval = 1000};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
+ KUNIT_EXPECT_EQ(test, r->age, 2);
+
+ new_attrs = (struct damon_attrs){
+ .sample_interval = 1, .aggr_interval = 100};
+ damon_update_monitoring_result(r, &old_attrs, &new_attrs);
+ KUNIT_EXPECT_EQ(test, r->nr_accesses, 150);
+ KUNIT_EXPECT_EQ(test, r->age, 20);
+}
+
static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_target),
KUNIT_CASE(damon_test_regions),
@@ -299,6 +328,7 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damon_test_split_regions_of),
KUNIT_CASE(damon_test_ops_registration),
KUNIT_CASE(damon_test_set_regions),
+ KUNIT_CASE(damon_test_update_monitoring_result),
{},
};
diff --git a/mm/damon/core.c b/mm/damon/core.c
index ceec75b88ef9..d9ef62047bf5 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -263,6 +263,40 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
return 0;
}
+struct damos_filter *damos_new_filter(enum damos_filter_type type,
+ bool matching)
+{
+ struct damos_filter *filter;
+
+ filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return NULL;
+ filter->type = type;
+ filter->matching = matching;
+ return filter;
+}
+
+void damos_add_filter(struct damos *s, struct damos_filter *f)
+{
+ list_add_tail(&f->list, &s->filters);
+}
+
+static void damos_del_filter(struct damos_filter *f)
+{
+ list_del(&f->list);
+}
+
+static void damos_free_filter(struct damos_filter *f)
+{
+ kfree(f);
+}
+
+void damos_destroy_filter(struct damos_filter *f)
+{
+ damos_del_filter(f);
+ damos_free_filter(f);
+}
+
/* initialize private fields of damos_quota and return the pointer */
static struct damos_quota *damos_quota_init_priv(struct damos_quota *quota)
{
@@ -287,6 +321,7 @@ struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
return NULL;
scheme->pattern = *pattern;
scheme->action = action;
+ INIT_LIST_HEAD(&scheme->filters);
scheme->stat = (struct damos_stat){};
INIT_LIST_HEAD(&scheme->list);
@@ -315,6 +350,10 @@ static void damon_free_scheme(struct damos *s)
void damon_destroy_scheme(struct damos *s)
{
+ struct damos_filter *f, *next;
+
+ damos_for_each_filter_safe(f, next, s)
+ damos_destroy_filter(f);
damon_del_scheme(s);
damon_free_scheme(s);
}
@@ -426,6 +465,76 @@ void damon_destroy_ctx(struct damon_ctx *ctx)
kfree(ctx);
}
+static unsigned int damon_age_for_new_attrs(unsigned int age,
+ struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
+{
+ return age * old_attrs->aggr_interval / new_attrs->aggr_interval;
+}
+
+/* convert access ratio in bp (per 10,000) to nr_accesses */
+static unsigned int damon_accesses_bp_to_nr_accesses(
+ unsigned int accesses_bp, struct damon_attrs *attrs)
+{
+ unsigned int max_nr_accesses =
+ attrs->aggr_interval / attrs->sample_interval;
+
+ return accesses_bp * max_nr_accesses / 10000;
+}
+
+/* convert nr_accesses to access ratio in bp (per 10,000) */
+static unsigned int damon_nr_accesses_to_accesses_bp(
+ unsigned int nr_accesses, struct damon_attrs *attrs)
+{
+ unsigned int max_nr_accesses =
+ attrs->aggr_interval / attrs->sample_interval;
+
+ return nr_accesses * 10000 / max_nr_accesses;
+}
+
+static unsigned int damon_nr_accesses_for_new_attrs(unsigned int nr_accesses,
+ struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
+{
+ return damon_accesses_bp_to_nr_accesses(
+ damon_nr_accesses_to_accesses_bp(
+ nr_accesses, old_attrs),
+ new_attrs);
+}
+
+static void damon_update_monitoring_result(struct damon_region *r,
+ struct damon_attrs *old_attrs, struct damon_attrs *new_attrs)
+{
+ r->nr_accesses = damon_nr_accesses_for_new_attrs(r->nr_accesses,
+ old_attrs, new_attrs);
+ r->age = damon_age_for_new_attrs(r->age, old_attrs, new_attrs);
+}
+
+/*
+ * region->nr_accesses is the number of sampling intervals in the last
+ * aggregation interval in which access to the region was found; region->age
+ * is the number of aggregation intervals its access pattern has been
+ * maintained for.  For this reason, the real meaning of both fields depends
+ * on the current sampling and aggregation intervals.  This function updates
+ * ->nr_accesses and ->age of given damon_ctx's regions for new damon_attrs.
+ */
+static void damon_update_monitoring_results(struct damon_ctx *ctx,
+ struct damon_attrs *new_attrs)
+{
+ struct damon_attrs *old_attrs = &ctx->attrs;
+ struct damon_target *t;
+ struct damon_region *r;
+
+ /* if any interval is zero, simply skip the conversion */
+ if (!old_attrs->sample_interval || !old_attrs->aggr_interval ||
+ !new_attrs->sample_interval ||
+ !new_attrs->aggr_interval)
+ return;
+
+ damon_for_each_target(t, ctx)
+ damon_for_each_region(r, t)
+ damon_update_monitoring_result(
+ r, old_attrs, new_attrs);
+}
+
/**
* damon_set_attrs() - Set attributes for the monitoring.
* @ctx: monitoring context
@@ -443,6 +552,7 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
if (attrs->min_nr_regions > attrs->max_nr_regions)
return -EINVAL;
+ damon_update_monitoring_results(ctx, attrs);
ctx->attrs = *attrs;
return 0;
}
@@ -1230,7 +1340,8 @@ static int kdamond_fn(void *data)
if (ctx->callback.after_aggregation &&
ctx->callback.after_aggregation(ctx))
break;
- kdamond_apply_schemes(ctx);
+ if (!list_empty(&ctx->schemes))
+ kdamond_apply_schemes(ctx);
kdamond_reset_aggregated(ctx);
kdamond_split_regions(ctx);
if (ctx->ops.reset_aggregated)
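
The rescaling that damon_update_monitoring_result() performs is plain integer arithmetic, so a standalone sketch can show the effect. The snippet below is an illustrative userspace copy (the struct and helper names are made up for this example, not kernel API) that reproduces the first case checked by damon_test_update_monitoring_result() above: 15 accesses out of a possible 100 and an age of 20 under sample_interval=10/aggr_interval=1000 become 15 and 2 under 100/10000.

#include <stdio.h>

/* illustrative stand-in for the two interval fields the conversion uses */
struct attrs { unsigned long sample_interval, aggr_interval; };

static unsigned int rescale_nr_accesses(unsigned int nr_accesses,
					struct attrs *old, struct attrs *new)
{
	unsigned int old_max = old->aggr_interval / old->sample_interval;
	unsigned int new_max = new->aggr_interval / new->sample_interval;
	/* nr_accesses -> access ratio in bp (per 10,000) under old intervals */
	unsigned int bp = nr_accesses * 10000 / old_max;

	/* bp -> nr_accesses under the new intervals */
	return bp * new_max / 10000;
}

static unsigned int rescale_age(unsigned int age,
				struct attrs *old, struct attrs *new)
{
	return age * old->aggr_interval / new->aggr_interval;
}

int main(void)
{
	struct attrs old = { .sample_interval = 10, .aggr_interval = 1000 };
	struct attrs new = { .sample_interval = 100, .aggr_interval = 10000 };

	/* prints "nr_accesses: 15 -> 15" and "age: 20 -> 2" */
	printf("nr_accesses: 15 -> %u\n", rescale_nr_accesses(15, &old, &new));
	printf("age: 20 -> %u\n", rescale_age(20, &old, &new));
	return 0;
}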
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index b3f454a5c682..124f0f8c97b7 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -20,6 +20,14 @@ static int dbgfs_nr_ctxs;
static struct dentry **dbgfs_dirs;
static DEFINE_MUTEX(damon_dbgfs_lock);
+static void damon_dbgfs_warn_deprecation(void)
+{
+ pr_warn_once("DAMON debugfs interface is deprecated, "
+ "so users should move to DAMON_SYSFS. If you cannot, "
+ "please report your usecase to damon@lists.linux.dev and "
+ "linux-mm@kvack.org.\n");
+}
+
/*
* Returns non-empty string on success, negative error code otherwise.
*/
@@ -711,6 +719,8 @@ out:
static int damon_dbgfs_open(struct inode *inode, struct file *file)
{
+ damon_dbgfs_warn_deprecation();
+
file->private_data = inode->i_private;
return nonseekable_open(inode, file);
@@ -1039,15 +1049,24 @@ static ssize_t dbgfs_monitor_on_write(struct file *file,
return ret;
}
+static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file)
+{
+ damon_dbgfs_warn_deprecation();
+ return nonseekable_open(inode, file);
+}
+
static const struct file_operations mk_contexts_fops = {
+ .open = damon_dbgfs_static_file_open,
.write = dbgfs_mk_context_write,
};
static const struct file_operations rm_contexts_fops = {
+ .open = damon_dbgfs_static_file_open,
.write = dbgfs_rm_context_write,
};
static const struct file_operations monitor_on_fops = {
+ .open = damon_dbgfs_static_file_open,
.read = dbgfs_monitor_on_read,
.write = dbgfs_monitor_on_write,
};
diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
index 75409601f934..cc63cf953636 100644
--- a/mm/damon/ops-common.c
+++ b/mm/damon/ops-common.c
@@ -16,29 +16,33 @@
* Get an online page for a pfn if it's in the LRU list. Otherwise, returns
* NULL.
*
- * The body of this function is stolen from the 'page_idle_get_page()'. We
+ * The body of this function is stolen from the 'page_idle_get_folio()'. We
* steal rather than reuse it because the code is quite simple.
*/
-struct page *damon_get_page(unsigned long pfn)
+struct folio *damon_get_folio(unsigned long pfn)
{
struct page *page = pfn_to_online_page(pfn);
+ struct folio *folio;
- if (!page || !PageLRU(page) || !get_page_unless_zero(page))
+ if (!page || PageTail(page))
return NULL;
- if (unlikely(!PageLRU(page))) {
- put_page(page);
- page = NULL;
+ folio = page_folio(page);
+ if (!folio_test_lru(folio) || !folio_try_get(folio))
+ return NULL;
+ if (unlikely(page_folio(page) != folio || !folio_test_lru(folio))) {
+ folio_put(folio);
+ folio = NULL;
}
- return page;
+ return folio;
}
void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
{
bool referenced = false;
- struct page *page = damon_get_page(pte_pfn(*pte));
+ struct folio *folio = damon_get_folio(pte_pfn(*pte));
- if (!page)
+ if (!folio)
return;
if (pte_young(*pte)) {
@@ -52,19 +56,19 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr)
#endif /* CONFIG_MMU_NOTIFIER */
if (referenced)
- set_page_young(page);
+ folio_set_young(folio);
- set_page_idle(page);
- put_page(page);
+ folio_set_idle(folio);
+ folio_put(folio);
}
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool referenced = false;
- struct page *page = damon_get_page(pmd_pfn(*pmd));
+ struct folio *folio = damon_get_folio(pmd_pfn(*pmd));
- if (!page)
+ if (!folio)
return;
if (pmd_young(*pmd)) {
@@ -78,10 +82,10 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr)
#endif /* CONFIG_MMU_NOTIFIER */
if (referenced)
- set_page_young(page);
+ folio_set_young(folio);
- set_page_idle(page);
- put_page(page);
+ folio_set_idle(folio);
+ folio_put(folio);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
index 8d82d3722204..14f4bc69f29b 100644
--- a/mm/damon/ops-common.h
+++ b/mm/damon/ops-common.h
@@ -7,7 +7,7 @@
#include <linux/damon.h>
-struct page *damon_get_page(unsigned long pfn);
+struct folio *damon_get_folio(unsigned long pfn);
void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr);
void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr);
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index e1a4315c4be6..607bb69e526c 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -33,17 +33,15 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
static void damon_pa_mkold(unsigned long paddr)
{
- struct folio *folio;
- struct page *page = damon_get_page(PHYS_PFN(paddr));
+ struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
- if (!page)
+ if (!folio)
return;
- folio = page_folio(page);
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
folio_set_idle(folio);
@@ -81,69 +79,57 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
}
}
-struct damon_pa_access_chk_result {
- unsigned long page_sz;
- bool accessed;
-};
-
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
- struct damon_pa_access_chk_result *result = arg;
+ bool *accessed = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
- result->accessed = false;
- result->page_sz = PAGE_SIZE;
+ *accessed = false;
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
if (pvmw.pte) {
- result->accessed = pte_young(*pvmw.pte) ||
+ *accessed = pte_young(*pvmw.pte) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- result->accessed = pmd_young(*pvmw.pmd) ||
+ *accessed = pmd_young(*pvmw.pmd) ||
!folio_test_idle(folio) ||
mmu_notifier_test_young(vma->vm_mm, addr);
- result->page_sz = HPAGE_PMD_SIZE;
#else
WARN_ON_ONCE(1);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
- if (result->accessed) {
+ if (*accessed) {
page_vma_mapped_walk_done(&pvmw);
break;
}
}
/* If accessed, stop walking */
- return !result->accessed;
+ return *accessed == false;
}
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
{
- struct folio *folio;
- struct page *page = damon_get_page(PHYS_PFN(paddr));
- struct damon_pa_access_chk_result result = {
- .page_sz = PAGE_SIZE,
- .accessed = false,
- };
+ struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
+ bool accessed = false;
struct rmap_walk_control rwc = {
- .arg = &result,
+ .arg = &accessed,
.rmap_one = __damon_pa_young,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
- if (!page)
+ if (!folio)
return false;
- folio = page_folio(page);
if (!folio_mapped(folio) || !folio_raw_mapping(folio)) {
if (folio_test_idle(folio))
- result.accessed = false;
+ accessed = false;
else
- result.accessed = true;
+ accessed = true;
folio_put(folio);
goto out;
}
@@ -161,25 +147,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
folio_put(folio);
out:
- *page_sz = result.page_sz;
- return result.accessed;
+ *folio_sz = folio_size(folio);
+ return accessed;
}
static void __damon_pa_check_access(struct damon_region *r)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+ if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
if (last_accessed)
r->nr_accesses++;
return;
}
- last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+ last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;
@@ -202,63 +188,116 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx)
return max_nr_accesses;
}
-static unsigned long damon_pa_pageout(struct damon_region *r)
+static bool __damos_pa_filter_out(struct damos_filter *filter,
+ struct folio *folio)
+{
+ bool matched = false;
+ struct mem_cgroup *memcg;
+
+ switch (filter->type) {
+ case DAMOS_FILTER_TYPE_ANON:
+ matched = folio_test_anon(folio);
+ break;
+ case DAMOS_FILTER_TYPE_MEMCG:
+ rcu_read_lock();
+ memcg = folio_memcg_check(folio);
+ if (!memcg)
+ matched = false;
+ else
+ matched = filter->memcg_id == mem_cgroup_id(memcg);
+ rcu_read_unlock();
+ break;
+ default:
+ break;
+ }
+
+ return matched == filter->matching;
+}
+
+/*
+ * damos_pa_filter_out - Return true if the page should be filtered out.
+ */
+static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
+{
+ struct damos_filter *filter;
+
+ damos_for_each_filter(filter, scheme) {
+ if (__damos_pa_filter_out(filter, folio))
+ return true;
+ }
+ return false;
+}
+
+static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
{
unsigned long addr, applied;
- LIST_HEAD(page_list);
+ LIST_HEAD(folio_list);
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct page *page = damon_get_page(PHYS_PFN(addr));
+ struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+ if (!folio)
+ continue;
- if (!page)
+ if (damos_pa_filter_out(s, folio)) {
+ folio_put(folio);
continue;
+ }
- ClearPageReferenced(page);
- test_and_clear_page_young(page);
- if (isolate_lru_page(page)) {
- put_page(page);
+ folio_clear_referenced(folio);
+ folio_test_clear_young(folio);
+ if (!folio_isolate_lru(folio)) {
+ folio_put(folio);
continue;
}
- if (PageUnevictable(page)) {
- putback_lru_page(page);
+ if (folio_test_unevictable(folio)) {
+ folio_putback_lru(folio);
} else {
- list_add(&page->lru, &page_list);
- put_page(page);
+ list_add(&folio->lru, &folio_list);
+ folio_put(folio);
}
}
- applied = reclaim_pages(&page_list);
+ applied = reclaim_pages(&folio_list);
cond_resched();
return applied * PAGE_SIZE;
}
static inline unsigned long damon_pa_mark_accessed_or_deactivate(
- struct damon_region *r, bool mark_accessed)
+ struct damon_region *r, struct damos *s, bool mark_accessed)
{
unsigned long addr, applied = 0;
for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
- struct page *page = damon_get_page(PHYS_PFN(addr));
+ struct folio *folio = damon_get_folio(PHYS_PFN(addr));
+
+ if (!folio)
+ continue;
- if (!page)
+ if (damos_pa_filter_out(s, folio)) {
+ folio_put(folio);
continue;
+ }
+
if (mark_accessed)
- mark_page_accessed(page);
+ folio_mark_accessed(folio);
else
- deactivate_page(page);
- put_page(page);
- applied++;
+ folio_deactivate(folio);
+ folio_put(folio);
+ applied += folio_nr_pages(folio);
}
return applied * PAGE_SIZE;
}
-static unsigned long damon_pa_mark_accessed(struct damon_region *r)
+static unsigned long damon_pa_mark_accessed(struct damon_region *r,
+ struct damos *s)
{
- return damon_pa_mark_accessed_or_deactivate(r, true);
+ return damon_pa_mark_accessed_or_deactivate(r, s, true);
}
-static unsigned long damon_pa_deactivate_pages(struct damon_region *r)
+static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
+ struct damos *s)
{
- return damon_pa_mark_accessed_or_deactivate(r, false);
+ return damon_pa_mark_accessed_or_deactivate(r, s, false);
}
static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
@@ -267,11 +306,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
{
switch (scheme->action) {
case DAMOS_PAGEOUT:
- return damon_pa_pageout(r);
+ return damon_pa_pageout(r, scheme);
case DAMOS_LRU_PRIO:
- return damon_pa_mark_accessed(r);
+ return damon_pa_mark_accessed(r, scheme);
case DAMOS_LRU_DEPRIO:
- return damon_pa_deactivate_pages(r);
+ return damon_pa_deactivate_pages(r, scheme);
case DAMOS_STAT:
break;
default:
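
One point worth spelling out about the new filters: a folio is skipped as soon as any installed filter's match result equals that filter's 'matching' flag, so matching=Y rejects folios of the given type while matching=N rejects everything else. The toy userspace model below (made-up types, not kernel code) shows the decision for a single 'anon' filter.

#include <stdbool.h>
#include <stdio.h>

enum toy_filter_type { TOY_FILTER_ANON, TOY_FILTER_MEMCG };

struct toy_filter {
	enum toy_filter_type type;
	bool matching;		/* reject folios that match (true) or that
				 * do not match (false) */
	unsigned short memcg_id;
};

struct toy_folio { bool anon; unsigned short memcg_id; };

/* mirrors the damos_pa_filter_out() logic above, on toy types */
static bool toy_filter_out(struct toy_filter *filters, int nr,
			   struct toy_folio *folio)
{
	int i;

	for (i = 0; i < nr; i++) {
		bool matched = filters[i].type == TOY_FILTER_ANON ?
			folio->anon : filters[i].memcg_id == folio->memcg_id;

		if (matched == filters[i].matching)
			return true;	/* skip this folio */
	}
	return false;			/* apply the scheme's action */
}

int main(void)
{
	/* one filter rejecting anonymous folios, as DAMON_RECLAIM's skip_anon */
	struct toy_filter skip_anon = { .type = TOY_FILTER_ANON, .matching = true };
	struct toy_folio anon = { .anon = true }, file = { .anon = false };

	printf("anon filtered out: %d\n", toy_filter_out(&skip_anon, 1, &anon)); /* 1 */
	printf("file filtered out: %d\n", toy_filter_out(&skip_anon, 1, &file)); /* 0 */
	return 0;
}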
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index e82631f39481..648d2a85523a 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -99,6 +99,15 @@ static unsigned long monitor_region_end __read_mostly;
module_param(monitor_region_end, ulong, 0600);
/*
+ * Skip reclamation of anonymous pages.
+ *
+ * If this parameter is set to ``Y``, DAMON_RECLAIM does not reclaim anonymous
+ * pages.  By default, ``N``.
+ */
+static bool skip_anon __read_mostly;
+module_param(skip_anon, bool, 0600);
+
+/*
* PID of the DAMON thread
*
* If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
@@ -142,6 +151,7 @@ static struct damos *damon_reclaim_new_scheme(void)
static int damon_reclaim_apply_parameters(void)
{
struct damos *scheme;
+ struct damos_filter *filter;
int err = 0;
err = damon_set_attrs(ctx, &damon_reclaim_mon_attrs);
@@ -152,6 +162,15 @@ static int damon_reclaim_apply_parameters(void)
scheme = damon_reclaim_new_scheme();
if (!scheme)
return -ENOMEM;
+ if (skip_anon) {
+ filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
+ if (!filter) {
+ /* Will be freed by next 'damon_set_schemes()' below */
+ damon_destroy_scheme(scheme);
+ return -ENOMEM;
+ }
+ damos_add_filter(scheme, filter);
+ }
damon_set_schemes(ctx, &scheme, 1);
return damon_set_region_biggest_system_ram_default(target,
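
The new knob can be flipped at run time. A minimal sketch, assuming the parameter shows up at the usual DAMON_RECLAIM location /sys/module/damon_reclaim/parameters/ (the path is taken from the existing parameters, not from this hunk):

#include <stdio.h>

int main(void)
{
	/* assumed path; DAMON_RECLAIM's other parameters live here as well */
	FILE *f = fopen("/sys/module/damon_reclaim/parameters/skip_anon", "w");

	if (!f) {
		perror("skip_anon");
		return 1;
	}
	fputs("Y\n", f);	/* kstrtobool() also accepts y/n/1/0 */
	return fclose(f) ? 1 : 0;
}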
diff --git a/mm/damon/sysfs-common.c b/mm/damon/sysfs-common.c
index 52bebf242f74..70edf45c2174 100644
--- a/mm/damon/sysfs-common.c
+++ b/mm/damon/sysfs-common.c
@@ -99,7 +99,7 @@ static struct attribute *damon_sysfs_ul_range_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_ul_range);
-struct kobj_type damon_sysfs_ul_range_ktype = {
+const struct kobj_type damon_sysfs_ul_range_ktype = {
.release = damon_sysfs_ul_range_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_ul_range_groups,
diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h
index 604a6cbc3ede..db677eba78fd 100644
--- a/mm/damon/sysfs-common.h
+++ b/mm/damon/sysfs-common.h
@@ -21,7 +21,7 @@ struct damon_sysfs_ul_range *damon_sysfs_ul_range_alloc(
unsigned long max);
void damon_sysfs_ul_range_release(struct kobject *kobj);
-extern struct kobj_type damon_sysfs_ul_range_ktype;
+extern const struct kobj_type damon_sysfs_ul_range_ktype;
/*
* schemes directory
@@ -36,7 +36,7 @@ struct damon_sysfs_schemes {
struct damon_sysfs_schemes *damon_sysfs_schemes_alloc(void);
void damon_sysfs_schemes_rm_dirs(struct damon_sysfs_schemes *schemes);
-extern struct kobj_type damon_sysfs_schemes_ktype;
+extern const struct kobj_type damon_sysfs_schemes_ktype;
int damon_sysfs_set_schemes(struct damon_ctx *ctx,
struct damon_sysfs_schemes *sysfs_schemes);
diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c
index 81fc4d27f4e4..3cdad5a7f936 100644
--- a/mm/damon/sysfs-schemes.c
+++ b/mm/damon/sysfs-schemes.c
@@ -103,7 +103,7 @@ static struct attribute *damon_sysfs_scheme_region_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_region);
-static struct kobj_type damon_sysfs_scheme_region_ktype = {
+static const struct kobj_type damon_sysfs_scheme_region_ktype = {
.release = damon_sysfs_scheme_region_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_scheme_region_groups,
@@ -153,7 +153,7 @@ static struct attribute *damon_sysfs_scheme_regions_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme_regions);
-static struct kobj_type damon_sysfs_scheme_regions_ktype = {
+static const struct kobj_type damon_sysfs_scheme_regions_ktype = {
.release = damon_sysfs_scheme_regions_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_scheme_regions_groups,
@@ -252,13 +252,264 @@ static struct attribute *damon_sysfs_stats_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_stats);
-static struct kobj_type damon_sysfs_stats_ktype = {
+static const struct kobj_type damon_sysfs_stats_ktype = {
.release = damon_sysfs_stats_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_stats_groups,
};
/*
+ * filter directory
+ */
+
+struct damon_sysfs_scheme_filter {
+ struct kobject kobj;
+ enum damos_filter_type type;
+ bool matching;
+ char *memcg_path;
+};
+
+static struct damon_sysfs_scheme_filter *damon_sysfs_scheme_filter_alloc(void)
+{
+ return kzalloc(sizeof(struct damon_sysfs_scheme_filter), GFP_KERNEL);
+}
+
+/* Should match with enum damos_filter_type */
+static const char * const damon_sysfs_scheme_filter_type_strs[] = {
+ "anon",
+ "memcg",
+};
+
+static ssize_t type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%s\n",
+ damon_sysfs_scheme_filter_type_strs[filter->type]);
+}
+
+static ssize_t type_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ enum damos_filter_type type;
+ ssize_t ret = -EINVAL;
+
+ for (type = 0; type < NR_DAMOS_FILTER_TYPES; type++) {
+ if (sysfs_streq(buf, damon_sysfs_scheme_filter_type_strs[
+ type])) {
+ filter->type = type;
+ ret = count;
+ break;
+ }
+ }
+ return ret;
+}
+
+static ssize_t matching_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%c\n", filter->matching ? 'Y' : 'N');
+}
+
+static ssize_t matching_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ bool matching;
+ int err = kstrtobool(buf, &matching);
+
+ if (err)
+ return err;
+
+ filter->matching = matching;
+ return count;
+}
+
+static ssize_t memcg_path_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ return sysfs_emit(buf, "%s\n",
+ filter->memcg_path ? filter->memcg_path : "");
+}
+
+static ssize_t memcg_path_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+ char *path = kmalloc(sizeof(*path) * (count + 1), GFP_KERNEL);
+
+ if (!path)
+ return -ENOMEM;
+
+ strscpy(path, buf, count + 1);
+ filter->memcg_path = path;
+ return count;
+}
+
+static void damon_sysfs_scheme_filter_release(struct kobject *kobj)
+{
+ struct damon_sysfs_scheme_filter *filter = container_of(kobj,
+ struct damon_sysfs_scheme_filter, kobj);
+
+ kfree(filter->memcg_path);
+ kfree(filter);
+}
+
+static struct kobj_attribute damon_sysfs_scheme_filter_type_attr =
+ __ATTR_RW_MODE(type, 0600);
+
+static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr =
+ __ATTR_RW_MODE(matching, 0600);
+
+static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr =
+ __ATTR_RW_MODE(memcg_path, 0600);
+
+static struct attribute *damon_sysfs_scheme_filter_attrs[] = {
+ &damon_sysfs_scheme_filter_type_attr.attr,
+ &damon_sysfs_scheme_filter_matching_attr.attr,
+ &damon_sysfs_scheme_filter_memcg_path_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damon_sysfs_scheme_filter);
+
+static struct kobj_type damon_sysfs_scheme_filter_ktype = {
+ .release = damon_sysfs_scheme_filter_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damon_sysfs_scheme_filter_groups,
+};
+
+/*
+ * filters directory
+ */
+
+struct damon_sysfs_scheme_filters {
+ struct kobject kobj;
+ struct damon_sysfs_scheme_filter **filters_arr;
+ int nr;
+};
+
+static struct damon_sysfs_scheme_filters *
+damon_sysfs_scheme_filters_alloc(void)
+{
+ return kzalloc(sizeof(struct damon_sysfs_scheme_filters), GFP_KERNEL);
+}
+
+static void damon_sysfs_scheme_filters_rm_dirs(
+ struct damon_sysfs_scheme_filters *filters)
+{
+ struct damon_sysfs_scheme_filter **filters_arr = filters->filters_arr;
+ int i;
+
+ for (i = 0; i < filters->nr; i++)
+ kobject_put(&filters_arr[i]->kobj);
+ filters->nr = 0;
+ kfree(filters_arr);
+ filters->filters_arr = NULL;
+}
+
+static int damon_sysfs_scheme_filters_add_dirs(
+ struct damon_sysfs_scheme_filters *filters, int nr_filters)
+{
+ struct damon_sysfs_scheme_filter **filters_arr, *filter;
+ int err, i;
+
+ damon_sysfs_scheme_filters_rm_dirs(filters);
+ if (!nr_filters)
+ return 0;
+
+ filters_arr = kmalloc_array(nr_filters, sizeof(*filters_arr),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!filters_arr)
+ return -ENOMEM;
+ filters->filters_arr = filters_arr;
+
+ for (i = 0; i < nr_filters; i++) {
+ filter = damon_sysfs_scheme_filter_alloc();
+ if (!filter) {
+ damon_sysfs_scheme_filters_rm_dirs(filters);
+ return -ENOMEM;
+ }
+
+ err = kobject_init_and_add(&filter->kobj,
+ &damon_sysfs_scheme_filter_ktype,
+ &filters->kobj, "%d", i);
+ if (err) {
+ kobject_put(&filter->kobj);
+ damon_sysfs_scheme_filters_rm_dirs(filters);
+ return err;
+ }
+
+ filters_arr[i] = filter;
+ filters->nr++;
+ }
+ return 0;
+}
+
+static ssize_t nr_filters_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct damon_sysfs_scheme_filters *filters = container_of(kobj,
+ struct damon_sysfs_scheme_filters, kobj);
+
+ return sysfs_emit(buf, "%d\n", filters->nr);
+}
+
+static ssize_t nr_filters_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf, size_t count)
+{
+ struct damon_sysfs_scheme_filters *filters;
+ int nr, err = kstrtoint(buf, 0, &nr);
+
+ if (err)
+ return err;
+ if (nr < 0)
+ return -EINVAL;
+
+ filters = container_of(kobj, struct damon_sysfs_scheme_filters, kobj);
+
+ if (!mutex_trylock(&damon_sysfs_lock))
+ return -EBUSY;
+ err = damon_sysfs_scheme_filters_add_dirs(filters, nr);
+ mutex_unlock(&damon_sysfs_lock);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static void damon_sysfs_scheme_filters_release(struct kobject *kobj)
+{
+ kfree(container_of(kobj, struct damon_sysfs_scheme_filters, kobj));
+}
+
+static struct kobj_attribute damon_sysfs_scheme_filters_nr_attr =
+ __ATTR_RW_MODE(nr_filters, 0600);
+
+static struct attribute *damon_sysfs_scheme_filters_attrs[] = {
+ &damon_sysfs_scheme_filters_nr_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(damon_sysfs_scheme_filters);
+
+static struct kobj_type damon_sysfs_scheme_filters_ktype = {
+ .release = damon_sysfs_scheme_filters_release,
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = damon_sysfs_scheme_filters_groups,
+};
+
+/*
* watermarks directory
*/
@@ -427,7 +678,7 @@ static struct attribute *damon_sysfs_watermarks_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_watermarks);
-static struct kobj_type damon_sysfs_watermarks_ktype = {
+static const struct kobj_type damon_sysfs_watermarks_ktype = {
.release = damon_sysfs_watermarks_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_watermarks_groups,
@@ -538,7 +789,7 @@ static struct attribute *damon_sysfs_weights_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_weights);
-static struct kobj_type damon_sysfs_weights_ktype = {
+static const struct kobj_type damon_sysfs_weights_ktype = {
.release = damon_sysfs_weights_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_weights_groups,
@@ -669,7 +920,7 @@ static struct attribute *damon_sysfs_quotas_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_quotas);
-static struct kobj_type damon_sysfs_quotas_ktype = {
+static const struct kobj_type damon_sysfs_quotas_ktype = {
.release = damon_sysfs_quotas_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_quotas_groups,
@@ -768,7 +1019,7 @@ static struct attribute *damon_sysfs_access_pattern_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_access_pattern);
-static struct kobj_type damon_sysfs_access_pattern_ktype = {
+static const struct kobj_type damon_sysfs_access_pattern_ktype = {
.release = damon_sysfs_access_pattern_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_access_pattern_groups,
@@ -784,6 +1035,7 @@ struct damon_sysfs_scheme {
struct damon_sysfs_access_pattern *access_pattern;
struct damon_sysfs_quotas *quotas;
struct damon_sysfs_watermarks *watermarks;
+ struct damon_sysfs_scheme_filters *filters;
struct damon_sysfs_stats *stats;
struct damon_sysfs_scheme_regions *tried_regions;
};
@@ -878,6 +1130,24 @@ static int damon_sysfs_scheme_set_watermarks(struct damon_sysfs_scheme *scheme)
return err;
}
+static int damon_sysfs_scheme_set_filters(struct damon_sysfs_scheme *scheme)
+{
+ struct damon_sysfs_scheme_filters *filters =
+ damon_sysfs_scheme_filters_alloc();
+ int err;
+
+ if (!filters)
+ return -ENOMEM;
+ err = kobject_init_and_add(&filters->kobj,
+ &damon_sysfs_scheme_filters_ktype, &scheme->kobj,
+ "filters");
+ if (err)
+ kobject_put(&filters->kobj);
+ else
+ scheme->filters = filters;
+ return err;
+}
+
static int damon_sysfs_scheme_set_stats(struct damon_sysfs_scheme *scheme)
{
struct damon_sysfs_stats *stats = damon_sysfs_stats_alloc();
@@ -926,9 +1196,12 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
err = damon_sysfs_scheme_set_watermarks(scheme);
if (err)
goto put_quotas_access_pattern_out;
- err = damon_sysfs_scheme_set_stats(scheme);
+ err = damon_sysfs_scheme_set_filters(scheme);
if (err)
goto put_watermarks_quotas_access_pattern_out;
+ err = damon_sysfs_scheme_set_stats(scheme);
+ if (err)
+ goto put_filters_watermarks_quotas_access_pattern_out;
err = damon_sysfs_scheme_set_tried_regions(scheme);
if (err)
goto put_tried_regions_out;
@@ -937,6 +1210,9 @@ static int damon_sysfs_scheme_add_dirs(struct damon_sysfs_scheme *scheme)
put_tried_regions_out:
kobject_put(&scheme->tried_regions->kobj);
scheme->tried_regions = NULL;
+put_filters_watermarks_quotas_access_pattern_out:
+ kobject_put(&scheme->filters->kobj);
+ scheme->filters = NULL;
put_watermarks_quotas_access_pattern_out:
kobject_put(&scheme->watermarks->kobj);
scheme->watermarks = NULL;
@@ -956,6 +1232,8 @@ static void damon_sysfs_scheme_rm_dirs(struct damon_sysfs_scheme *scheme)
damon_sysfs_quotas_rm_dirs(scheme->quotas);
kobject_put(&scheme->quotas->kobj);
kobject_put(&scheme->watermarks->kobj);
+ damon_sysfs_scheme_filters_rm_dirs(scheme->filters);
+ kobject_put(&scheme->filters->kobj);
kobject_put(&scheme->stats->kobj);
damon_sysfs_scheme_regions_rm_dirs(scheme->tried_regions);
kobject_put(&scheme->tried_regions->kobj);
@@ -1001,7 +1279,7 @@ static struct attribute *damon_sysfs_scheme_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_scheme);
-static struct kobj_type damon_sysfs_scheme_ktype = {
+static const struct kobj_type damon_sysfs_scheme_ktype = {
.release = damon_sysfs_scheme_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_scheme_groups,
@@ -1118,12 +1396,85 @@ static struct attribute *damon_sysfs_schemes_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_schemes);
-struct kobj_type damon_sysfs_schemes_ktype = {
+const struct kobj_type damon_sysfs_schemes_ktype = {
.release = damon_sysfs_schemes_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_schemes_groups,
};
+static bool damon_sysfs_memcg_path_eq(struct mem_cgroup *memcg,
+ char *memcg_path_buf, char *path)
+{
+#ifdef CONFIG_MEMCG
+ cgroup_path(memcg->css.cgroup, memcg_path_buf, PATH_MAX);
+ if (sysfs_streq(memcg_path_buf, path))
+ return true;
+#endif /* CONFIG_MEMCG */
+ return false;
+}
+
+static int damon_sysfs_memcg_path_to_id(char *memcg_path, unsigned short *id)
+{
+ struct mem_cgroup *memcg;
+ char *path;
+ bool found = false;
+
+ if (!memcg_path)
+ return -EINVAL;
+
+ path = kmalloc(sizeof(*path) * PATH_MAX, GFP_KERNEL);
+ if (!path)
+ return -ENOMEM;
+
+ for (memcg = mem_cgroup_iter(NULL, NULL, NULL); memcg;
+ memcg = mem_cgroup_iter(NULL, memcg, NULL)) {
+ /* skip removed memcg */
+ if (!mem_cgroup_id(memcg))
+ continue;
+ if (damon_sysfs_memcg_path_eq(memcg, path, memcg_path)) {
+ *id = mem_cgroup_id(memcg);
+ found = true;
+ break;
+ }
+ }
+
+ kfree(path);
+ return found ? 0 : -EINVAL;
+}
+
+static int damon_sysfs_set_scheme_filters(struct damos *scheme,
+ struct damon_sysfs_scheme_filters *sysfs_filters)
+{
+ int i;
+ struct damos_filter *filter, *next;
+
+ damos_for_each_filter_safe(filter, next, scheme)
+ damos_destroy_filter(filter);
+
+ for (i = 0; i < sysfs_filters->nr; i++) {
+ struct damon_sysfs_scheme_filter *sysfs_filter =
+ sysfs_filters->filters_arr[i];
+ struct damos_filter *filter =
+ damos_new_filter(sysfs_filter->type,
+ sysfs_filter->matching);
+ int err;
+
+ if (!filter)
+ return -ENOMEM;
+ if (filter->type == DAMOS_FILTER_TYPE_MEMCG) {
+ err = damon_sysfs_memcg_path_to_id(
+ sysfs_filter->memcg_path,
+ &filter->memcg_id);
+ if (err) {
+ damos_destroy_filter(filter);
+ return err;
+ }
+ }
+ damos_add_filter(scheme, filter);
+ }
+ return 0;
+}
+
static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_scheme *sysfs_scheme)
{
@@ -1132,6 +1483,10 @@ static struct damos *damon_sysfs_mk_scheme(
struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
+ struct damon_sysfs_scheme_filters *sysfs_filters =
+ sysfs_scheme->filters;
+ struct damos *scheme;
+ int err;
struct damos_access_pattern pattern = {
.min_sz_region = access_pattern->sz->min,
@@ -1157,8 +1512,17 @@ static struct damos *damon_sysfs_mk_scheme(
.low = sysfs_wmarks->low,
};
- return damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
+ scheme = damon_new_scheme(&pattern, sysfs_scheme->action, &quota,
&wmarks);
+ if (!scheme)
+ return NULL;
+
+ err = damon_sysfs_set_scheme_filters(scheme, sysfs_filters);
+ if (err) {
+ damon_destroy_scheme(scheme);
+ return NULL;
+ }
+ return scheme;
}
static void damon_sysfs_update_scheme(struct damos *scheme,
@@ -1169,6 +1533,7 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
struct damon_sysfs_quotas *sysfs_quotas = sysfs_scheme->quotas;
struct damon_sysfs_weights *sysfs_weights = sysfs_quotas->weights;
struct damon_sysfs_watermarks *sysfs_wmarks = sysfs_scheme->watermarks;
+ int err;
scheme->pattern.min_sz_region = access_pattern->sz->min;
scheme->pattern.max_sz_region = access_pattern->sz->max;
@@ -1191,6 +1556,10 @@ static void damon_sysfs_update_scheme(struct damos *scheme,
scheme->wmarks.high = sysfs_wmarks->high;
scheme->wmarks.mid = sysfs_wmarks->mid;
scheme->wmarks.low = sysfs_wmarks->low;
+
+ err = damon_sysfs_set_scheme_filters(scheme, sysfs_scheme->filters);
+ if (err)
+ damon_destroy_scheme(scheme);
}
int damon_sysfs_set_schemes(struct damon_ctx *ctx,
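
For the sysfs side, the new per-scheme 'filters' directory is populated like the other nr_* directories: write a count to nr_filters, then fill the numbered subdirectories. A minimal sketch, assuming the usual DAMON sysfs root /sys/kernel/mm/damon/admin/ and the first kdamond/context/scheme (the 'filters', 'nr_filters', 'type' and 'matching' names are the ones added above):

#include <stdio.h>

/* assumed location of the first scheme's directory under the DAMON sysfs root */
#define SCHEME_DIR "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0"

static int write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* creates filters/0/, then configures it to skip anonymous folios */
	write_file(SCHEME_DIR "/filters/nr_filters", "1");
	write_file(SCHEME_DIR "/filters/0/type", "anon");
	write_file(SCHEME_DIR "/filters/0/matching", "Y");
	return 0;
}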
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index aeb0beb1da91..33e1d5c9cb54 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -81,7 +81,7 @@ static struct attribute *damon_sysfs_region_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_region);
-static struct kobj_type damon_sysfs_region_ktype = {
+static const struct kobj_type damon_sysfs_region_ktype = {
.release = damon_sysfs_region_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_region_groups,
@@ -198,7 +198,7 @@ static struct attribute *damon_sysfs_regions_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_regions);
-static struct kobj_type damon_sysfs_regions_ktype = {
+static const struct kobj_type damon_sysfs_regions_ktype = {
.release = damon_sysfs_regions_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_regions_groups,
@@ -277,7 +277,7 @@ static struct attribute *damon_sysfs_target_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_target);
-static struct kobj_type damon_sysfs_target_ktype = {
+static const struct kobj_type damon_sysfs_target_ktype = {
.release = damon_sysfs_target_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_target_groups,
@@ -402,7 +402,7 @@ static struct attribute *damon_sysfs_targets_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_targets);
-static struct kobj_type damon_sysfs_targets_ktype = {
+static const struct kobj_type damon_sysfs_targets_ktype = {
.release = damon_sysfs_targets_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_targets_groups,
@@ -530,7 +530,7 @@ static struct attribute *damon_sysfs_intervals_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_intervals);
-static struct kobj_type damon_sysfs_intervals_ktype = {
+static const struct kobj_type damon_sysfs_intervals_ktype = {
.release = damon_sysfs_intervals_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_intervals_groups,
@@ -612,7 +612,7 @@ static struct attribute *damon_sysfs_attrs_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_attrs);
-static struct kobj_type damon_sysfs_attrs_ktype = {
+static const struct kobj_type damon_sysfs_attrs_ktype = {
.release = damon_sysfs_attrs_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_attrs_groups,
@@ -800,7 +800,7 @@ static struct attribute *damon_sysfs_context_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_context);
-static struct kobj_type damon_sysfs_context_ktype = {
+static const struct kobj_type damon_sysfs_context_ktype = {
.release = damon_sysfs_context_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_context_groups,
@@ -926,7 +926,7 @@ static struct attribute *damon_sysfs_contexts_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_contexts);
-static struct kobj_type damon_sysfs_contexts_ktype = {
+static const struct kobj_type damon_sysfs_contexts_ktype = {
.release = damon_sysfs_contexts_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_contexts_groups,
@@ -1564,7 +1564,7 @@ static struct attribute *damon_sysfs_kdamond_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamond);
-static struct kobj_type damon_sysfs_kdamond_ktype = {
+static const struct kobj_type damon_sysfs_kdamond_ktype = {
.release = damon_sysfs_kdamond_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_kdamond_groups,
@@ -1707,7 +1707,7 @@ static struct attribute *damon_sysfs_kdamonds_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_kdamonds);
-static struct kobj_type damon_sysfs_kdamonds_ktype = {
+static const struct kobj_type damon_sysfs_kdamonds_ktype = {
.release = damon_sysfs_kdamonds_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_kdamonds_groups,
@@ -1757,7 +1757,7 @@ static struct attribute *damon_sysfs_ui_dir_attrs[] = {
};
ATTRIBUTE_GROUPS(damon_sysfs_ui_dir);
-static struct kobj_type damon_sysfs_ui_dir_ktype = {
+static const struct kobj_type damon_sysfs_ui_dir_ktype = {
.release = damon_sysfs_ui_dir_release,
.sysfs_ops = &kobj_sysfs_ops,
.default_groups = damon_sysfs_ui_dir_groups,
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index bce37c487540..c4b455b5ee30 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -14,19 +14,26 @@
#include <kunit/test.h>
-static void __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
+static int __link_vmas(struct maple_tree *mt, struct vm_area_struct *vmas,
ssize_t nr_vmas)
{
- int i;
+ int i, ret = -ENOMEM;
MA_STATE(mas, mt, 0, 0);
if (!nr_vmas)
- return;
+ return 0;
mas_lock(&mas);
- for (i = 0; i < nr_vmas; i++)
- vma_mas_store(&vmas[i], &mas);
+ for (i = 0; i < nr_vmas; i++) {
+ mas_set_range(&mas, vmas[i].vm_start, vmas[i].vm_end - 1);
+ if (mas_store_gfp(&mas, &vmas[i], GFP_KERNEL))
+ goto failed;
+ }
+
+ ret = 0;
+failed:
mas_unlock(&mas);
+ return ret;
}
/*
@@ -71,7 +78,8 @@ static void damon_test_three_regions_in_vmas(struct kunit *test)
};
mt_init_flags(&mm.mm_mt, MM_MT_FLAGS);
- __link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas));
+ if (__link_vmas(&mm.mm_mt, vmas, ARRAY_SIZE(vmas)))
+ kunit_skip(test, "Failed to create VMA tree");
__damon_va_three_regions(&mm, regions);
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 15f03df66db6..1fec16d7263e 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -335,9 +335,9 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
{
bool referenced = false;
pte_t entry = huge_ptep_get(pte);
- struct page *page = pte_page(entry);
+ struct folio *folio = pfn_folio(pte_pfn(entry));
- get_page(page);
+ folio_get(folio);
if (pte_young(entry)) {
referenced = true;
@@ -352,10 +352,10 @@ static void damon_hugetlb_mkold(pte_t *pte, struct mm_struct *mm,
#endif /* CONFIG_MMU_NOTIFIER */
if (referenced)
- set_page_young(page);
+ folio_set_young(folio);
- set_page_idle(page);
- put_page(page);
+ folio_set_idle(folio);
+ folio_put(folio);
}
static int damon_mkold_hugetlb_entry(pte_t *pte, unsigned long hmask,
@@ -422,7 +422,8 @@ static void damon_va_prepare_access_checks(struct damon_ctx *ctx)
}
struct damon_young_walk_private {
- unsigned long *page_sz;
+ /* size of the folio for the access checked virtual memory address */
+ unsigned long *folio_sz;
bool young;
};
@@ -431,7 +432,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
{
pte_t *pte;
spinlock_t *ptl;
- struct page *page;
+ struct folio *folio;
struct damon_young_walk_private *priv = walk->private;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -446,16 +447,15 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
spin_unlock(ptl);
goto regular_page;
}
- page = damon_get_page(pmd_pfn(*pmd));
- if (!page)
+ folio = damon_get_folio(pmd_pfn(*pmd));
+ if (!folio)
goto huge_out;
- if (pmd_young(*pmd) || !page_is_idle(page) ||
+ if (pmd_young(*pmd) || !folio_test_idle(folio) ||
mmu_notifier_test_young(walk->mm,
- addr)) {
- *priv->page_sz = HPAGE_PMD_SIZE;
+ addr))
priv->young = true;
- }
- put_page(page);
+ *priv->folio_sz = HPAGE_PMD_SIZE;
+ folio_put(folio);
huge_out:
spin_unlock(ptl);
return 0;
@@ -469,15 +469,14 @@ regular_page:
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
if (!pte_present(*pte))
goto out;
- page = damon_get_page(pte_pfn(*pte));
- if (!page)
+ folio = damon_get_folio(pte_pfn(*pte));
+ if (!folio)
goto out;
- if (pte_young(*pte) || !page_is_idle(page) ||
- mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = PAGE_SIZE;
+ if (pte_young(*pte) || !folio_test_idle(folio) ||
+ mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
- }
- put_page(page);
+ *priv->folio_sz = folio_size(folio);
+ folio_put(folio);
out:
pte_unmap_unlock(pte, ptl);
return 0;
@@ -490,7 +489,7 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
{
struct damon_young_walk_private *priv = walk->private;
struct hstate *h = hstate_vma(walk->vma);
- struct page *page;
+ struct folio *folio;
spinlock_t *ptl;
pte_t entry;
@@ -499,16 +498,15 @@ static int damon_young_hugetlb_entry(pte_t *pte, unsigned long hmask,
if (!pte_present(entry))
goto out;
- page = pte_page(entry);
- get_page(page);
+ folio = pfn_folio(pte_pfn(entry));
+ folio_get(folio);
- if (pte_young(entry) || !page_is_idle(page) ||
- mmu_notifier_test_young(walk->mm, addr)) {
- *priv->page_sz = huge_page_size(h);
+ if (pte_young(entry) || !folio_test_idle(folio) ||
+ mmu_notifier_test_young(walk->mm, addr))
priv->young = true;
- }
+ *priv->folio_sz = huge_page_size(h);
- put_page(page);
+ folio_put(folio);
out:
spin_unlock(ptl);
@@ -524,10 +522,10 @@ static const struct mm_walk_ops damon_young_ops = {
};
static bool damon_va_young(struct mm_struct *mm, unsigned long addr,
- unsigned long *page_sz)
+ unsigned long *folio_sz)
{
struct damon_young_walk_private arg = {
- .page_sz = page_sz,
+ .folio_sz = folio_sz,
.young = false,
};
@@ -547,18 +545,18 @@ static void __damon_va_check_access(struct mm_struct *mm,
struct damon_region *r, bool same_target)
{
static unsigned long last_addr;
- static unsigned long last_page_sz = PAGE_SIZE;
+ static unsigned long last_folio_sz = PAGE_SIZE;
static bool last_accessed;
/* If the region is in the last checked page, reuse the result */
- if (same_target && (ALIGN_DOWN(last_addr, last_page_sz) ==
- ALIGN_DOWN(r->sampling_addr, last_page_sz))) {
+ if (same_target && (ALIGN_DOWN(last_addr, last_folio_sz) ==
+ ALIGN_DOWN(r->sampling_addr, last_folio_sz))) {
if (last_accessed)
r->nr_accesses++;
return;
}
- last_accessed = damon_va_young(mm, r->sampling_addr, &last_page_sz);
+ last_accessed = damon_va_young(mm, r->sampling_addr, &last_folio_sz);
if (last_accessed)
r->nr_accesses++;