author    Alex Shi (Tencent) <alexs@kernel.org>  2024-07-12 12:14:35 +0800
committer Vlastimil Babka <vbabka@suse.cz>       2024-07-15 10:42:30 +0200
commit    a52c6330ff2fe1163333fa6609bdc6e8763ec286 (patch)
tree      8b27b8d9841e632c9ec9b70ff9b06891c215d03b
parent    mm, slab: move prepare_slab_obj_exts_hook under CONFIG_MEM_ALLOC_PROFILING (diff)
mm/memcg: align memcg_data define conditions
Commit 21c690a349ba ("mm: introduce slabobj_ext to support slab object
extensions") changed the define condition for folio/page->memcg_data from
MEMCG to SLAB_OBJ_EXT, which leaves memcg_data exposed even when !MEMCG.
As Vlastimil Babka suggested, add a _unused_slab_obj_exts placeholder so
that SLAB_MATCH can still check against slab.obj_exts when !MEMCG. This
resolves the layout-match issue and cleans up the feature logic.

Signed-off-by: Alex Shi (Tencent) <alexs@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yoann Congal <yoann.congal@smile.fr>
Cc: Masahiro Yamada <masahiroy@kernel.org>
Cc: Petr Mladek <pmladek@suse.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
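For context, SLAB_MATCH in mm/slab.h is an offsetof-based static assertion
that the named field of struct page and the named field of struct slab sit
at the same offset; roughly:

	#define SLAB_MATCH(pg, sl) \
		static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))

This is why a field must exist in struct page under every configuration in
which struct slab has obj_exts, even if only as an unused placeholder.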
Diffstat:
 include/linux/mm_types.h | 9 +++++++--
 mm/slab.h                | 4 +++-
 2 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index af3a0256fa93..a199c48bc462 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -169,8 +169,10 @@ struct page {
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
#endif
/*
@@ -298,6 +300,7 @@ typedef struct {
* @_hugetlb_cgroup_rsvd: Do not use directly, use accessor in hugetlb_cgroup.h.
* @_hugetlb_hwpoison: Do not use directly, call raw_hwp_list_head().
* @_deferred_list: Folios to be split under memory pressure.
+ * @_unused_slab_obj_exts: Placeholder to match obj_exts in struct slab.
*
* A folio is a physically, virtually and logically contiguous set
* of bytes. It is a power-of-two in size, and it is aligned to that
@@ -332,8 +335,10 @@ struct folio {
};
atomic_t _mapcount;
atomic_t _refcount;
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
unsigned long memcg_data;
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+ unsigned long _unused_slab_obj_exts;
#endif
#if defined(WANT_PAGE_VIRTUAL)
void *virtual;
diff --git a/mm/slab.h b/mm/slab.h
index 5f8f47c5bee0..a240945487e0 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -97,8 +97,10 @@ struct slab {
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
-#ifdef CONFIG_SLAB_OBJ_EXT
+#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
+#elif defined(CONFIG_SLAB_OBJ_EXT)
+SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
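
The pattern generalizes: when two structs overlay the same memory and a
field exists only under some configs, a placeholder field keeps the
offsetof assertions valid in every configuration. Below is a standalone
sketch of that idea (hypothetical CONFIG_A/CONFIG_B options and struct
names, not kernel code; CONFIG_A plays the role of CONFIG_MEMCG and
CONFIG_B that of CONFIG_SLAB_OBJ_EXT):

	#include <assert.h>   /* static_assert (C11) */
	#include <stddef.h>   /* offsetof */

	struct base {                         /* role of struct page */
		unsigned long flags;
	#ifdef CONFIG_A
		unsigned long a_data;                 /* real field */
	#elif defined(CONFIG_B)
		unsigned long _unused_a_data;         /* placeholder keeps layout stable */
	#endif
	};

	struct overlay {                      /* role of struct slab */
		unsigned long flags2;
	#if defined(CONFIG_A) || defined(CONFIG_B)
		unsigned long ext;                    /* role of slab->obj_exts */
	#endif
	};

	/* The placeholder lets the check hold whether or not CONFIG_A is set. */
	#ifdef CONFIG_A
	static_assert(offsetof(struct base, a_data) ==
		      offsetof(struct overlay, ext), "layout mismatch");
	#elif defined(CONFIG_B)
	static_assert(offsetof(struct base, _unused_a_data) ==
		      offsetof(struct overlay, ext), "layout mismatch");
	#endif
	static_assert(sizeof(struct overlay) <= sizeof(struct base),
		      "overlay must fit in base");

Without the placeholder, building with CONFIG_B but not CONFIG_A would
make the offsetof assertion reference a nonexistent field and fail to
compile, which is exactly the situation this patch fixes for
SLAB_OBJ_EXT without MEMCG.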