aboutsummaryrefslogtreecommitdiffstatshomepage
diff options
context:
space:
mode:
authorZeng Jingxiang <linuszeng@tencent.com>2025-02-27 16:22:23 +0800
committerAndrew Morton <akpm@linux-foundation.org>2025-03-17 00:05:32 -0700
commitb9585a3f3e0b30b3b60c85dc39f27ed3b06fb623 (patch)
tree79a1c0063c3b7bad76a060dae2b8996f74c40131
parentmm: rename GENERIC_PTDUMP and PTDUMP_CORE (diff)
downloadwireguard-linux-b9585a3f3e0b30b3b60c85dc39f27ed3b06fb623.tar.xz
wireguard-linux-b9585a3f3e0b30b3b60c85dc39f27ed3b06fb623.zip
mm/list_lru: make the case where mlru is NULL as unlikely
In the following memcg_list_lru_alloc() function, mlru is almost always NULL, so in most cases this should save a function call. Mark mlru as unlikely to optimize the code, and reuse the mlru for the next attempt when the tree insertion fails. do { xas_lock_irqsave(&xas, flags); if (!xas_load(&xas) && !css_is_dying(&pos->css)) { xas_store(&xas, mlru); if (!xas_error(&xas)) mlru = NULL; } xas_unlock_irqrestore(&xas, flags); } while (xas_nomem(&xas, GFP_KERNEL)); > if (mlru) kfree(mlru); Link: https://lkml.kernel.org/r/20250227082223.1173847-1-jingxiangzeng.cas@gmail.com Signed-off-by: Zeng Jingxiang <linuszeng@tencent.com> Reported-by: kernel test robot <lkp@intel.com> Closes: https://lore.kernel.org/oe-kbuild-all/202412290924.UTP7GH2Z-lkp@intel.com/ Suggested-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Muchun Song <muchun.song@linux.dev> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Shakeel Butt <shakeel.butt@linux.dev> Cc: Chengming Zhou <chengming.zhou@linux.dev> Cc: Jingxiang Zeng <linuszeng@tencent.com> Cc: Kairui Song <kasong@tencent.com> Cc: Michal Hocko <mhocko@kernel.org> Cc: Roman Gushchin <roman.gushchin@linux.dev> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--mm/list_lru.c15
1 files changed, 9 insertions, 6 deletions
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7d69434c70e0..490473af3122 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -510,7 +510,7 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
gfp_t gfp)
{
unsigned long flags;
- struct list_lru_memcg *mlru;
+ struct list_lru_memcg *mlru = NULL;
struct mem_cgroup *pos, *parent;
XA_STATE(xas, &lru->xa, 0);
@@ -535,9 +535,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
parent = parent_mem_cgroup(pos);
}
- mlru = memcg_init_list_lru_one(lru, gfp);
- if (!mlru)
- return -ENOMEM;
+ if (!mlru) {
+ mlru = memcg_init_list_lru_one(lru, gfp);
+ if (!mlru)
+ return -ENOMEM;
+ }
xas_set(&xas, pos->kmemcg_id);
do {
xas_lock_irqsave(&xas, flags);
@@ -548,10 +550,11 @@ int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
}
xas_unlock_irqrestore(&xas, flags);
} while (xas_nomem(&xas, gfp));
- if (mlru)
- kfree(mlru);
} while (pos != memcg && !css_is_dying(&pos->css));
+ if (unlikely(mlru))
+ kfree(mlru);
+
return xas_error(&xas);
}
#else