author      Wei Yang <richardw.yang@linux.intel.com>  2020-04-01 21:10:09 -0700
committer   Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 09:35:31 -0700
commit      6b700b5b3c595594c7c0c608cfe903c4e3a0af2d (patch)
tree        bedec11321b9c3a8da95562cc40b1dd56fd28a04 /mm/vmscan.c
parent      mm: vmscan: replace open codings to NUMA_NO_NODE (diff)
mm/vmscan.c: remove cpu online notification for now
kswapd kernel thread starts either with a CPU affinity set to the full cpu mask
of its target node or without any affinity at all if the node is CPUless.
There is a cpu hotplug callback (kswapd_cpu_online) that implements an
elaborate way to update this mask when a cpu is onlined.

It is not really clear whether there is any actual benefit from this scheme.
Completely CPU-less NUMA nodes rarely gain a new CPU during runtime.  Drop the
code for that reason.  If there is a real usecase then we can resurrect and
simplify the code.

[mhocko@suse.com: rewrite changelog]
Suggested-by: Michal Hocko <mhocko@suse.org>
Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: David Rientjes <rientjes@google.com>
Link: http://lkml.kernel.org/r/20200218224422.3407-1-richardw.yang@linux.intel.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
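For readers unfamiliar with the startup binding the changelog refers to: the per-node kswapd kthread restricts itself to its node's CPUs when it starts, and only a CPU-less node leaves it unbound. The snippet below is a simplified sketch of that startup path, modelled on kswapd() in mm/vmscan.c of this era; the reclaim loop and error handling are elided, so it is illustrative rather than the exact upstream code.

/* Simplified sketch: how kswapd establishes its initial CPU affinity. */
static int kswapd(void *p)
{
	pg_data_t *pgdat = (pg_data_t *)p;
	struct task_struct *tsk = current;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	/*
	 * Bind the thread to its node's CPUs.  A CPU-less node has an
	 * empty mask, so the thread stays free to run anywhere, which is
	 * the case the removed kswapd_cpu_online() callback tried to
	 * "repair" once a CPU was later onlined.
	 */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	/* ... reclaim loop elided ... */
	return 0;
}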
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  27
1 file changed, 1 insertion(+), 26 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e66917cc9f6c..af0b6f0b59cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -4030,27 +4030,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
}
#endif /* CONFIG_HIBERNATION */
-/* It's optimal to keep kswapds on the same CPUs as their memory, but
-   not required for correctness.  So if the last cpu in a node goes
-   away, we get changed to run anywhere: as the first one comes back,
-   restore their cpu bindings. */
-static int kswapd_cpu_online(unsigned int cpu)
-{
-	int nid;
-
-	for_each_node_state(nid, N_MEMORY) {
-		pg_data_t *pgdat = NODE_DATA(nid);
-		const struct cpumask *mask;
-
-		mask = cpumask_of_node(pgdat->node_id);
-
-		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
-			/* One of our CPUs online: restore mask */
-			set_cpus_allowed_ptr(pgdat->kswapd, mask);
-	}
-	return 0;
-}
-
/*
* This kswapd start function will be called by init and node-hot-add.
* On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
@@ -4090,15 +4069,11 @@ void kswapd_stop(int nid)
static int __init kswapd_init(void)
{
-	int nid, ret;
+	int nid;
swap_setup();
for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
-	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					"mm/vmscan:online", kswapd_cpu_online,
-					NULL);
-	WARN_ON(ret < 0);
return 0;
}