author    Kairui Song <kasong@tencent.com>  2025-03-14 00:59:32 +0800
committer Andrew Morton <akpm@linux-foundation.org>  2025-03-16 22:06:43 -0700
commit    280cfccaa20c012f0979021939c68ada03c3d973 (patch)
tree      0c6fe80def969f881538f5852994a973e1a42d0c
parent    mm, swap: avoid redundant swap device pinning (diff)
mm, swap: don't update the counter up-front
The design of updating the counter before allocation was useful for avoiding an unnecessary scan when the device is full: allocation aborts early if the counter indicates the device is full. But that is an uncommon case, and scanning a full device is now very fast, so the up-front update no longer helps.

Remove it and simplify the slot allocation logic.

Link: https://lkml.kernel.org/r/20250313165935.63303-5-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <v-songbaohua@oppo.com>
Cc: Chris Li <chrisl@kernel.org>
Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kalesh Singh <kaleshsingh@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  mm/swapfile.c  18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 6f2de59c6355..db836670c334 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1201,22 +1201,10 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
int order = swap_entry_order(entry_order);
unsigned long size = 1 << order;
struct swap_info_struct *si, *next;
- long avail_pgs;
int n_ret = 0;
int node;
spin_lock(&swap_avail_lock);
-
- avail_pgs = atomic_long_read(&nr_swap_pages) / size;
- if (avail_pgs <= 0) {
- spin_unlock(&swap_avail_lock);
- goto noswap;
- }
-
- n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);
-
- atomic_long_sub(n_goal * size, &nr_swap_pages);
-
start_over:
node = numa_node_id();
plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
@@ -1250,10 +1238,8 @@ start_over:
spin_unlock(&swap_avail_lock);
check_out:
- if (n_ret < n_goal)
- atomic_long_add((long)(n_goal - n_ret) * size,
- &nr_swap_pages);
-noswap:
+ atomic_long_sub(n_ret * size, &nr_swap_pages);
+
return n_ret;
}
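
For readers skimming the diff, the following is a minimal userspace sketch (not the kernel implementation) contrasting the two schemes the patch trades between: reserving from the global counter before scanning and refunding the shortfall afterwards, versus scanning first and subtracting only what was actually allocated. Here nr_free_slots, BATCH_LIMIT, scan() and toy_scan() are illustrative stand-ins for nr_swap_pages, SWAP_BATCH and the per-device cluster scan; they are assumptions made for the sketch, not names from the patch.

/*
 * Sketch only: compare "reserve up front, refund unused" with
 * "scan first, subtract what was handed out". Built on C11 atomics
 * instead of the kernel's atomic_long_* helpers.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define BATCH_LIMIT 64			/* plays the role of SWAP_BATCH here */

static atomic_long nr_free_slots;	/* stands in for nr_swap_pages */

/* Old scheme: reserve n_goal up front, refund whatever was not allocated. */
static size_t alloc_reserve_up_front(size_t n_goal, size_t (*scan)(size_t))
{
	long avail = atomic_load(&nr_free_slots);
	size_t n_ret;

	if (avail <= 0)
		return 0;			/* abort early when "full" */
	if (n_goal > BATCH_LIMIT)
		n_goal = BATCH_LIMIT;
	if ((long)n_goal > avail)
		n_goal = (size_t)avail;
	atomic_fetch_sub(&nr_free_slots, (long)n_goal);

	n_ret = scan(n_goal);			/* may return fewer than n_goal */
	if (n_ret < n_goal)			/* give back the unused reservation */
		atomic_fetch_add(&nr_free_slots, (long)(n_goal - n_ret));
	return n_ret;
}

/* New scheme: just scan, then subtract what was actually handed out. */
static size_t alloc_update_after(size_t n_goal, size_t (*scan)(size_t))
{
	size_t n_ret = scan(n_goal > BATCH_LIMIT ? BATCH_LIMIT : n_goal);

	atomic_fetch_sub(&nr_free_slots, (long)n_ret);
	return n_ret;
}

/* Toy scan: pretend the device can satisfy at most 3 slots per call. */
static size_t toy_scan(size_t n_goal)
{
	return n_goal < 3 ? n_goal : 3;
}

int main(void)
{
	atomic_store(&nr_free_slots, 10);
	printf("old: got %zu, left %ld\n",
	       alloc_reserve_up_front(8, toy_scan), atomic_load(&nr_free_slots));

	atomic_store(&nr_free_slots, 10);
	printf("new: got %zu, left %ld\n",
	       alloc_update_after(8, toy_scan), atomic_load(&nr_free_slots));
	return 0;
}

Both paths leave the counter in the same state; the second simply drops the early-abort check and the refund bookkeeping, which is the simplification the patch makes.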