author     Ingo Molnar <mingo@kernel.org>  2020-10-09 08:55:17 +0200
committer  Ingo Molnar <mingo@kernel.org>  2020-10-09 08:55:17 +0200
commit     e705d397965811ac528d7213b42d74ffe43caf38 (patch)
tree       8a5bbe85cc42e64992b97859976e307027f83e33 /mm/swap.c
parent     locking/atomics: Check atomic-arch-fallback.h too (diff)
parent     lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables" (diff)
download   wireguard-linux-e705d397965811ac528d7213b42d74ffe43caf38.tar.xz
           wireguard-linux-e705d397965811ac528d7213b42d74ffe43caf38.zip
Merge branch 'locking/urgent' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index a1ec807e325d..65ef7e3525bf 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+		int nr_pages = thp_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_state because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held (spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				      thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }
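
Why the fix matters: a transparent huge page is a single struct page that stands
for many base pages (512 on x86-64 with 2MB THPs). The NR_MLOCK zone counter was
already scaled by thp_nr_pages(), but UNEVICTABLE_PGMLOCKED was bumped by only 1
per THP, so the two counters drifted apart. The sketch below is a minimal
standalone C program illustrating the mismatch and the fix, not kernel code: the
struct page, thp_nr_pages(), and the plain global counters here are simplified
stand-ins for the kernel's per-cpu vmstat machinery.

#include <stdio.h>

/* Simplified stand-ins for the kernel's counters; the real kernel keeps
 * per-cpu vm_event and zone state, not plain globals. */
static unsigned long nr_mlock;              /* models the NR_MLOCK zone counter */
static unsigned long unevictable_pgmlocked; /* models UNEVICTABLE_PGMLOCKED     */

/* Toy page descriptor: order 0 is a base page, order 9 a 2MB THP on x86-64. */
struct page { unsigned int order; };

/* Number of base pages the (possibly huge) page represents. */
static unsigned long thp_nr_pages(const struct page *page)
{
	return 1UL << page->order;
}

/* Before the patch: the event counter moves by 1 even for a 512-page THP. */
static void mlock_account_old(const struct page *page)
{
	nr_mlock += thp_nr_pages(page);
	unevictable_pgmlocked += 1;        /* undercounts THPs */
}

/* After the patch: both counters scale with the number of base pages. */
static void mlock_account_new(const struct page *page)
{
	unsigned long nr_pages = thp_nr_pages(page);

	nr_mlock += nr_pages;
	unevictable_pgmlocked += nr_pages; /* stays in step with NR_MLOCK */
}

int main(void)
{
	struct page thp = { .order = 9 };  /* one 2MB THP = 512 base pages */

	mlock_account_old(&thp);
	printf("old: NR_MLOCK=%lu UNEVICTABLE_PGMLOCKED=%lu\n",
	       nr_mlock, unevictable_pgmlocked);

	nr_mlock = 0;
	unevictable_pgmlocked = 0;

	mlock_account_new(&thp);
	printf("new: NR_MLOCK=%lu UNEVICTABLE_PGMLOCKED=%lu\n",
	       nr_mlock, unevictable_pgmlocked);
	return 0;
}

Hoisting thp_nr_pages(page) into a local nr_pages, as the patch does, also
avoids evaluating it twice and makes it visually obvious that both counters
move by the same amount.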