author	Kefeng Wang <wangkefeng.wang@huawei.com>	2024-06-18 17:12:42 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2024-07-03 19:30:20 -0700
commit	2f9f0854360f0ed0f530dbc4566b052a4a7f3637 (patch)
tree	fcb868e2586c92cebf701a4e5db82e9855b73166 /mm/memory.c
parent	mm: memory: improve copy_user_large_folio() (diff)
mm: memory: rename pages_per_huge_page to nr_pages
Since the callers are converted to use nr_pages naming, use it inside too.

Link: https://lkml.kernel.org/r/20240618091242.2140164-5-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "Huang, Ying" <ying.huang@intel.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
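To make the effect of the renamed parameter concrete, below is a minimal user-space C sketch (not kernel code) of the addressing math that nr_pages feeds in process_huge_page(): masking addr_hint down to the huge-page base and computing the target subpage index. The PAGE_SHIFT/PAGE_SIZE definitions and the example values (512 pages, addr_hint) are illustrative assumptions only.

/*
 * Sketch of the huge-page base/target-index math from process_huge_page().
 * Assumes a 4 KiB base page size; values are made up for illustration.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int nr_pages = 512;			/* e.g. a 2 MiB huge page */
	unsigned long addr_hint = 0x7f0000123000UL;	/* hypothetical faulting address */

	/* Base address of the huge page that contains addr_hint. */
	unsigned long addr = addr_hint &
		~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);

	/* Index of the target subpage within the huge page. */
	unsigned int n = (addr_hint - addr) / PAGE_SIZE;

	printf("huge page base %#lx, target subpage %u of %u\n",
	       addr, n, nr_pages);
	return 0;
}

With these example values the sketch prints base 0x7f0000000000 and target subpage index 291, mirroring the addr and n that the kernel function in the diff below derives before processing the target subpage last to keep its cache lines hot.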
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	24
1 file changed, 12 insertions, 12 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 8ebac8a70ab5..9e87932c30ec 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6387,23 +6387,23 @@ EXPORT_SYMBOL(__might_fault);
* cache lines hot.
*/
static inline int process_huge_page(
- unsigned long addr_hint, unsigned int pages_per_huge_page,
+ unsigned long addr_hint, unsigned int nr_pages,
int (*process_subpage)(unsigned long addr, int idx, void *arg),
void *arg)
{
int i, n, base, l, ret;
unsigned long addr = addr_hint &
- ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
+ ~(((unsigned long)nr_pages << PAGE_SHIFT) - 1);
/* Process target subpage last to keep its cache lines hot */
might_sleep();
n = (addr_hint - addr) / PAGE_SIZE;
- if (2 * n <= pages_per_huge_page) {
+ if (2 * n <= nr_pages) {
/* If target subpage in first half of huge page */
base = 0;
l = n;
/* Process subpages at the end of huge page */
- for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
+ for (i = nr_pages - 1; i >= 2 * n; i--) {
cond_resched();
ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
if (ret)
@@ -6411,8 +6411,8 @@ static inline int process_huge_page(
}
} else {
/* If target subpage in second half of huge page */
- base = pages_per_huge_page - 2 * (pages_per_huge_page - n);
- l = pages_per_huge_page - n;
+ base = nr_pages - 2 * (nr_pages - n);
+ l = nr_pages - n;
/* Process subpages at the begin of huge page */
for (i = 0; i < base; i++) {
cond_resched();
@@ -6442,12 +6442,12 @@ static inline int process_huge_page(
}
static void clear_gigantic_page(struct folio *folio, unsigned long addr,
- unsigned int pages_per_huge_page)
+ unsigned int nr_pages)
{
int i;
might_sleep();
- for (i = 0; i < pages_per_huge_page; i++) {
+ for (i = 0; i < nr_pages; i++) {
cond_resched();
clear_user_highpage(folio_page(folio, i), addr + i * PAGE_SIZE);
}
@@ -6477,15 +6477,15 @@ void folio_zero_user(struct folio *folio, unsigned long addr_hint)
}
static int copy_user_gigantic_page(struct folio *dst, struct folio *src,
- unsigned long addr,
- struct vm_area_struct *vma,
- unsigned int pages_per_huge_page)
+ unsigned long addr,
+ struct vm_area_struct *vma,
+ unsigned int nr_pages)
{
int i;
struct page *dst_page;
struct page *src_page;
- for (i = 0; i < pages_per_huge_page; i++) {
+ for (i = 0; i < nr_pages; i++) {
dst_page = folio_page(dst, i);
src_page = folio_page(src, i);