author		Matthew Wilcox (Oracle) <willy@infradead.org>	2025-02-26 16:31:29 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2025-03-17 22:06:58 -0700
commit		fa17ad58f8328e5c089377fed55ca8ed62f7cd1d (patch)
tree		6f87be552fb813f045b34f6fc1121a53e3153425
parent		mm: swap_cgroup: remove double initialization of locals (diff)
hugetlb: convert hugetlb_vma_maps_page() to hugetlb_vma_maps_pfn()
pte_page() is more expensive than pte_pfn() (often it's defined as
pfn_to_page(pte_pfn())), so it makes sense to do the conversion to pfn once
(by calling folio_pfn()) rather than convert the pfn to a page each time.

While this is a very small advantage, the main motivation is removing a
reference to folio->page.

Link: https://lkml.kernel.org/r/20250226163131.3795869-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
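For illustration only, the hedged userspace sketch below mimics why comparing pfns is the cheaper form. It is not kernel code: struct page, pte_pfn(), pfn_to_page(), the 4 KiB page size, the toy mem_map[] array and the maps_page()/maps_pfn() helpers are all simplified stand-ins introduced here, not the real per-architecture definitions.

	/*
	 * Hedged, illustrative userspace sketch -- not kernel code.  The flat
	 * mem_map[] model and the 4 KiB page size are assumptions made only to
	 * show the shape of the old and new checks.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct page { unsigned long flags; };

	static struct page mem_map[1024];		/* toy flat memory map */

	static unsigned long pte_pfn(unsigned long pte)
	{
		return pte >> 12;			/* strip the low flag bits */
	}

	static struct page *pfn_to_page(unsigned long pfn)
	{
		return &mem_map[pfn];			/* extra lookup on every call */
	}

	/* Old shape: pte -> pfn -> struct page, compared against &folio->page. */
	static bool maps_page(unsigned long pte, const struct page *page)
	{
		return pfn_to_page(pte_pfn(pte)) == page;
	}

	/* New shape: the caller computes the folio's pfn once; each check is
	 * then a plain integer comparison. */
	static bool maps_pfn(unsigned long pte, unsigned long pfn)
	{
		return pte_pfn(pte) == pfn;
	}

	int main(void)
	{
		unsigned long pte = (42UL << 12) | 0x7;	/* pfn 42 plus flag bits */

		printf("%d %d\n", maps_page(pte, &mem_map[42]), maps_pfn(pte, 42));
		return 0;
	}

The saving mirrors the patch: hugetlb_unmap_file_folio() computes folio_pfn(folio) once, and every per-VMA check in hugetlb_vma_maps_pfn() becomes an integer compare instead of a pfn-to-page conversion.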
-rw-r--r--	fs/hugetlbfs/inode.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0fc179a59830..a427d41fbca0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -338,8 +338,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
  * mutex for the page in the mapping. So, we can not race with page being
  * faulted into the vma.
  */
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
-				unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+				unsigned long addr, unsigned long pfn)
 {
 	pte_t *ptep, pte;
 
@@ -351,7 +351,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
 	if (huge_pte_none(pte) || !pte_present(pte))
 		return false;
 
-	if (pte_page(pte) == page)
+	if (pte_pfn(pte) == pfn)
 		return true;
 
 	return false;
@@ -396,7 +396,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
 {
 	struct rb_root_cached *root = &mapping->i_mmap;
 	struct hugetlb_vma_lock *vma_lock;
-	struct page *page = &folio->page;
+	unsigned long pfn = folio_pfn(folio);
 	struct vm_area_struct *vma;
 	unsigned long v_start;
 	unsigned long v_end;
@@ -412,7 +412,7 @@ retry:
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
 
-		if (!hugetlb_vma_maps_page(vma, v_start, page))
+		if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
 			continue;
 
 		if (!hugetlb_vma_trylock_write(vma)) {
@@ -462,7 +462,7 @@ retry:
 		 */
 		v_start = vma_offset_start(vma, start);
 		v_end = vma_offset_end(vma, end);
-		if (hugetlb_vma_maps_page(vma, v_start, page))
+		if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
 			unmap_hugepage_range(vma, v_start, v_end, NULL,
 					     ZAP_FLAG_DROP_MARKER);