author		Muchun Song <songmuchun@bytedance.com>	2022-04-28 23:16:10 -0700
committer	akpm <akpm@linux-foundation.org>	2022-04-28 23:16:10 -0700
commit		6a8e0596f00469c15ec556b9f3624acd2e9a04f9 (patch)
tree		6d25a184335a5b2e2dd2e84fdc4b7e1cc989573f /mm/internal.h
parent		dax: fix cache flush on PMD-mapped pages (diff)
mm: rmap: introduce pfn_mkclean_range() to clean PTEs
page_mkclean_one() is supposed to be used with a pfn that has an
associated struct page, but not all pfns (e.g. DAX) have a struct page.
Introduce a new function, pfn_mkclean_range(), to clean the PTEs
(including PMDs) mapped to a range of pfns that have no struct page
associated with them.  This helper will be used by the DAX device in the
next patch to make pfns clean.

Link: https://lkml.kernel.org/r/20220403053957.10770-4-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jan Kara <jack@suse.cz>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Ross Zwisler <zwisler@kernel.org>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Xiyu Yang <xiyuyang19@fudan.edu.cn>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
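For context only: the declaration of pfn_mkclean_range() lands in
include/linux/rmap.h elsewhere in this series, not in the mm/internal.h
hunks below.  The following is a minimal sketch of how a filesystem-DAX
caller might drive the new helper over every shared mapping of a file
range.  The signature of pfn_mkclean_range(), the i_mmap walk, and the
dax_range_mkclean() name are assumptions based on the commit message,
not code taken from this diff.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/rmap.h>

/* Assumed declaration from the rest of this series (not shown here). */
int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages,
		      pgoff_t pgoff, struct vm_area_struct *vma);

/*
 * Hypothetical caller: write-protect and clean every shared mapping of
 * the DAX pfn range backing file pages [pgoff, pgoff + nr_pages).
 */
static void dax_range_mkclean(struct address_space *mapping, pgoff_t pgoff,
			      unsigned long nr_pages, unsigned long pfn)
{
	struct vm_area_struct *vma;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
				  pgoff + nr_pages - 1) {
		/* Private mappings have their own copy; only shared
		 * mappings can dirty the backing pfns. */
		if (!(vma->vm_flags & VM_SHARED))
			continue;
		pfn_mkclean_range(pfn, nr_pages, pgoff, vma);
	}
	i_mmap_unlock_read(mapping);
}

This mirrors the page_mkclean()-style rmap walk, but is keyed purely by
(pgoff, nr_pages, pfn), so it never needs a struct page.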
Diffstat (limited to 'mm/internal.h')
-rw-r--r--	mm/internal.h	26
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 6293468fd7b7..69a5eabe0943 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -525,26 +525,22 @@ void mlock_page_drain_remote(int cpu);
extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
/*
- * At what user virtual address is page expected in vma?
- * Returns -EFAULT if all of the page is outside the range of vma.
- * If page is a compound head, the entire compound page is considered.
+ * Return the start of the user virtual address at the specific page
+ * offset within a vma.
*/
static inline unsigned long
-vma_address(struct page *page, struct vm_area_struct *vma)
+vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
+ struct vm_area_struct *vma)
{
- pgoff_t pgoff;
unsigned long address;
- VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
- pgoff = page_to_pgoff(page);
if (pgoff >= vma->vm_pgoff) {
address = vma->vm_start +
((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
/* Check for address beyond vma (or wrapped through 0?) */
if (address < vma->vm_start || address >= vma->vm_end)
address = -EFAULT;
- } else if (PageHead(page) &&
- pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
+ } else if (pgoff + nr_pages - 1 >= vma->vm_pgoff) {
/* Test above avoids possibility of wrap to 0 on 32-bit */
address = vma->vm_start;
} else {
@@ -554,6 +550,18 @@ vma_address(struct page *page, struct vm_area_struct *vma)
}
/*
+ * Return the start of the user virtual address of a page within a vma.
+ * Returns -EFAULT if all of the page is outside the range of the vma.
+ * If the page is a compound head, the entire compound page is considered.
+ */
+static inline unsigned long
+vma_address(struct page *page, struct vm_area_struct *vma)
+{
+ VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
+ return vma_pgoff_address(page_to_pgoff(page), compound_nr(page), vma);
+}
+
+/*
* Then at what user virtual address will none of the range be found in vma?
* Assumes that vma_address() already returned a good starting address.
*/