author     Will Deacon <will@kernel.org>    2021-02-12 14:59:10 +0000
committer  Will Deacon <will@kernel.org>    2021-02-12 14:59:10 +0000
commit     6b76c3aedb07588ef558ba33896d6ae75229c7b7 (patch)
tree       20aa9df0681ec6d4c17d68dd06b0bccbfb1d8422 /include/linux
parent     Merge branch 'for-next/errata' into for-next/core (diff)
parent     mm: filemap: Fix microblaze build failure with 'mmu_defconfig' (diff)
Merge branch 'for-next/faultaround' into for-next/core

Initialise prefaulted PTEs as 'old' for arm64 when hardware access-flag
updates are supported, which drastically improves vmscan performance.

* for-next/faultaround:
  mm: filemap: Fix microblaze build failure with 'mmu_defconfig'
  mm/nommu: Fix return type of filemap_map_pages()
  mm: Mark anonymous struct field of 'struct vm_fault' as 'const'
  mm: Use static initialisers for immutable fields of 'struct vm_fault'
  mm: Avoid modifying vmf.address in __collapse_huge_page_swapin()
  mm: Pass 'address' to map to do_set_pte() and drop FAULT_FLAG_PREFAULT
  mm: Move immutable fields of 'struct vm_fault' into anonymous struct
  arm64: mm: Implement arch_wants_old_prefaulted_pte()
  mm: Allow architectures to request 'old' entries when prefaulting
  mm: Cleanup faultaround and finish_fault() codepaths
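The mechanism behind the commit message is the arch_wants_old_prefaulted_pte()
hook added by this series: do_set_pte() writes PTEs installed around the
faulting address (but not the faulting address itself) as 'old' when the
architecture opts in. A minimal sketch of the hook and its use, modeled on the
patches in this branch; the exact surrounding code is illustrative:

        /*
         * Generic fallback: assume upgrading an 'old' PTE to 'young' takes
         * a fault, so prefaulted entries should start out 'young'.
         */
        #ifndef arch_wants_old_prefaulted_pte
        static inline bool arch_wants_old_prefaulted_pte(void)
        {
                return false;
        }
        #endif

        /* In do_set_pte(): only the address actually faulted on is 'young'. */
        bool prefault = vmf->address != addr;
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        if (prefault && arch_wants_old_prefaulted_pte())
                entry = pte_mkold(entry);

arm64 then defines the hook to return true whenever hardware access-flag
updates are available, so the 'old' bit is cleared by hardware on first access
instead of by taking a fault.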
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/mm.h       25
-rw-r--r--  include/linux/pgtable.h  11
2 files changed, 26 insertions, 10 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ecdf8a8cd6ae..7ff3d9817d38 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -514,11 +514,14 @@ static inline bool fault_flag_allow_retry_first(unsigned int flags)
  * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
-	struct vm_area_struct *vma;	/* Target VMA */
-	unsigned int flags;		/* FAULT_FLAG_xxx flags */
-	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
-	pgoff_t pgoff;			/* Logical page offset based on vma */
-	unsigned long address;		/* Faulting virtual address */
+	const struct {
+		struct vm_area_struct *vma;	/* Target VMA */
+		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
+		pgoff_t pgoff;			/* Logical page offset based on vma */
+		unsigned long address;		/* Faulting virtual address */
+	};
+	unsigned int flags;		/* FAULT_FLAG_xxx flags
+					 * XXX: should really be 'const' */
 	pmd_t *pmd;			/* Pointer to pmd entry matching
 					 * the 'address' */
 	pud_t *pud;			/* Pointer to pud entry matching
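Because vma, gfp_mask, pgoff and address now sit inside a const anonymous
struct, they can only be set when the vm_fault is created; the series converts
callers to designated initialisers, after which any later write is a compile
error. A sketch of the pattern, modeled on the fault-handling entry points in
mm/memory.c (field values illustrative):

        struct vm_fault vmf = {
                .vma = vma,
                .address = address & PAGE_MASK,
                .flags = flags,
                .pgoff = linear_page_index(vma, address),
                .gfp_mask = __get_fault_gfp_mask(vma),
        };

        /* vmf.address = 0; would no longer compile: 'address' is a member
         * of a const anonymous struct. */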
@@ -542,8 +545,8 @@ struct vm_fault {
 					 * is not NULL, otherwise pmd.
 					 */
 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
-					 * vm_ops->map_pages() calls
-					 * alloc_set_pte() from atomic context.
+					 * vm_ops->map_pages() sets up a page
+					 * table from atomic context.
 					 * do_fault_around() pre-allocates
 					 * page table to avoid allocation from
 					 * atomic context.
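The pre-allocation the comment refers to happens before the atomic section:
do_fault_around() stashes a spare PTE page in vmf->prealloc_pte, and
map_pages() installs it if the PMD is still empty. A sketch of that step,
based on do_fault_around() around the time of this series:

        if (pmd_none(*vmf->pmd)) {
                vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
                if (!vmf->prealloc_pte)
                        return VM_FAULT_OOM;
                smp_wmb(); /* See comment in __pte_alloc() */
        }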
@@ -578,7 +581,7 @@ struct vm_operations_struct {
 	vm_fault_t (*fault)(struct vm_fault *vmf);
 	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
 			enum page_entry_size pe_size);
-	void (*map_pages)(struct vm_fault *vmf,
+	vm_fault_t (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 	unsigned long (*pagesize)(struct vm_area_struct * area);
 
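With map_pages() returning vm_fault_t, the fault-around path can report
"already handled" directly instead of having the caller re-inspect the page
tables. Roughly how do_read_fault() consumes it after this series (a sketch;
error paths trimmed):

        if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
                ret = do_fault_around(vmf);     /* forwards ->map_pages()'s result */
                if (ret)
                        return ret;             /* typically VM_FAULT_NOPAGE */
        }
        /* Fall back to a full ->fault() only if fault-around did not map
         * the faulting address. */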
@@ -988,7 +991,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
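Splitting alloc_set_pte() into do_set_pmd() and do_set_pte() gives
finish_fault() an explicit sequence: map and lock the PTE table, then install
the entry only if the slot is still empty. A sketch of the resulting core of
finish_fault(), modeled on this series (THP and error handling trimmed):

        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
                                       vmf->address, &vmf->ptl);
        ret = 0;
        /* Re-check under the PTL: another thread may have raced with us. */
        if (likely(pte_none(*vmf->pte)))
                do_set_pte(vmf, page, vmf->address);
        else
                ret = VM_FAULT_NOPAGE;

        update_mmu_tlb(vma, vmf->address, vmf->pte);
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return ret;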
@@ -2622,7 +2627,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 
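filemap_map_pages() uses the new return type to short-circuit its caller: when
one of the prefaulted pages turns out to cover vmf->address itself, it returns
VM_FAULT_NOPAGE and do_read_fault() skips the ->fault() callback entirely. A
sketch of the relevant fragment of the mapping loop (per this series; the
surrounding XArray walk and locking are omitted):

        if (!pte_none(*vmf->pte))
                goto unlock;

        /* We're about to handle the fault */
        if (vmf->address == addr)
                ret = VM_FAULT_NOPAGE;

        do_set_pte(vmf, page, addr);
        /* no need to invalidate: a not-present page won't be cached */
        update_mmu_cache(vma, addr, vmf->pte);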
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 8fcdfa52eb4b..36eb748f3c97 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1314,6 +1314,17 @@ static inline int pmd_trans_unstable(pmd_t *pmd)
 #endif
 }
 
+/*
+ * the ordering of these checks is important for pmds with _PAGE_DEVMAP set.
+ * if we check pmd_trans_unstable() first we will trip the bad_pmd() check
+ * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly
+ * returning 1 but not before it spams dmesg with the pmd_clear_bad() output.
+ */
+static inline int pmd_devmap_trans_unstable(pmd_t *pmd)
+{
+	return pmd_devmap(*pmd) || pmd_trans_unstable(pmd);
+}
+
 #ifndef CONFIG_NUMA_BALANCING
 /*
  * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
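Callers use the helper as a gate before touching the PTE table; a sketch of
the check as it appears in finish_fault() and filemap_map_pages() in this
series:

        /*
         * A devmap or splitting-THP PMD means there is no stable PTE table
         * to walk; bail out without tripping pmd_clear_bad().
         */
        if (pmd_devmap_trans_unstable(vmf->pmd))
                return 0;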