Diffstat (limited to 'fs/xfs/xfs_file.c')
 fs/xfs/xfs_file.c | 99 ++++++++++++-----------------------
 1 file changed, 35 insertions(+), 64 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index c4893e226fd8..ec3e44fcf771 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1011,96 +1011,67 @@ xfs_file_llseek(
* page_lock (MM)
* i_lock (XFS - extent map serialisation)
*/
-
-/*
- * mmap()d file has taken write protection fault and is being made writable. We
- * can set the page state up correctly for a writable page, which means we can
- * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
- * mapping.
- */
-STATIC int
-xfs_filemap_page_mkwrite(
- struct vm_fault *vmf)
+static int
+__xfs_filemap_fault(
+ struct vm_fault *vmf,
+ enum page_entry_size pe_size,
+ bool write_fault)
{
struct inode *inode = file_inode(vmf->vma->vm_file);
+ struct xfs_inode *ip = XFS_I(inode);
int ret;
- trace_xfs_filemap_page_mkwrite(XFS_I(inode));
+ trace_xfs_filemap_fault(ip, pe_size, write_fault);
- sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+ if (write_fault) {
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+ }
+ xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
+ ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
} else {
- ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
- ret = block_page_mkwrite_return(ret);
+ if (write_fault)
+ ret = iomap_page_mkwrite(vmf, &xfs_iomap_ops);
+ else
+ ret = filemap_fault(vmf);
}
-
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- sb_end_pagefault(inode->i_sb);
+ if (write_fault)
+ sb_end_pagefault(inode->i_sb);
return ret;
}
-STATIC int
+static int
xfs_filemap_fault(
struct vm_fault *vmf)
{
- struct inode *inode = file_inode(vmf->vma->vm_file);
- int ret;
-
- trace_xfs_filemap_fault(XFS_I(inode));
-
/* DAX can shortcut the normal fault path on write faults! */
- if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
- return xfs_filemap_page_mkwrite(vmf);
-
- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- if (IS_DAX(inode))
- ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
- else
- ret = filemap_fault(vmf);
- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-
- return ret;
+ return __xfs_filemap_fault(vmf, PE_SIZE_PTE,
+ IS_DAX(file_inode(vmf->vma->vm_file)) &&
+ (vmf->flags & FAULT_FLAG_WRITE));
}
-/*
- * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
- * both read and write faults. Hence we need to handle both cases. There is no
- * ->huge_mkwrite callout for huge pages, so we have a single function here to
- * handle both cases here. @flags carries the information on the type of fault
- * occurring.
- */
-STATIC int
+static int
xfs_filemap_huge_fault(
struct vm_fault *vmf,
enum page_entry_size pe_size)
{
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct xfs_inode *ip = XFS_I(inode);
- int ret;
-
- if (!IS_DAX(inode))
+ if (!IS_DAX(file_inode(vmf->vma->vm_file)))
return VM_FAULT_FALLBACK;
- trace_xfs_filemap_huge_fault(ip);
-
- if (vmf->flags & FAULT_FLAG_WRITE) {
- sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
- }
-
- xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = dax_iomap_fault(vmf, pe_size, &xfs_iomap_ops);
- xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-
- if (vmf->flags & FAULT_FLAG_WRITE)
- sb_end_pagefault(inode->i_sb);
+ /* DAX can shortcut the normal fault path on write faults! */
+ return __xfs_filemap_fault(vmf, pe_size,
+ (vmf->flags & FAULT_FLAG_WRITE));
+}
- return ret;
+static int
+xfs_filemap_page_mkwrite(
+ struct vm_fault *vmf)
+{
+ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true);
}
/*
@@ -1130,7 +1101,7 @@ xfs_filemap_pfn_mkwrite(
if (vmf->pgoff >= size)
ret = VM_FAULT_SIGBUS;
else if (IS_DAX(inode))
- ret = dax_pfn_mkwrite(vmf);
+ ret = dax_iomap_fault(vmf, PE_SIZE_PTE, &xfs_iomap_ops);
xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
sb_end_pagefault(inode->i_sb);
return ret;
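
For context (not part of this diff): these handlers only take effect once they are wired into the file's vm_operations_struct at mmap() time, elsewhere in fs/xfs/xfs_file.c. A minimal sketch of that wiring, assuming the contemporaneous upstream names xfs_file_vm_ops and xfs_file_mmap (neither appears in this hunk, so verify against the full tree):

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	/* All of the fault entry points above funnel into __xfs_filemap_fault(). */
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

Routing every entry point through __xfs_filemap_fault() keeps the sb_start_pagefault()/sb_end_pagefault() pairing, the file_update_time() call, and the XFS_MMAPLOCK_SHARED locking in one place: write faults get freeze protection and a timestamp update, while read faults skip both.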