Diffstat (limited to 'mm')
-rw-r--r-- mm/filemap.c | 46
1 file changed, 35 insertions(+), 11 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index d1060b8d3cd6..5dfc093ceb3d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2379,7 +2379,8 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
ssize_t retval;
- size_t write_len = 0;
+ size_t write_len;
+ pgoff_t end = 0; /* silence gcc */
/*
* If it's a write, unmap all mmappings of the file up-front. This
@@ -2388,23 +2389,46 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
*/
if (rw == WRITE) {
write_len = iov_length(iov, nr_segs);
+ end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
if (mapping_mapped(mapping))
unmap_mapping_range(mapping, offset, write_len, 0);
}
retval = filemap_write_and_wait(mapping);
- if (retval == 0) {
- retval = mapping->a_ops->direct_IO(rw, iocb, iov,
- offset, nr_segs);
- if (rw == WRITE && mapping->nrpages) {
- pgoff_t end = (offset + write_len - 1)
- >> PAGE_CACHE_SHIFT;
- int err = invalidate_inode_pages2_range(mapping,
+ if (retval)
+ goto out;
+
+ /*
+ * After a write we want buffered reads to be sure to go to disk to get
+ * the new data. We invalidate clean cached pages from the region we're
+ * about to write. We do this *before* the write so that we can return
+ * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
+ */
+ if (rw == WRITE && mapping->nrpages) {
+ retval = invalidate_inode_pages2_range(mapping,
offset >> PAGE_CACHE_SHIFT, end);
- if (err)
- retval = err;
- }
+ if (retval)
+ goto out;
}
+
+ retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
+ if (retval)
+ goto out;
+
+ /*
+ * Finally, try again to invalidate clean pages which might have been
+ * faulted in by get_user_pages() if the source of the write was an
+ * mmap()ed region of the file we're writing. That's a pretty crazy
+ * thing to do, so we don't support it 100%. If this invalidation
+ * fails and we have -EIOCBQUEUED we ignore the failure.
+ */
+ if (rw == WRITE && mapping->nrpages) {
+ int err = invalidate_inode_pages2_range(mapping,
+ offset >> PAGE_CACHE_SHIFT, end);
+ if (err && retval >= 0)
+ retval = err;
+ }
+out:
return retval;
}
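
Because both hunks interleave new code with the old error-handling block, the resulting control flow is easier to follow when the function is read whole. Below is a reassembly of generic_file_direct_IO() as it looks with this patch applied, pieced together from the hunks above; the return type, the parameter list after iov, and the elided middle of the first comment are not visible in this diff and are filled in as assumptions.

static ssize_t                                  /* return type assumed */
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
                       loff_t offset, unsigned long nr_segs)  /* tail of signature assumed */
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        ssize_t retval;
        size_t write_len;
        pgoff_t end = 0;        /* silence gcc */

        /* If it's a write, unmap all mmappings of the file up-front ... */
        if (rw == WRITE) {
                write_len = iov_length(iov, nr_segs);
                end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
                if (mapping_mapped(mapping))
                        unmap_mapping_range(mapping, offset, write_len, 0);
        }

        retval = filemap_write_and_wait(mapping);
        if (retval)
                goto out;

        /*
         * Invalidate clean cached pages in the range being written *before*
         * calling ->direct_IO(), so a failure here can return -EIO without
         * clobbering -EIOCBQUEUED from ->direct_IO() later on.
         */
        if (rw == WRITE && mapping->nrpages) {
                retval = invalidate_inode_pages2_range(mapping,
                                        offset >> PAGE_CACHE_SHIFT, end);
                if (retval)
                        goto out;
        }

        retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
        if (retval)
                goto out;

        /*
         * Try again to invalidate clean pages which might have been faulted
         * in by get_user_pages() if the source of the write was an mmap()ed
         * region of the file being written; a failure here only propagates
         * while retval is still >= 0.
         */
        if (rw == WRITE && mapping->nrpages) {
                int err = invalidate_inode_pages2_range(mapping,
                                        offset >> PAGE_CACHE_SHIFT, end);
                if (err && retval >= 0)
                        retval = err;
        }
out:
        return retval;
}

The point of the restructuring is visible in the single out: label: filemap_write_and_wait() and the pre-write invalidation can each bail out early with their own error, while only the post-write invalidation has to take care not to replace a value already sitting in retval.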