Diffstat (limited to 'fs/9p/vfs_addr.c')
-rw-r--r-- | fs/9p/vfs_addr.c | 336
1 file changed, 180 insertions, 156 deletions
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index cce9ace651a2..47b9a1122f34 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * linux/fs/9p/vfs_addr.c
- *
  * This file contians vfs address (mmap) ops for 9P2000.
  *
  * Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
@@ -18,8 +16,9 @@
 #include <linux/pagemap.h>
 #include <linux/idr.h>
 #include <linux/sched.h>
+#include <linux/swap.h>
 #include <linux/uio.h>
-#include <linux/bvec.h>
+#include <linux/netfs.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>

@@ -29,196 +28,213 @@
 #include "fid.h"

 /**
- * v9fs_fid_readpage - read an entire page in from 9P
- *
- * @fid: fid being read
- * @page: structure to page
- *
+ * v9fs_issue_read - Issue a read from 9P
+ * @subreq: The read to make
  */
-static int v9fs_fid_readpage(void *data, struct page *page)
+static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
-	struct p9_fid *fid = data;
-	struct inode *inode = page->mapping->host;
-	struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE};
+	struct netfs_io_request *rreq = subreq->rreq;
+	struct p9_fid *fid = rreq->netfs_priv;
 	struct iov_iter to;
-	int retval, err;
+	loff_t pos = subreq->start + subreq->transferred;
+	size_t len = subreq->len - subreq->transferred;
+	int total, err;

-	p9_debug(P9_DEBUG_VFS, "\n");
+	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

-	BUG_ON(!PageLocked(page));
+	total = p9_client_read(fid, pos, &to, &err);

-	retval = v9fs_readpage_from_fscache(inode, page);
-	if (retval == 0)
-		return retval;
+	/* if we just extended the file size, any portion not in
+	 * cache won't be on server and is zeroes */
+	__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);

-	iov_iter_bvec(&to, READ, &bvec, 1, PAGE_SIZE);
+	netfs_subreq_terminated(subreq, err ?: total, false);
+}

-	retval = p9_client_read(fid, page_offset(page), &to, &err);
-	if (err) {
-		v9fs_uncache_page(inode, page);
-		retval = err;
-		goto done;
-	}
+/**
+ * v9fs_init_request - Initialise a read request
+ * @rreq: The read request
+ * @file: The file being read from
+ */
+static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+	struct inode *inode = file_inode(file);
+	struct v9fs_inode *v9inode = V9FS_I(inode);
+	struct p9_fid *fid = file->private_data;

-	zero_user(page, retval, PAGE_SIZE - retval);
-	flush_dcache_page(page);
-	SetPageUptodate(page);
+	BUG_ON(!fid);

-	v9fs_readpage_to_fscache(inode, page);
-	retval = 0;
+	/* we might need to read from a fid that was opened write-only
+	 * for read-modify-write of page cache, use the writeback fid
+	 * for that */
+	if (rreq->origin == NETFS_READ_FOR_WRITE &&
+	    (fid->mode & O_ACCMODE) == O_WRONLY) {
+		fid = v9inode->writeback_fid;
+		BUG_ON(!fid);
+	}

-done:
-	unlock_page(page);
-	return retval;
+	p9_fid_get(fid);
+	rreq->netfs_priv = fid;
+	return 0;
 }

 /**
- * v9fs_vfs_readpage - read an entire page in from 9P
- *
- * @filp: file being read
- * @page: structure to page
- *
+ * v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
+ * @rreq: The I/O request to clean up
  */
-
-static int v9fs_vfs_readpage(struct file *filp, struct page *page)
+static void v9fs_free_request(struct netfs_io_request *rreq)
 {
-	return v9fs_fid_readpage(filp->private_data, page);
+	struct p9_fid *fid = rreq->netfs_priv;
+
+	p9_fid_put(fid);
 }

 /**
- * v9fs_vfs_readpages - read a set of pages from 9P
- *
- * @filp: file being read
- * @mapping: the address space
- * @pages: list of pages to read
- * @nr_pages: count of pages to read
- *
+ * v9fs_begin_cache_operation - Begin a cache operation for a read
+ * @rreq: The read request
  */
-
-static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping,
-			      struct list_head *pages, unsigned nr_pages)
+static int v9fs_begin_cache_operation(struct netfs_io_request *rreq)
 {
-	int ret = 0;
-	struct inode *inode;
-
-	inode = mapping->host;
-	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp);
-
-	ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages);
-	if (ret == 0)
-		return ret;
+#ifdef CONFIG_9P_FSCACHE
+	struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));

-	ret = read_cache_pages(mapping, pages, v9fs_fid_readpage,
-			filp->private_data);
-	p9_debug(P9_DEBUG_VFS, " = %d\n", ret);
-	return ret;
+	return fscache_begin_read_operation(&rreq->cache_resources, cookie);
+#else
+	return -ENOBUFS;
+#endif
 }

+const struct netfs_request_ops v9fs_req_ops = {
+	.init_request = v9fs_init_request,
+	.free_request = v9fs_free_request,
+	.begin_cache_operation = v9fs_begin_cache_operation,
+	.issue_read = v9fs_issue_read,
+};
+
 /**
- * v9fs_release_page - release the private state associated with a page
+ * v9fs_release_folio - release the private state associated with a folio
+ * @folio: The folio to be released
+ * @gfp: The caller's allocation restrictions
  *
- * Returns 1 if the page can be released, false otherwise.
+ * Returns true if the page can be released, false otherwise.
  */
-static int v9fs_release_page(struct page *page, gfp_t gfp)
+static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
 {
-	if (PagePrivate(page))
-		return 0;
-	return v9fs_fscache_release_page(page, gfp);
+	struct inode *inode = folio_inode(folio);
+
+	if (folio_test_private(folio))
+		return false;
+#ifdef CONFIG_9P_FSCACHE
+	if (folio_test_fscache(folio)) {
+		if (current_is_kswapd() || !(gfp & __GFP_FS))
+			return false;
+		folio_wait_fscache(folio);
+	}
+#endif
+	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
+	return true;
 }

-/**
- * v9fs_invalidate_page - Invalidate a page completely or partially
- *
- * @page: structure to page
- * @offset: offset in the page
- */
+static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
+				  size_t length)
+{
+	folio_wait_fscache(folio);
+}

-static void v9fs_invalidate_page(struct page *page, unsigned int offset,
-				 unsigned int length)
+static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+				     bool was_async)
 {
-	/*
-	 * If called with zero offset, we should release
-	 * the private state assocated with the page
-	 */
-	if (offset == 0 && length == PAGE_SIZE)
-		v9fs_fscache_invalidate_page(page);
+	struct v9fs_inode *v9inode = priv;
+	__le32 version;
+
+	if (IS_ERR_VALUE(transferred_or_error) &&
+	    transferred_or_error != -ENOBUFS) {
+		version = cpu_to_le32(v9inode->qid.version);
+		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
+				   i_size_read(&v9inode->netfs.inode), 0);
+	}
 }

-static int v9fs_vfs_writepage_locked(struct page *page)
+static int v9fs_vfs_write_folio_locked(struct folio *folio)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio_inode(folio);
 	struct v9fs_inode *v9inode = V9FS_I(inode);
-	loff_t size = i_size_read(inode);
+	struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
+	loff_t start = folio_pos(folio);
+	loff_t i_size = i_size_read(inode);
 	struct iov_iter from;
-	struct bio_vec bvec;
-	int err, len;
+	size_t len = folio_size(folio);
+	int err;

-	if (page->index == size >> PAGE_SHIFT)
-		len = size & ~PAGE_MASK;
-	else
-		len = PAGE_SIZE;
+	if (start >= i_size)
+		return 0; /* Simultaneous truncation occurred */

-	bvec.bv_page = page;
-	bvec.bv_offset = 0;
-	bvec.bv_len = len;
-	iov_iter_bvec(&from, WRITE, &bvec, 1, len);
+	len = min_t(loff_t, i_size - start, len);
+
+	iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);

 	/* We should have writeback_fid always set */
 	BUG_ON(!v9inode->writeback_fid);

-	set_page_writeback(page);
+	folio_wait_fscache(folio);
+	folio_start_writeback(folio);
+
+	p9_client_write(v9inode->writeback_fid, start, &from, &err);

-	p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err);
+	if (err == 0 &&
+	    fscache_cookie_enabled(cookie) &&
+	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
+		folio_start_fscache(folio);
+		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
+				       folio_mapping(folio), start, len, i_size,
+				       v9fs_write_to_cache_done, v9inode,
+				       true);
+	}

-	end_page_writeback(page);
+	folio_end_writeback(folio);
 	return err;
 }

 static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct folio *folio = page_folio(page);
 	int retval;

-	p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);

-	retval = v9fs_vfs_writepage_locked(page);
+	retval = v9fs_vfs_write_folio_locked(folio);
 	if (retval < 0) {
 		if (retval == -EAGAIN) {
-			redirty_page_for_writepage(wbc, page);
+			folio_redirty_for_writepage(wbc, folio);
 			retval = 0;
 		} else {
-			SetPageError(page);
-			mapping_set_error(page->mapping, retval);
+			mapping_set_error(folio_mapping(folio), retval);
 		}
 	} else
 		retval = 0;

-	unlock_page(page);
+	folio_unlock(folio);
 	return retval;
 }

-/**
- * v9fs_launder_page - Writeback a dirty page
- * Returns 0 on success.
- */
-
-static int v9fs_launder_page(struct page *page)
+static int v9fs_launder_folio(struct folio *folio)
 {
 	int retval;
-	struct inode *inode = page->mapping->host;

-	v9fs_fscache_wait_on_page_write(inode, page);
-	if (clear_page_dirty_for_io(page)) {
-		retval = v9fs_vfs_writepage_locked(page);
+	if (folio_clear_dirty_for_io(folio)) {
+		retval = v9fs_vfs_write_folio_locked(folio);
 		if (retval)
 			return retval;
 	}
+	folio_wait_fscache(folio);
 	return 0;
 }

 /**
  * v9fs_direct_IO - 9P address space operation for direct I/O
  * @iocb: target I/O control block
+ * @iter: The data/buffer to use
  *
  * The presence of v9fs_direct_IO() in the address space ops vector
  * allowes open() O_DIRECT flags which would have failed otherwise.
@@ -238,11 +254,13 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	loff_t pos = iocb->ki_pos;
 	ssize_t n;
 	int err = 0;
+
 	if (iov_iter_rw(iter) == WRITE) {
 		n = p9_client_write(file->private_data, pos, iter, &err);
 		if (n) {
 			struct inode *inode = file_inode(file);
 			loff_t i_size = i_size_read(inode);
+
 			if (pos + n > i_size)
 				inode_add_bytes(inode, pos + n - i_size);
 		}
@@ -253,58 +271,49 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 }

 static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
-			    loff_t pos, unsigned len, unsigned flags,
-			    struct page **pagep, void **fsdata)
+			    loff_t pos, unsigned int len,
+			    struct page **subpagep, void **fsdata)
 {
-	int retval = 0;
-	struct page *page;
-	struct v9fs_inode *v9inode;
-	pgoff_t index = pos >> PAGE_SHIFT;
-	struct inode *inode = mapping->host;
-
+	int retval;
+	struct folio *folio;
+	struct v9fs_inode *v9inode = V9FS_I(mapping->host);

 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

-	v9inode = V9FS_I(inode);
-start:
-	page = grab_cache_page_write_begin(mapping, index, flags);
-	if (!page) {
-		retval = -ENOMEM;
-		goto out;
-	}
 	BUG_ON(!v9inode->writeback_fid);
-	if (PageUptodate(page))
-		goto out;

-	if (len == PAGE_SIZE)
-		goto out;
+	/* Prefetch area to be written into the cache if we're caching this
+	 * file. We need to do this before we get a lock on the page in case
+	 * there's more than one writer competing for the same cache block.
+	 */
+	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
+	if (retval < 0)
+		return retval;

-	retval = v9fs_fid_readpage(v9inode->writeback_fid, page);
-	put_page(page);
-	if (!retval)
-		goto start;
-out:
-	*pagep = page;
+	*subpagep = &folio->page;
 	return retval;
 }

 static int v9fs_write_end(struct file *filp, struct address_space *mapping,
-			  loff_t pos, unsigned len, unsigned copied,
-			  struct page *page, void *fsdata)
+			  loff_t pos, unsigned int len, unsigned int copied,
+			  struct page *subpage, void *fsdata)
 {
 	loff_t last_pos = pos + copied;
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(subpage);
+	struct inode *inode = mapping->host;
+	struct v9fs_inode *v9inode = V9FS_I(inode);

 	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);

-	if (!PageUptodate(page)) {
+	if (!folio_test_uptodate(folio)) {
 		if (unlikely(copied < len)) {
 			copied = 0;
 			goto out;
-		} else if (len == PAGE_SIZE) {
-			SetPageUptodate(page);
 		}
+
+		folio_mark_uptodate(folio);
 	}
+
 	/*
 	 * No need to use i_size_read() here, the i_size
 	 * cannot change under us because we hold the i_mutex.
@@ -312,25 +321,40 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
 	if (last_pos > inode->i_size) {
 		inode_add_bytes(inode, last_pos - inode->i_size);
 		i_size_write(inode, last_pos);
+		fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
 	}
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 out:
-	unlock_page(page);
-	put_page(page);
+	folio_unlock(folio);
+	folio_put(folio);
 	return copied;
 }

+#ifdef CONFIG_9P_FSCACHE
+/*
+ * Mark a page as having been made dirty and thus needing writeback. We also
+ * need to pin the cache object to write back to.
+ */
+static bool v9fs_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
+
+	return fscache_dirty_folio(mapping, folio, v9fs_inode_cookie(v9inode));
+}
+#else
+#define v9fs_dirty_folio filemap_dirty_folio
+#endif
+
 const struct address_space_operations v9fs_addr_operations = {
-	.readpage = v9fs_vfs_readpage,
-	.readpages = v9fs_vfs_readpages,
-	.set_page_dirty = __set_page_dirty_nobuffers,
+	.read_folio = netfs_read_folio,
+	.readahead = netfs_readahead,
+	.dirty_folio = v9fs_dirty_folio,
 	.writepage = v9fs_vfs_writepage,
 	.write_begin = v9fs_write_begin,
 	.write_end = v9fs_write_end,
-	.releasepage = v9fs_release_page,
-	.invalidatepage = v9fs_invalidate_page,
-	.launder_page = v9fs_launder_page,
+	.release_folio = v9fs_release_folio,
+	.invalidate_folio = v9fs_invalidate_folio,
+	.launder_folio = v9fs_launder_folio,
 	.direct_IO = v9fs_direct_IO,
 };
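
Note (illustrative, not part of the patch): the conversion above hinges on one
contract with the netfs helper library. The filesystem points an iov_iter at
the pagecache region netfs has prepared, fills it from the server, and reports
the outcome; netfs handles locking, short-read resubmission, and completion.
A minimal sketch of that contract follows, with a hypothetical transport
helper myfs_read_from_server() standing in for 9p's p9_client_read(); the
netfs calls themselves are the real API of this kernel series.

#include <linux/netfs.h>
#include <linux/uio.h>

/* Hypothetical transport: fill @to from the server at @pos; returns the
 * number of bytes read or a negative errno. */
extern ssize_t myfs_read_from_server(void *priv, loff_t pos, struct iov_iter *to);

static void myfs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct netfs_io_request *rreq = subreq->rreq;
	struct iov_iter to;
	loff_t pos = subreq->start + subreq->transferred;
	size_t len = subreq->len - subreq->transferred;
	ssize_t ret;

	/* Aim an iterator at the pagecache pages netfs wants filled. */
	iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);

	ret = myfs_read_from_server(rreq->netfs_priv, pos, &to);

	/* Report bytes transferred (or -errno); netfs re-issues any
	 * shortfall and unlocks/completes the folios itself. */
	netfs_subreq_terminated(subreq, ret, false);
}

/* .issue_read is the only mandatory hook; the others are optional. */
static const struct netfs_request_ops myfs_req_ops = {
	.issue_read = myfs_issue_read,
};

With this in place, read_folio and readahead reduce to the generic
netfs_read_folio()/netfs_readahead() entries seen in v9fs_addr_operations
above, and the old per-page read loop disappears from the filesystem.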