Diffstat (limited to 'fs/afs/write.c')
-rw-r--r--	fs/afs/write.c	134
1 file changed, 100 insertions(+), 34 deletions(-)
diff --git a/fs/afs/write.c b/fs/afs/write.c
index ca4909baf5e6..9ebdd36eaf2f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -12,23 +12,37 @@
 #include <linux/writeback.h>
 #include <linux/pagevec.h>
 #include <linux/netfs.h>
-#include <linux/fscache.h>
 #include "internal.h"
 
+static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
+			       loff_t i_size, bool caching);
+
+#ifdef CONFIG_AFS_FSCACHE
 /*
- * mark a page as having been made dirty and thus needing writeback
+ * Mark a page as having been made dirty and thus needing writeback.  We also
+ * need to pin the cache object to write back to.
  */
-int afs_set_page_dirty(struct page *page)
+bool afs_dirty_folio(struct address_space *mapping, struct folio *folio)
+{
+	return fscache_dirty_folio(mapping, folio,
+				   afs_vnode_cache(AFS_FS_I(mapping->host)));
+}
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
+	if (caching)
+		folio_start_fscache(folio);
+}
+#else
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
 {
-	_enter("");
-	return __set_page_dirty_nobuffers(page);
 }
+#endif
 
 /*
  * prepare to perform part of a write to a page
  */
 int afs_write_begin(struct file *file, struct address_space *mapping,
-		    loff_t pos, unsigned len, unsigned flags,
+		    loff_t pos, unsigned len,
 		    struct page **_page, void **fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
@@ -46,8 +60,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 	 * file.  We need to do this before we get a lock on the page in case
 	 * there's more than one writer competing for the same cache block.
 	 */
-	ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
-				&afs_req_ops, NULL);
+	ret = netfs_write_begin(&vnode->netfs, file, mapping, pos, len, &folio, fsdata);
 	if (ret < 0)
 		return ret;
 
@@ -78,7 +91,7 @@ try_again:
 		goto flush_conflicting_write;
 	}
 
-	*_page = &folio->page;
+	*_page = folio_file_page(folio, pos / PAGE_SIZE);
 	_leave(" = 0");
 	return 0;
 
@@ -114,7 +127,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	unsigned long priv;
 	unsigned int f, from = offset_in_folio(folio, pos);
 	unsigned int t, to = from + copied;
-	loff_t i_size, maybe_i_size;
+	loff_t i_size, write_end_pos;
 
 	_enter("{%llx:%llu},{%lx}",
 	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
@@ -131,15 +144,16 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 	if (copied == 0)
 		goto out;
 
-	maybe_i_size = pos + copied;
+	write_end_pos = pos + copied;
 
-	i_size = i_size_read(&vnode->vfs_inode);
-	if (maybe_i_size > i_size) {
+	i_size = i_size_read(&vnode->netfs.inode);
+	if (write_end_pos > i_size) {
 		write_seqlock(&vnode->cb_lock);
-		i_size = i_size_read(&vnode->vfs_inode);
-		if (maybe_i_size > i_size)
-			afs_set_i_size(vnode, maybe_i_size);
+		i_size = i_size_read(&vnode->netfs.inode);
+		if (write_end_pos > i_size)
+			afs_set_i_size(vnode, write_end_pos);
 		write_sequnlock(&vnode->cb_lock);
+		fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
 	}
 
 	if (folio_test_private(folio)) {
@@ -243,7 +257,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
  */
 static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
 {
-	struct address_space *mapping = vnode->vfs_inode.i_mapping;
+	struct address_space *mapping = vnode->netfs.inode.i_mapping;
 	struct folio *folio;
 	pgoff_t end;
 
@@ -342,7 +356,7 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
 {
 	struct afs_operation *op;
 	struct afs_wb_key *wbk = NULL;
-	loff_t size = iov_iter_count(iter), i_size;
+	loff_t size = iov_iter_count(iter);
 	int ret = -ENOKEY;
 
 	_enter("%s{%llx:%llu.%u},%llx,%llx",
@@ -364,17 +378,15 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
 		return -ENOMEM;
 	}
 
-	i_size = i_size_read(&vnode->vfs_inode);
-
 	afs_op_set_vnode(op, 0, vnode);
 	op->file[0].dv_delta = 1;
 	op->file[0].modification = true;
 	op->store.write_iter = iter;
 	op->store.pos = pos;
 	op->store.size = size;
-	op->store.i_size = max(pos + size, i_size);
+	op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
 	op->store.laundering = laundering;
-	op->mtime = vnode->vfs_inode.i_mtime;
+	op->mtime = vnode->netfs.inode.i_mtime;
 	op->flags |= AFS_OPERATION_UNINTR;
 	op->ops = &afs_store_data_operation;
 
@@ -418,6 +430,7 @@ static void afs_extend_writeback(struct address_space *mapping,
 				 loff_t start,
 				 loff_t max_len,
 				 bool new_content,
+				 bool caching,
 				 unsigned int *_len)
 {
 	struct pagevec pvec;
@@ -464,7 +477,9 @@ static void afs_extend_writeback(struct address_space *mapping,
 				folio_put(folio);
 				break;
 			}
-			if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+			if (!folio_test_dirty(folio) ||
+			    folio_test_writeback(folio) ||
+			    folio_test_fscache(folio)) {
 				folio_unlock(folio);
 				folio_put(folio);
 				break;
@@ -512,6 +527,7 @@ static void afs_extend_writeback(struct address_space *mapping,
 			BUG();
 		if (folio_start_writeback(folio))
 			BUG();
+		afs_folio_start_fscache(caching, folio);
 
 		*_count -= folio_nr_pages(folio);
 		folio_unlock(folio);
@@ -537,8 +553,9 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	struct iov_iter iter;
 	unsigned long priv;
 	unsigned int offset, to, len, max_len;
-	loff_t i_size = i_size_read(&vnode->vfs_inode);
+	loff_t i_size = i_size_read(&vnode->netfs.inode);
 	bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+	bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
 	long count = wbc->nr_to_write;
 	int ret;
 
@@ -546,6 +563,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 
 	if (folio_start_writeback(folio))
 		BUG();
+	afs_folio_start_fscache(caching, folio);
 
 	count -= folio_nr_pages(folio);
 
@@ -572,7 +590,8 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 		if (len < max_len &&
 		    (to == folio_size(folio) || new_content))
 			afs_extend_writeback(mapping, vnode, &count,
-					     start, max_len, new_content, &len);
+					     start, max_len, new_content,
+					     caching, &len);
 		len = min_t(loff_t, len, max_len);
 	}
 
@@ -585,12 +604,18 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	if (start < i_size) {
 		_debug("write back %x @%llx [%llx]", len, start, i_size);
 
+		/* Speculatively write to the cache.  We have to fix this up
+		 * later if the store fails.
+		 */
+		afs_write_to_cache(vnode, start, len, i_size, caching);
+
 		iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
 		ret = afs_store_data(vnode, &iter, start, false);
 	} else {
 		_debug("write discard %x @%llx [%llx]", len, start, i_size);
 
 		/* The dirty region was entirely beyond the EOF. */
+		fscache_clear_page_bits(mapping, start, len, caching);
 		afs_pages_written_back(vnode, start, len);
 		ret = 0;
 	}
@@ -610,6 +635,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
 	case -EKEYEXPIRED:
 	case -EKEYREJECTED:
 	case -EKEYREVOKED:
+	case -ENETRESET:
 		afs_redirty_pages(wbc, mapping, start, len);
 		mapping_set_error(mapping, ret);
 		break;
@@ -649,6 +675,10 @@ int afs_writepage(struct page *subpage, struct writeback_control *wbc)
 
 	_enter("{%lx},", folio_index(folio));
 
+#ifdef CONFIG_AFS_FSCACHE
+	folio_wait_fscache(folio);
+#endif
+
 	start = folio_index(folio) * PAGE_SIZE;
 	ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
 					       folio, start, LLONG_MAX - start);
@@ -671,7 +701,7 @@ static int afs_writepages_region(struct address_space *mapping,
 	struct folio *folio;
 	struct page *head_page;
 	ssize_t ret;
-	int n;
+	int n, skips = 0;
 
 	_enter("%llx,%llx,", start, end);
 
@@ -714,11 +744,23 @@ static int afs_writepages_region(struct address_space *mapping,
 			continue;
 		}
 
-		if (folio_test_writeback(folio)) {
+		if (folio_test_writeback(folio) ||
+		    folio_test_fscache(folio)) {
 			folio_unlock(folio);
-			if (wbc->sync_mode != WB_SYNC_NONE)
+			if (wbc->sync_mode != WB_SYNC_NONE) {
 				folio_wait_writeback(folio);
+#ifdef CONFIG_AFS_FSCACHE
+				folio_wait_fscache(folio);
+#endif
+			} else {
+				start += folio_size(folio);
+			}
 			folio_put(folio);
+			if (wbc->sync_mode == WB_SYNC_NONE) {
+				if (skips >= 5 || need_resched())
+					break;
+				skips++;
+			}
 			continue;
 		}
 
@@ -802,7 +844,7 @@ ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	_enter("{%llx:%llu},{%zu},",
 	       vnode->fid.vid, vnode->fid.vnode, count);
 
-	if (IS_SWAPFILE(&vnode->vfs_inode)) {
+	if (IS_SWAPFILE(&vnode->netfs.inode)) {
 		printk(KERN_INFO
 		       "AFS: Attempt to write to active swap file!\n");
 		return -EBUSY;
@@ -915,8 +957,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
 	/* Discard unused keys */
 	spin_lock(&vnode->wb_lock);
 
-	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
-	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
+	if (!mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
+	    !mapping_tagged(&vnode->netfs.inode.i_data, PAGECACHE_TAG_DIRTY)) {
 		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
 			if (refcount_read(&wbk->usage) == 1)
 				list_move(&wbk->vnode_link, &graveyard);
@@ -935,9 +977,8 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
 /*
  * Clean up a page during invalidation.
  */
-int afs_launder_page(struct page *subpage)
+int afs_launder_folio(struct folio *folio)
 {
-	struct folio *folio = page_folio(subpage);
 	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
 	struct iov_iter iter;
 	struct bio_vec bv[1];
@@ -945,7 +986,7 @@ int afs_launder_page(struct page *subpage)
 	unsigned int f, t;
 	int ret = 0;
 
-	_enter("{%lx}", folio_index(folio));
+	_enter("{%lx}", folio->index);
 
 	priv = (unsigned long)folio_get_private(folio);
 	if (folio_clear_dirty_for_io(folio)) {
@@ -970,3 +1011,28 @@ int afs_launder_page(struct page *subpage)
 	folio_wait_fscache(folio);
 	return ret;
 }
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+				    bool was_async)
+{
+	struct afs_vnode *vnode = priv;
+
+	if (IS_ERR_VALUE(transferred_or_error) &&
+	    transferred_or_error != -ENOBUFS)
+		afs_invalidate_cache(vnode, 0);
+}
+
+/*
+ * Save the write to the cache also.
+ */
+static void afs_write_to_cache(struct afs_vnode *vnode,
+			       loff_t start, size_t len, loff_t i_size,
+			       bool caching)
+{
+	fscache_write_to_cache(afs_vnode_cache(vnode),
+			       vnode->netfs.inode.i_mapping, start, len, i_size,
+			       afs_write_to_cache_done, vnode, caching);
+}
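The pattern this diff introduces is write-through caching driven from writeback: folios are flagged PG_fscache (afs_folio_start_fscache()) under the folio lock alongside PG_writeback, the store to the server is issued, and fscache_write_to_cache() copies the same pagecache range into the local cache, clearing the page bits as the cache I/O completes. Below is a minimal sketch of that flow for a hypothetical netfs-based filesystem; the foofs_* names and struct layout are illustrative assumptions, and only the fscache_*, folio_* and i_size_read() calls are the real API used by the patch above.

#include <linux/err.h>
#include <linux/netfs.h>
#include <linux/fscache.h>

struct foofs_inode {
	struct netfs_inode netfs;	/* netfs library state; must come first */
	struct fscache_cookie *cache;	/* cookie pinning the backing cache object */
};

/* Terminal callback, run when the cache write completes.  As in
 * afs_write_to_cache_done() above, any real error other than -ENOBUFS
 * (which only means the cache had no space or was unavailable) leaves
 * the cached copy inconsistent with the server, so it must be
 * invalidated.  A real filesystem would also pass its aux data here.
 */
static void foofs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
				      bool was_async)
{
	struct foofs_inode *fi = priv;

	if (IS_ERR_VALUE(transferred_or_error) &&
	    transferred_or_error != -ENOBUFS)
		fscache_invalidate(fi->cache, NULL,
				   i_size_read(&fi->netfs.inode), 0);
}

/* Speculative write-through for a dirty range about to be stored to the
 * server.  Every folio in [start, start + len) must already be marked
 * PG_fscache under its folio lock; when caching is false no bits were
 * set and fscache_write_to_cache() simply invokes the callback with
 * -ENOBUFS instead of starting cache I/O.
 */
static void foofs_write_to_cache(struct foofs_inode *fi,
				 loff_t start, size_t len)
{
	bool caching = fscache_cookie_enabled(fi->cache);

	fscache_write_to_cache(fi->cache, fi->netfs.inode.i_mapping,
			       start, len, i_size_read(&fi->netfs.inode),
			       foofs_write_to_cache_done, fi, caching);
}

This mirrors the ordering constraint visible in the hunks above: afs_writepages_region() and afs_extend_writeback() treat PG_fscache exactly like PG_writeback when deciding whether to wait for or skip a folio, which is why both bits are set before either the server store or the cache write is launched.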