author     2024-12-16 20:40:55 +0000
committer  2024-12-20 22:34:02 +0100
commit     06fa229ceb36898e68022b5654c017d2c6582d7d (patch)
tree       db540d16164012b660f6096819f5288356749f39 /fs/netfs/write_issue.c
parent     netfs: Add a tracepoint to log the lifespan of folio_queue structs (diff)
netfs: Abstract out a rolling folio buffer implementation
A rolling buffer is a series of folios held in a list of folio_queues. New
folios and folio_queue structs may be inserted at the head simultaneously
with spent ones being removed from the tail without the need for locking.
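As a rough illustration of the idea, here is a minimal userspace model of such a buffer: a linked list of fixed-slot segments standing in for folio_queue structs, with items appended at the head and spent segments retired from the tail. The names (`SEG_SLOTS`, `struct segment`, `model_*`) are hypothetical and this sketch ignores the lockless/concurrent aspect and the iov_iter; it is not the kernel's folio_queue or rolling_buffer implementation.

```c
/* Userspace sketch of a rolling buffer: a linked list of fixed-slot segments.
 * New items are appended at the head; spent segments are retired from the
 * tail.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

#define SEG_SLOTS 4

struct segment {
	void		*slots[SEG_SLOTS];
	unsigned int	nr;		/* Number of slots filled */
	struct segment	*next;		/* Next (newer) segment */
};

struct rolling_model {
	struct segment	*tail;		/* Oldest segment, consumed first */
	struct segment	*head;		/* Newest segment, appended to */
	unsigned int	consumed;	/* Slots already consumed in *tail */
};

/* Append an item, adding a new head segment when the current one is full. */
static void model_append(struct rolling_model *rb, void *item)
{
	if (!rb->head || rb->head->nr == SEG_SLOTS) {
		struct segment *seg = calloc(1, sizeof(*seg));

		if (!seg)
			abort();
		if (rb->head)
			rb->head->next = seg;
		else
			rb->tail = seg;
		rb->head = seg;
	}
	rb->head->slots[rb->head->nr++] = item;
}

/* Consume the oldest item; retire the tail segment once it is spent, but keep
 * the head segment around since it may still be appended to.
 */
static void *model_consume(struct rolling_model *rb)
{
	struct segment *seg = rb->tail;

	if (seg && rb->consumed == seg->nr && seg != rb->head) {
		rb->tail = seg->next;
		rb->consumed = 0;
		free(seg);
		seg = rb->tail;
	}
	if (!seg || rb->consumed >= seg->nr)
		return NULL;
	return seg->slots[rb->consumed++];
}

int main(void)
{
	struct rolling_model rb = { NULL, NULL, 0 };
	int values[10], *p, i;

	for (i = 0; i < 10; i++) {
		values[i] = i;
		model_append(&rb, &values[i]);
	}
	while ((p = model_consume(&rb)))
		printf("%d ", *p);
	putchar('\n');
	free(rb.tail);		/* The last placeholder segment remains */
	return 0;
}
```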
The rolling buffer includes an iov_iter, which has to be managed carefully as
the list of folio_queues is extended so that an oops isn't incurred because
the iterator was pointing to the end of a folio_queue segment that got
appended to and then removed.
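The way the patch handles this, taken from the netfs_prepare_write() hunk in the diff below, is to check whether the iterator currently sits on the used-up placeholder slot of a folio_queue and, if so, extend the queue before the segment is dereferenced. Roughly (with wreq_iter being &wreq->buffer.iter):

```c
	/* Don't leave the iterator pointing at a used-up folio_queue struct
	 * that is only being kept as a placeholder; extend the queue first.
	 */
	if (iov_iter_is_folioq(wreq_iter) &&
	    wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
		rolling_buffer_make_space(&wreq->buffer);
```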
We need to use the mechanism twice, once for read and once for write, and,
in future patches, we will use a second rolling buffer to handle bounce
buffering for content encryption.
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241216204124.3752367-6-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
Diffstat (limited to 'fs/netfs/write_issue.c')
-rw-r--r--  fs/netfs/write_issue.c | 26
1 file changed, 17 insertions, 9 deletions
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 87e5cf4a0957..88ceba49ff69 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -107,6 +107,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 	ictx = netfs_inode(wreq->inode);
 	if (is_buffered && netfs_is_cache_enabled(ictx))
 		fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
+	if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
+		goto nomem;
 
 	wreq->cleaned_to = wreq->start;
 
@@ -129,6 +131,10 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 	}
 
 	return wreq;
+nomem:
+	wreq->error = -ENOMEM;
+	netfs_put_request(wreq, false, netfs_rreq_trace_put_failed);
+	return ERR_PTR(-ENOMEM);
 }
 
 /**
@@ -153,16 +159,15 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 				loff_t start)
 {
 	struct netfs_io_subrequest *subreq;
-	struct iov_iter *wreq_iter = &wreq->io_iter;
+	struct iov_iter *wreq_iter = &wreq->buffer.iter;
 
 	/* Make sure we don't point the iterator at a used-up folio_queue
 	 * struct being used as a placeholder to prevent the queue from
 	 * collapsing. In such a case, extend the queue.
 	 */
 	if (iov_iter_is_folioq(wreq_iter) &&
-	    wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq)) {
-		netfs_buffer_make_space(wreq, netfs_trace_folioq_prep_write);
-	}
+	    wreq_iter->folioq_slot >= folioq_nr_slots(wreq_iter->folioq))
+		rolling_buffer_make_space(&wreq->buffer);
 
 	subreq = netfs_alloc_subrequest(wreq);
 	subreq->source = stream->source;
@@ -327,6 +332,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 
 	_enter("");
 
+	if (rolling_buffer_make_space(&wreq->buffer) < 0)
+		return -ENOMEM;
+
 	/* netfs_perform_write() may shift i_size around the page or from out
 	 * of the page to beyond it, but cannot move i_size into or through the
 	 * page since we have it locked.
@@ -431,7 +439,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	}
 
 	/* Attach the folio to the rolling buffer. */
-	netfs_buffer_append_folio(wreq, folio, false);
+	rolling_buffer_append(&wreq->buffer, folio, 0);
 
 	/* Move the submission point forward to allow for write-streaming data
 	 * not starting at the front of the page.  We don't do write-streaming
@@ -478,7 +486,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 
 		/* Advance the iterator(s).
 		 */
 		if (stream->submit_off > iter_off) {
-			iov_iter_advance(&wreq->io_iter, stream->submit_off - iter_off);
+			rolling_buffer_advance(&wreq->buffer, stream->submit_off - iter_off);
 			iter_off = stream->submit_off;
 		}
@@ -496,7 +504,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	}
 
 	if (fsize > iter_off)
-		iov_iter_advance(&wreq->io_iter, fsize - iter_off);
+		rolling_buffer_advance(&wreq->buffer, fsize - iter_off);
 	atomic64_set(&wreq->issued_to, fpos + fsize);
 
 	if (!debug)
@@ -635,7 +643,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
 			       struct folio **writethrough_cache)
 {
 	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
-	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
+	       wreq->debug_id, wreq->buffer.iter.count, wreq->wsize, copied, to_page_end);
 
 	if (!*writethrough_cache) {
 		if (folio_test_dirty(folio))
@@ -710,7 +718,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 		part = netfs_advance_write(wreq, upload, start, len, false);
 		start += part;
 		len -= part;
-		iov_iter_advance(&wreq->io_iter, part);
+		rolling_buffer_advance(&wreq->buffer, part);
 		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
 			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
 			wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
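Condensed from the hunks above, the write path now drives the request's rolling buffer roughly as follows. This is only a summary of the conversion shown in the diff, with error handling and surrounding logic elided, not additional code:

```c
/* At request creation (netfs_create_write_req): set up the rolling buffer
 * as the source iterator for the request.
 */
if (rolling_buffer_init(&wreq->buffer, wreq->debug_id, ITER_SOURCE) < 0)
	goto nomem;

/* Per folio (netfs_write_folio): make sure there is queue space, then attach
 * the folio to the rolling buffer in place of netfs_buffer_append_folio().
 */
if (rolling_buffer_make_space(&wreq->buffer) < 0)
	return -ENOMEM;
rolling_buffer_append(&wreq->buffer, folio, 0);

/* Wherever the old code advanced wreq->io_iter directly, advance the rolling
 * buffer instead.
 */
rolling_buffer_advance(&wreq->buffer, part);
```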