path: root/fs/netfs/write_issue.c
author		David Howells <dhowells@redhat.com>	2024-06-06 07:48:55 +0100
committer	Christian Brauner <brauner@kernel.org>	2024-09-12 12:20:40 +0200
commit		983cdcf8fe141b0ce16bc71959a5dc55bcb0764d (patch)
tree		cccc55b248bdb2dbf5cff34750bc57ff88339ed3 /fs/netfs/write_issue.c
parent		netfs: Provide an iterator-reset function (diff)
netfs: Simplify the writeback code
Use the new folio_queue structures to simplify the writeback code. The problem with referring to the i_pages xarray directly is that we may have gaps in the sequence of folios we're writing from that we need to skip when we're removing the writeback mark from the folios we're writing back from.

At the moment the code tries to deal with this by carefully tracking the gaps in each writeback stream (e.g. write to server and write to cache) and divining when there's a gap that spans folios (something that's not helped by folios not being a consistent size).

Instead, the folio_queue buffer contains pointers only to the folios we're dealing with, holds them in ascending order and indicates a gap by placing non-consecutive folios next to each other. This makes it possible to track where we need to clean up to simply by keeping track of where we've processed to on each stream and taking the minimum.

Note that the I/O iterator is always rounded up to the end of the folio, even if that is beyond the EOF position, so that the cache can do DIO from the page. The excess space is cleared, though mmapped writes clobber it.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-18-dhowells@redhat.com/ # v2
Signed-off-by: Christian Brauner <brauner@kernel.org>
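[Editor's sketch, not part of the commit] The cleanup-point idea described above reduces to taking a minimum over per-stream progress positions. A minimal illustration follows; struct stream_progress, its fields and cleanup_point() are simplified stand-ins for the real struct netfs_io_stream state, not names from the patch:

	#include <limits.h>

	/* Illustrative stand-in for per-stream writeback state. */
	struct stream_progress {
		int		active;		/* stream still has work outstanding */
		long long	collected_to;	/* file position processed so far */
	};

	/*
	 * Because the folio_queue holds folios in ascending order, the
	 * position we may clear writeback marks up to is simply the
	 * minimum of what every active stream has processed.
	 */
	static long long cleanup_point(const struct stream_progress *s,
				       unsigned int n)
	{
		long long min = LLONG_MAX;

		for (unsigned int i = 0; i < n; i++)
			if (s[i].active && s[i].collected_to < min)
				min = s[i].collected_to;
		return min;
	}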
Diffstat (limited to 'fs/netfs/write_issue.c')
-rw-r--r--	fs/netfs/write_issue.c	36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 9ead075962f0..25fb7e166cc0 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -107,7 +107,6 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
if (is_buffered && netfs_is_cache_enabled(ictx))
fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
- wreq->contiguity = wreq->start;
wreq->cleaned_to = wreq->start;
wreq->io_streams[0].stream_nr = 0;
@@ -158,6 +157,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
subreq->source = stream->source;
subreq->start = start;
subreq->stream_nr = stream->stream_nr;
+ subreq->io_iter = wreq->io_iter;
_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
@@ -213,22 +213,15 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
* netfs_write_subrequest_terminated() when complete.
*/
static void netfs_do_issue_write(struct netfs_io_stream *stream,
- struct netfs_io_subrequest *subreq,
- struct iov_iter *source)
+ struct netfs_io_subrequest *subreq)
{
struct netfs_io_request *wreq = subreq->rreq;
- size_t size = subreq->len - subreq->transferred;
_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
- // TODO: Use encrypted buffer
- subreq->io_iter = *source;
- iov_iter_advance(source, size);
- iov_iter_truncate(&subreq->io_iter, size);
-
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
stream->issue_write(subreq);
}
@@ -237,8 +230,15 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
struct netfs_io_subrequest *subreq,
struct iov_iter *source)
{
+ size_t size = subreq->len - subreq->transferred;
+
+ // TODO: Use encrypted buffer
+ subreq->io_iter = *source;
+ iov_iter_advance(source, size);
+ iov_iter_truncate(&subreq->io_iter, size);
+
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
- netfs_do_issue_write(stream, subreq, source);
+ netfs_do_issue_write(stream, subreq);
}
static void netfs_issue_write(struct netfs_io_request *wreq,
@@ -249,10 +249,8 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
if (!subreq)
return;
stream->construct = NULL;
-
- if (subreq->start + subreq->len > wreq->start + wreq->submitted)
- WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
- netfs_do_issue_write(stream, subreq, &wreq->io_iter);
+ subreq->io_iter.count = subreq->len;
+ netfs_do_issue_write(stream, subreq);
}
/*
@@ -464,10 +462,11 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
if (choose_s < 0)
break;
stream = &wreq->io_streams[choose_s];
+ wreq->io_iter.iov_offset = stream->submit_off;
+ atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
stream->submit_len, to_eof);
- atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
stream->submit_off += part;
stream->submit_max_len -= part;
if (part > stream->submit_len)
@@ -478,6 +477,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
debug = true;
}
+ wreq->io_iter.iov_offset = 0;
+ iov_iter_advance(&wreq->io_iter, fsize);
atomic64_set(&wreq->issued_to, fpos + fsize);
if (!debug)
@@ -526,10 +527,10 @@ int netfs_writepages(struct address_space *mapping,
netfs_stat(&netfs_n_wh_writepages);
do {
- _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+ _debug("wbiter %lx %llx", folio->index, atomic64_read(&wreq->issued_to));
/* It appears we don't have to handle cyclic writeback wrapping. */
- WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
+ WARN_ON_ONCE(wreq && folio_pos(folio) < atomic64_read(&wreq->issued_to));
if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE &&
unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) {
@@ -673,6 +674,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
part = netfs_advance_write(wreq, upload, start, len, false);
start += part;
len -= part;
+ iov_iter_advance(&wreq->io_iter, part);
if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE);
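[Editor's sketch, not part of the commit] The iterator handling that this patch moves from netfs_do_issue_write() into netfs_reissue_write() follows a copy/advance/truncate pattern. The sketch below uses the real iov_iter helpers from linux/uio.h, but slice_subreq_iter() is an assumed name for illustration only:

	#include <linux/uio.h>

	/*
	 * Give the subrequest a copy of the parent iterator capped at
	 * @size bytes, and step the parent past the region the
	 * subrequest now owns.
	 */
	static void slice_subreq_iter(struct iov_iter *dst,
				      struct iov_iter *source, size_t size)
	{
		*dst = *source;			/* subreq starts where the parent is */
		iov_iter_advance(source, size);	/* parent skips past the slice */
		iov_iter_truncate(dst, size);	/* cap the slice at the subreq length */
	}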