author		David Howells <dhowells@redhat.com>	2025-05-19 10:07:04 +0100
committer	Christian Brauner <brauner@kernel.org>	2025-05-21 14:35:21 +0200
commit		2b1424cd131cfaba4cf7040473133d26cddac088 (patch)
tree		cf279e47bbadebd7cad6cd589641c4d99f0da644 /fs/netfs/write_issue.c
parent		netfs: Fix the request's work item to not require a ref (diff)
netfs: Fix wait/wake to be consistent about the waitqueue used
Fix further inconsistencies in the use of waitqueues
(clear_and_wake_up_bit() vs private waitqueue).

Move some of this stuff from the read and write sides into common code so
that it can be done in fewer places.

To make this work, async I/O needs to set NETFS_RREQ_OFFLOAD_COLLECTION to
indicate that a workqueue will do the collecting and places that call the
wait function need to deal with it returning the amount transferred.

Fixes: e2d46f2ec332 ("netfs: Change the read result collector to only use one work item")
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/20250519090707.2848510-5-dhowells@redhat.com
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Steve French <stfrench@microsoft.com>
cc: Ihor Solodrai <ihor.solodrai@pm.me>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Paulo Alcantara <pc@manguebit.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: v9fs@lists.linux.dev
cc: linux-cifs@vger.kernel.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
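[Editorial note: to make the direction concrete, here is a minimal sketch of
the wait/wake pattern the message describes. It is illustrative only, not the
patch's actual code: the example_* names are hypothetical, and the real netfs
helpers also carry tracing and pause handling. The completing side clears
NETFS_RREQ_IN_PROGRESS with clear_and_wake_up_bit() so that everything parks
on the same flag bit, and the wait side hands back the amount transferred
rather than only an error code.]

/* Illustrative sketch only; the example_* names are hypothetical.
 * Assumes <linux/netfs.h>, <linux/wait_bit.h> and <linux/workqueue.h>. */
static void example_wake_collector(struct netfs_io_request *rreq)
{
	if (test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags))
		queue_work(system_unbound_wq, &rreq->work); /* async I/O: a workqueue collects */
	else
		wake_up(&rreq->waitq); /* sync I/O: the waiting thread collects */
}

static void example_mark_complete(struct netfs_io_request *rreq)
{
	/* One consistent completion point: clear the flag bit and wake
	 * wait_on_bit() sleepers in a single step. */
	clear_and_wake_up_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
}

static ssize_t example_wait_for_request(struct netfs_io_request *rreq)
{
	/* Park on the flag bit rather than on a private waitqueue. */
	wait_on_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);

	/* Return the amount transferred so that ssize_t callers can use it. */
	return rreq->error ? rreq->error : rreq->transferred;
}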
Diffstat (limited to 'fs/netfs/write_issue.c')
 fs/netfs/write_issue.c | 28 +++++++++++++---------------
 1 file changed, 13 insertions(+), 15 deletions(-)
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 8744ed3faf29..50bee2c4130d 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -542,7 +542,7 @@ static void netfs_end_issue_write(struct netfs_io_request *wreq)
 	}
 
 	if (needs_poke)
-		netfs_wake_write_collector(wreq);
+		netfs_wake_collector(wreq);
 }
 
 /*
@@ -576,6 +576,7 @@ int netfs_writepages(struct address_space *mapping,
 		goto couldnt_start;
 	}
 
+	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 	trace_netfs_write(wreq, netfs_write_trace_writeback);
 	netfs_stat(&netfs_n_wh_writepages);
 
@@ -599,7 +600,7 @@ int netfs_writepages(struct address_space *mapping,
 	netfs_end_issue_write(wreq);
 	mutex_unlock(&ictx->wb_lock);
 
-	netfs_wake_write_collector(wreq);
+	netfs_wake_collector(wreq);
 	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	_leave(" = %d", error);
 	return error;
@@ -674,11 +675,11 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
 /*
  * End a write operation used when writing through the pagecache.
  */
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			   struct folio *writethrough_cache)
+ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *writethrough_cache)
 {
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
-	int ret;
+	ssize_t ret;
 
 	_enter("R=%x", wreq->debug_id);
 
@@ -689,12 +690,10 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 
 	mutex_unlock(&ictx->wb_lock);
 
-	if (wreq->iocb) {
+	if (wreq->iocb)
 		ret = -EIOCBQUEUED;
-	} else {
-		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE);
-		ret = wreq->error;
-	}
+	else
+		ret = netfs_wait_for_write(wreq);
 	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	return ret;
 }
@@ -723,10 +722,8 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 		start += part;
 		len -= part;
 		rolling_buffer_advance(&wreq->buffer, part);
-		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {
-			trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause);
-			wait_event(wreq->waitq, !test_bit(NETFS_RREQ_PAUSE, &wreq->flags));
-		}
+		if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+			netfs_wait_for_paused_write(wreq);
 		if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
 			break;
 	}
@@ -886,6 +883,7 @@ int netfs_writeback_single(struct address_space *mapping,
 		goto couldnt_start;
 	}
 
+	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
 	trace_netfs_write(wreq, netfs_write_trace_writeback_single);
 	netfs_stat(&netfs_n_wh_writepages);
 
@@ -915,7 +913,7 @@ stop:
 	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
 	mutex_unlock(&ictx->wb_lock);
 
-	netfs_wake_write_collector(wreq);
+	netfs_wake_collector(wreq);
 	netfs_put_request(wreq, netfs_rreq_trace_put_return);
 	_leave(" = %d", ret);
 	return ret;
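
[Editorial note: as a usage note on the int to ssize_t change to
netfs_end_writethrough() above, callers now have to treat the return value as
either a status code or a byte count. A hedged caller-side fragment follows;
it is illustrative only, not taken from this patch or its callers.]

	ssize_t written;

	written = netfs_end_writethrough(wreq, wbc, writethrough_cache);
	if (written == -EIOCBQUEUED)
		return written;	/* async iocb: the result is delivered via the iocb */
	if (written < 0)
		return written;	/* the write request failed outright */
	/* Otherwise 'written' is the number of bytes transferred. */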