diff options
author | 2025-02-27 14:39:12 -0800 | |
---|---|---|
committer | 2025-02-28 07:05:26 -0700 | |
commit | ff92d824d0b55e35ed2ee77021cbd2ed3e7ae7a2 (patch) | |
tree | dcb2c8a187280f119af100fb20feccc3bb09ca3d /io_uring/rw.c | |
parent | io_uring/rw: move buffer_select outside generic prep (diff) | |
download | wireguard-linux-ff92d824d0b55e35ed2ee77021cbd2ed3e7ae7a2.tar.xz wireguard-linux-ff92d824d0b55e35ed2ee77021cbd2ed3e7ae7a2.zip |
io_uring/rw: move fixed buffer import to issue path
Registered buffers may depend on a linked command, which makes the prep
path too early to import. Move to the issue path when the node is
actually needed like all the other users of fixed buffers.
Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20250227223916.143006-3-kbusch@meta.com
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'io_uring/rw.c')
-rw-r--r-- | io_uring/rw.c | 39 |
1 file changed, 30 insertions, 9 deletions
diff --git a/io_uring/rw.c b/io_uring/rw.c index b21b423b3cf8..7bc23802a388 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -357,31 +357,30 @@ int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_prep_rwv(req, sqe, ITER_SOURCE); } -static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe, +static int io_init_rw_fixed(struct io_kiocb *req, unsigned int issue_flags, int ddir) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - struct io_async_rw *io; + struct io_async_rw *io = req->async_data; int ret; - ret = __io_prep_rw(req, sqe, ddir); - if (unlikely(ret)) - return ret; + if (io->bytes_done) + return 0; - io = req->async_data; - ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, 0); + ret = io_import_reg_buf(req, &io->iter, rw->addr, rw->len, ddir, + issue_flags); iov_iter_save_state(&io->iter, &io->iter_state); return ret; } int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - return io_prep_rw_fixed(req, sqe, ITER_DEST); + return __io_prep_rw(req, sqe, ITER_DEST); } int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - return io_prep_rw_fixed(req, sqe, ITER_SOURCE); + return __io_prep_rw(req, sqe, ITER_SOURCE); } /* @@ -1147,6 +1146,28 @@ ret_eagain: } } +int io_read_fixed(struct io_kiocb *req, unsigned int issue_flags) +{ + int ret; + + ret = io_init_rw_fixed(req, issue_flags, ITER_DEST); + if (unlikely(ret)) + return ret; + + return io_read(req, issue_flags); +} + +int io_write_fixed(struct io_kiocb *req, unsigned int issue_flags) +{ + int ret; + + ret = io_init_rw_fixed(req, issue_flags, ITER_SOURCE); + if (unlikely(ret)) + return ret; + + return io_write(req, issue_flags); +} + void io_rw_fail(struct io_kiocb *req) { int res; |