diff options
author | 2024-03-18 20:41:58 -0600 | |
---|---|---|
committer | 2024-04-15 08:10:25 -0600 | |
commit | d10f19dff56eac5ae44dc270336b18071a8bd51c (patch) | |
tree | ecc43b35af05671e2821eaeb455184791ab9efbf /io_uring/uring_cmd.h | |
parent | io_uring/net: move connect to always using async data (diff) | |
download | wireguard-linux-d10f19dff56eac5ae44dc270336b18071a8bd51c.tar.xz wireguard-linux-d10f19dff56eac5ae44dc270336b18071a8bd51c.zip |
io_uring/uring_cmd: switch to always allocating async data
Basic conversion ensuring async_data is allocated off the prep path. Adds
a basic alloc cache as well, as passthrough IO can be quite high in rate.
Tested-by: Anuj Gupta <anuj20.g@samsung.com>
Reviewed-by: Anuj Gupta <anuj20.g@samsung.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to '')
-rw-r--r-- | io_uring/uring_cmd.h | 10 |
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h
index 7356bf9aa655..b0ccff7091ee 100644
--- a/io_uring/uring_cmd.h
+++ b/io_uring/uring_cmd.h
@@ -1,8 +1,16 @@
 // SPDX-License-Identifier: GPL-2.0
 
+struct uring_cache {
+	union {
+		struct io_cache_entry cache;
+		struct io_uring_sqe sqes[2];
+	};
+};
+
 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags);
 int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_uring_cmd_prep_async(struct io_kiocb *req);
+void io_uring_cache_free(struct io_cache_entry *entry);
 
 bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
-		struct task_struct *task, bool cancel_all);
\ No newline at end of file
+		struct task_struct *task, bool cancel_all);