aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPavel Begunkov <asml.silence@gmail.com>2025-02-05 11:36:44 +0000
committerJens Axboe <axboe@kernel.dk>2025-02-17 05:34:45 -0700
commitdd4fbb11e7ccc15dbb197a5bbfb2ca8bfda89fcd (patch)
tree7d6ddaa84bd92d09d18728de3443448b62d4619b
parentio_uring/kbuf: remove legacy kbuf kmem cache (diff)
downloadlinux-rng-dd4fbb11e7ccc15dbb197a5bbfb2ca8bfda89fcd.tar.xz
linux-rng-dd4fbb11e7ccc15dbb197a5bbfb2ca8bfda89fcd.zip
io_uring/kbuf: move locking into io_kbuf_drop()
Move the burden of locking out of the caller into io_kbuf_drop(), that will help with further refactoring. Signed-off-by: Pavel Begunkov <asml.silence@gmail.com> Link: https://lore.kernel.org/r/530f0cf1f06963029399f819a9a58b1a34bebef3.1738724373.git.asml.silence@gmail.com Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--io_uring/io_uring.c5
-rw-r--r--io_uring/kbuf.h4
2 files changed, 3 insertions, 6 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 6fa1e88e40fb..ed7c9081352a 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -398,11 +398,8 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- spin_lock(&req->ctx->completion_lock);
+ if (unlikely(req->flags & REQ_F_BUFFER_SELECTED))
io_kbuf_drop(req);
- spin_unlock(&req->ctx->completion_lock);
- }
if (req->flags & REQ_F_NEED_CLEANUP) {
const struct io_cold_def *def = &io_cold_defs[req->opcode];
diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
index bd80c44c5af1..310f94a0727a 100644
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -174,13 +174,13 @@ static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
static inline void io_kbuf_drop(struct io_kiocb *req)
{
- lockdep_assert_held(&req->ctx->completion_lock);
-
if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
return;
+ spin_lock(&req->ctx->completion_lock);
/* len == 0 is fine here, non-ring will always drop all of it */
__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
+ spin_unlock(&req->ctx->completion_lock);
}
static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,