author    Pavel Begunkov <asml.silence@gmail.com>  2020-10-18 10:17:40 +0100
committer Jens Axboe <axboe@kernel.dk>             2020-10-19 13:29:29 -0600
commit    2c3bac6dd6c7af4cb38d1a67cafd29e96ba1fea7
tree      07607f8885f070cb722cad8c7445de989ff7ad23 /fs/io_uring.c
parent    io_uring: inline io_fail_links()
io_uring: make cached_cq_overflow non atomic_t
ctx->cached_cq_overflow is changed only under completion_lock. Convert it
from atomic_t to a plain int, and mark all places where it is read without
the lock with READ_ONCE(), which guarantees atomicity of the load (relaxed
ordering).

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
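To make the pattern behind the patch concrete: a counter whose writers are
all serialized by a lock can be a plain integer, and a lockless reader only
needs READ_ONCE() to get a single, tear-free load. Below is a minimal
userspace sketch of that pattern; the pthread mutex, the local
READ_ONCE()/WRITE_ONCE() definitions and all names are illustrative
assumptions, not the kernel implementation.

/*
 * Minimal userspace sketch (not kernel code) of the pattern this patch
 * applies; names mirror the patch only for readability.
 */
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(): the volatile
 * access forces the compiler to emit exactly one, untorn load or store. */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned cached_cq_overflow;	/* plain int: every write is locked */
static unsigned shared_cq_overflow;	/* stands in for ctx->rings->cq_overflow */

/* Writer side, analogous to io_cqring_overflow_flush(): the increment runs
 * with completion_lock held, so writers never race with each other and no
 * atomic read-modify-write (the old atomic_inc_return()) is needed; only
 * the shared, reader-visible copy needs an untorn WRITE_ONCE() publish. */
static void overflow_one_cqe(void)
{
	pthread_mutex_lock(&completion_lock);
	cached_cq_overflow++;
	WRITE_ONCE(shared_cq_overflow, cached_cq_overflow);
	pthread_mutex_unlock(&completion_lock);
}

/* Reader side, analogous to req_need_defer(): runs without the lock, so
 * the read is marked READ_ONCE(). Ordering is relaxed: the reader only
 * needs an untorn snapshot; seeing the old or the new value is fine. */
static unsigned snapshot_overflow(void)
{
	return READ_ONCE(cached_cq_overflow);
}

int main(void)
{
	overflow_one_cqe();
	printf("overflow count: %u\n", snapshot_overflow());
	return 0;
}

The same split shows up in both write-side hunks below: the single
atomic_inc_return() becomes a lock-protected plain increment followed by an
explicit WRITE_ONCE() publication of the ring's cq_overflow counter.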
Diffstat (limited to 'fs/io_uring.c')
 fs/io_uring.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index dce73e40eecb..620a11f9795a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -277,7 +277,7 @@ struct io_ring_ctx {
 		unsigned		sq_mask;
 		unsigned		sq_thread_idle;
 		unsigned		cached_sq_dropped;
-		atomic_t		cached_cq_overflow;
+		unsigned		cached_cq_overflow;
 		unsigned long		sq_check_overflow;
 
 		struct list_head	defer_list;
@@ -1179,7 +1179,7 @@ static bool req_need_defer(struct io_kiocb *req, u32 seq)
 		struct io_ring_ctx *ctx = req->ctx;
 
 		return seq != ctx->cached_cq_tail
-				+ atomic_read(&ctx->cached_cq_overflow);
+				+ READ_ONCE(ctx->cached_cq_overflow);
 	}
 
 	return false;
@@ -1624,8 +1624,9 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
 			WRITE_ONCE(cqe->res, req->result);
 			WRITE_ONCE(cqe->flags, req->compl.cflags);
 		} else {
+			ctx->cached_cq_overflow++;
 			WRITE_ONCE(ctx->rings->cq_overflow,
-				   atomic_inc_return(&ctx->cached_cq_overflow));
+				   ctx->cached_cq_overflow);
 		}
 	}
 
@@ -1667,8 +1668,8 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
 		 * then we cannot store the request for later flushing, we need
 		 * to drop it on the floor.
 		 */
-		WRITE_ONCE(ctx->rings->cq_overflow,
-			   atomic_inc_return(&ctx->cached_cq_overflow));
+		ctx->cached_cq_overflow++;
+		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
 	} else {
 		if (list_empty(&ctx->cq_overflow_list)) {
 			set_bit(0, &ctx->sq_check_overflow);