Diffstat (limited to 'io_uring/io_uring.c')
 io_uring/io_uring.c | 41 ++++++++++++++++++++++-------------------
 1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 58ac13b69dc8..db623b3185c8 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -731,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
size_t ocq_size = sizeof(struct io_overflow_cqe);
bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
+ lockdep_assert_held(&ctx->completion_lock);
+
if (is_cqe32)
ocq_size += sizeof(struct io_uring_cqe);
@@ -820,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
{
struct io_uring_cqe *cqe;
- if (!ctx->task_complete)
- lockdep_assert_held(&ctx->completion_lock);
-
ctx->cq_extra++;
/*
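These two hunks relocate the completion_lock assertion: io_fill_cqe_aux() used to assert it only when !ctx->task_complete, while io_cqring_event_overflow(), which is expected to run under the lock on every path, now asserts it unconditionally. As a rough userspace analogue of lockdep_assert_held() (a sketch only, not kernel code; a default, non-recursive pthread mutex is assumed):

	#include <assert.h>
	#include <errno.h>
	#include <pthread.h>

	/* Weak stand-in for lockdep_assert_held(): trylock on a held,
	 * non-recursive mutex returns EBUSY. Unlike lockdep this cannot
	 * verify that *this* thread is the owner, and in the failure case
	 * it briefly acquires the lock before the assert fires. */
	static void assert_held(pthread_mutex_t *m)
	{
		int rc = pthread_mutex_trylock(m);
		assert(rc == EBUSY);
		(void)rc;
	}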
@@ -1236,13 +1235,18 @@ static void io_req_local_work_add(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
+ percpu_ref_get(&ctx->refs);
+
+ if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
+ percpu_ref_put(&ctx->refs);
return;
+ }
/* need it for the following io_cqring_wake() */
smp_mb__after_atomic();
if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
io_move_task_work_from_local(ctx);
+ percpu_ref_put(&ctx->refs);
return;
}
@@ -1252,6 +1256,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
if (ctx->has_evfd)
io_eventfd_signal(ctx);
__io_cqring_wake(ctx);
+ percpu_ref_put(&ctx->refs);
}
void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
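This pair of hunks makes io_req_local_work_add() hold a ctx reference for its entire run: the reference is taken before the request is published on work_llist and dropped on each of the three exit paths. Once the node is visible on the list, another task can consume it and drop the ring's last reference, so the later dereferences of ctx (the eventfd signal and the wakeup) are only safe behind a reference of our own. A minimal userspace sketch of that discipline, with hypothetical names and a plain atomic counter standing in for percpu_ref:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct ctx {
		atomic_int refs;
		/* ... ring state ... */
	};

	static void ctx_get(struct ctx *c) { atomic_fetch_add(&c->refs, 1); }

	static void ctx_put(struct ctx *c)
	{
		if (atomic_fetch_sub(&c->refs, 1) == 1)
			free(c);                /* last reference: tear down */
	}

	static void add_local_work(struct ctx *c, bool first_on_list)
	{
		ctx_get(c);                     /* pin ctx before publishing */
		if (!first_on_list) {           /* a waker is already pending */
			ctx_put(c);
			return;
		}
		/* ... signal eventfd, wake waiters: c is pinned by our ref ... */
		ctx_put(c);
	}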
@@ -1760,17 +1765,12 @@ queue:
}
spin_unlock(&ctx->completion_lock);
- ret = io_req_prep_async(req);
- if (ret) {
-fail:
- io_req_defer_failed(req, ret);
- return;
- }
io_prep_async_link(req);
de = kmalloc(sizeof(*de), GFP_KERNEL);
if (!de) {
ret = -ENOMEM;
- goto fail;
+ io_req_defer_failed(req, ret);
+ return;
}
spin_lock(&ctx->completion_lock);
@@ -2043,13 +2043,16 @@ static void io_queue_sqe_fallback(struct io_kiocb *req)
req->flags &= ~REQ_F_HARDLINK;
req->flags |= REQ_F_LINK;
io_req_defer_failed(req, req->cqe.res);
- } else if (unlikely(req->ctx->drain_active)) {
- io_drain_req(req);
} else {
int ret = io_req_prep_async(req);
- if (unlikely(ret))
+ if (unlikely(ret)) {
io_req_defer_failed(req, ret);
+ return;
+ }
+
+ if (unlikely(req->ctx->drain_active))
+ io_drain_req(req);
else
io_queue_iowq(req, NULL);
}
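Taken together, the two hunks above move io_req_prep_async() out of the drain path and into its caller: io_queue_sqe_fallback() now preps the request first and only then chooses between the drain path and the io-wq path, so a request reaches io_drain_req() already prepped. With prep gone, io_drain_req() is left with a single failure site (-ENOMEM from the kmalloc), so the fail: label is retired in favor of failing inline. A compilable sketch of the reordered fallback flow, with stubbed, hypothetical helpers rather than the real kernel functions:

	#include <stdbool.h>

	struct request { int res; };

	/* Stubs so the sketch compiles; the real logic lives in io_uring. */
	static int  prep_async(struct request *r)          { (void)r; return 0; }
	static bool drain_active(struct request *r)        { (void)r; return false; }
	static void fail_request(struct request *r, int e) { r->res = e; }
	static void park_for_drain(struct request *r)      { (void)r; }
	static void queue_to_worker(struct request *r)     { (void)r; }

	static void queue_fallback(struct request *req)
	{
		int ret = prep_async(req);      /* prep before the drain check */

		if (ret) {
			fail_request(req, ret);
			return;
		}
		if (drain_active(req))
			park_for_drain(req);    /* drain path no longer preps */
		else
			queue_to_worker(req);
	}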
@@ -2465,7 +2468,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
/* when returns >0, the caller should retry */
static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
struct io_wait_queue *iowq,
- ktime_t timeout)
+ ktime_t *timeout)
{
int ret;
unsigned long check_cq;
@@ -2483,7 +2486,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
return -EBADR;
}
- if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+ if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
return -ETIME;
/*
@@ -2559,7 +2562,7 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
}
prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
TASK_INTERRUPTIBLE);
- ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+ ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
if (__io_cqring_events_user(ctx) >= min_events)
break;
cond_resched();
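These three hunks change io_cqring_wait_schedule() to take the timeout as ktime_t * and hand that same pointer to schedule_hrtimeout(), so the outer cq_wait retry loop and the sleeping helper share a single timeout object instead of each call operating on a fresh by-value copy; presumably the point is that any adjustment to the deadline survives a retry of the loop (for example when task_work arriving mid-wait bounces the loop without enough events being posted). A compilable toy version of the shared-budget idea, all names hypothetical:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static int events;

	/* Consumes part of the budget; the caller sees the update because
	 * the budget travels by pointer, as the wait path now does. */
	static bool sleep_until(int64_t *budget_ns)
	{
		*budget_ns -= 100;
		events++;
		return *budget_ns > 0;          /* false: timed out */
	}

	static int wait_for_events(int64_t budget_ns, int min_events)
	{
		while (events < min_events)
			if (!sleep_until(&budget_ns))   /* shared, not a copy */
				return -1;
		return 0;
	}

	int main(void)
	{
		printf("%d\n", wait_for_events(250, 2)); /* budget spans retries */
		return 0;
	}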
@@ -3669,7 +3672,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
&& !(ctx->flags & IORING_SETUP_R_DISABLED))
- ctx->submitter_task = get_task_struct(current);
+ WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
file = io_uring_get_file(ctx);
if (IS_ERR(file)) {
@@ -3863,7 +3866,7 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
return -EBADFD;
if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
- ctx->submitter_task = get_task_struct(current);
+ WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
if (ctx->restrictions.registered)
ctx->restricted = 1;
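Both stores to ctx->submitter_task gain WRITE_ONCE(). The annotation only makes sense if some reader loads the field without the writer's locking, presumably through a matching READ_ONCE() elsewhere; the marked access keeps the compiler from tearing or duplicating the pointer store. Relaxed C11 atomics are roughly the userspace analogue, as in this sketch (hypothetical types, not the io_uring structures):

	#include <stdatomic.h>

	struct task;

	struct ring {
		/* read locklessly elsewhere, so every access is marked */
		struct task *_Atomic submitter;
	};

	/* Writer: publish the submitter once during create/enable. */
	static void set_submitter(struct ring *r, struct task *t)
	{
		atomic_store_explicit(&r->submitter, t, memory_order_relaxed);
	}

	/* A concurrent reader never observes a torn pointer. */
	static struct task *get_submitter(struct ring *r)
	{
		return atomic_load_explicit(&r->submitter, memory_order_relaxed);
	}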