author		Pavel Begunkov <asml.silence@gmail.com>	2021-02-12 03:23:52 +0000
committer	Jens Axboe <axboe@kernel.dk>	2021-02-12 05:30:25 -0700
commit		4e32635834a30b8aa9583d3899a8ecc6416023fb (patch)
tree		6f0b0ab7d9b18523981c1fa7dd35a3d04357edbc /fs
parent		io_uring: optimise out unlikely link queue (diff)
io_uring: optimise SQPOLL mm/files grabbing
There are two reasons for this. The first is to optimise
io_sq_thread_acquire_mm_files() for the non-SQPOLL case, which currently
does too many checks and function calls in the hot path, e.g. in
io_init_req(). The second is to not grab mm/files when they are not
needed. As __io_queue_sqe() issues only one request now, we can reuse
io_sq_thread_acquire_mm_files() instead of unconditionally acquiring
mm/files.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
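For illustration, the pattern at work is a small inline wrapper that keeps
only the cheap common-case checks in the hot path and pushes the rare
mm/files grabbing into an out-of-line helper. Below is a minimal standalone
C sketch of that shape using GCC/Clang's __builtin_expect; every name,
type, and flag value in it is invented for the example (it is not kernel
code, and the real helpers take different arguments):

#include <stdio.h>

#define SETUP_SQPOLL	(1u << 0)	/* stand-in for IORING_SETUP_SQPOLL */
#define PF_EXITING	(1u << 2)	/* illustrative value, not the kernel's */

struct ring_ctx { unsigned int flags; };
struct task     { unsigned int flags; };

/* Out-of-line slow path: only reached for SQPOLL rings. */
static int acquire_mm_files_slow(struct ring_ctx *ctx)
{
	/* ... grab the submitter's mm and files here ... */
	return 0;
}

/* Inline fast path, mirroring the shape of the new
 * io_sq_thread_acquire_mm_files(): a dying task bails out early, a
 * non-SQPOLL ring pays only two cheap tests, and only the rare SQPOLL
 * case takes a function call. */
static inline int acquire_mm_files(struct ring_ctx *ctx, struct task *cur)
{
	if (__builtin_expect(cur->flags & PF_EXITING, 0))
		return -14;		/* -EFAULT */
	if (!(ctx->flags & SETUP_SQPOLL))
		return 0;		/* common case: nothing to grab */
	return acquire_mm_files_slow(ctx);
}

int main(void)
{
	struct ring_ctx ring = { .flags = 0 };	/* non-SQPOLL ring */
	struct task cur = { .flags = 0 };

	printf("fast path returned %d\n", acquire_mm_files(&ring, &cur));
	return 0;
}

With a non-SQPOLL ring the wrapper reduces to one predicted-not-taken
branch plus one flag test, which is the point of the patch.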
Diffstat (limited to 'fs')
-rw-r--r--	fs/io_uring.c	27
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 26d1080217e5..813d1ccd7a69 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1145,9 +1145,6 @@ static void io_sq_thread_drop_mm_files(void)
 
 static int __io_sq_thread_acquire_files(struct io_ring_ctx *ctx)
 {
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
-
 	if (!current->files) {
 		struct files_struct *files;
 		struct nsproxy *nsproxy;
@@ -1175,15 +1172,9 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 {
 	struct mm_struct *mm;
 
-	if (current->flags & PF_EXITING)
-		return -EFAULT;
 	if (current->mm)
 		return 0;
 
-	/* Should never happen */
-	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
-		return -EFAULT;
-
 	task_lock(ctx->sqo_task);
 	mm = ctx->sqo_task->mm;
 	if (unlikely(!mm || !mmget_not_zero(mm)))
@@ -1198,8 +1189,8 @@ static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
 	return -EFAULT;
 }
 
-static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
-					 struct io_kiocb *req)
+static int __io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+					   struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
 	int ret;
@@ -1219,6 +1210,16 @@ static int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
 	return 0;
 }
 
+static inline int io_sq_thread_acquire_mm_files(struct io_ring_ctx *ctx,
+						struct io_kiocb *req)
+{
+	if (unlikely(current->flags & PF_EXITING))
+		return -EFAULT;
+	if (!(ctx->flags & IORING_SETUP_SQPOLL))
+		return 0;
+	return __io_sq_thread_acquire_mm_files(ctx, req);
+}
+
 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
 					 struct cgroup_subsys_state **cur_css)
 {
@@ -2336,9 +2337,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	struct io_ring_ctx *ctx = req->ctx;
 
 	mutex_lock(&ctx->uring_lock);
-	if (!ctx->sqo_dead &&
-	    !__io_sq_thread_acquire_mm(ctx) &&
-	    !__io_sq_thread_acquire_files(ctx))
+	if (!ctx->sqo_dead && !io_sq_thread_acquire_mm_files(ctx, req))
 		__io_queue_sqe(req);
 	else
 		__io_req_task_cancel(req, -EFAULT);
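One detail worth noting for readers outside the kernel tree: the unlikely()
annotation in the new wrapper is the kernel's branch-prediction hint from
include/linux/compiler.h, which (with branch instrumentation disabled)
boils down to __builtin_expect:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

This lets the compiler move the PF_EXITING bail-out off the straight-line
path, so the common non-SQPOLL submission falls straight through both
checks.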