author     Jens Axboe <axboe@kernel.dk>  2019-11-07 11:41:16 -0700
committer  Jens Axboe <axboe@kernel.dk>  2019-11-07 11:41:35 -0700
commit     c5def4ab849494d3c97f6c9fc84b2ddb868fe78c (patch)
tree       5009eb525156d82f3e223f43583d4a235e27cb12 /fs/io-wq.h
parent     io-wq: io_wqe_run_queue() doesn't need to use list_empty_careful() (diff)
io-wq: add support for bounded vs unbounded work
io_uring supports request types that basically have two different lifetimes:

1) Bounded completion time. These are requests like disk reads or writes,
   which we know will finish in a finite amount of time.
2) Unbounded completion time. These are generally networked IO, where we
   have no idea how long they will take to complete. Another example is
   POLL commands.

This patch provides support for io-wq to handle these differently, so we
don't starve bounded requests by tying up workers for too long. By default
all work is bounded, unless otherwise specified in the work item.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
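As an illustration only (not part of this patch), a request setup path on the
io_uring side could tag networked work with the new flag so io-wq accounts it
against the unbounded worker pool. The io_prep_unbound_work() helper below is
hypothetical; only the IO_WQ_WORK_UNBOUND bit and the work->flags field come
from io-wq itself:

	/*
	 * Hypothetical caller-side sketch: mark a networked request as having
	 * an unbounded completion time. Work left untagged stays bounded by
	 * default, matching the behavior described above.
	 */
	static void io_prep_unbound_work(struct io_wq_work *work)
	{
		work->flags |= IO_WQ_WORK_UNBOUND;
	}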
Diffstat (limited to 'fs/io-wq.h')
-rw-r--r--  fs/io-wq.h | 4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 3de192dc73fc..8cb345256f35 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -9,6 +9,7 @@ enum {
IO_WQ_WORK_HASHED = 4,
IO_WQ_WORK_NEEDS_USER = 8,
IO_WQ_WORK_NEEDS_FILES = 16,
+ IO_WQ_WORK_UNBOUND = 32,
IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */
};
@@ -33,7 +34,8 @@ struct io_wq_work {
(work)->files = NULL; \
} while (0) \
-struct io_wq *io_wq_create(unsigned concurrency, struct mm_struct *mm);
+struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
+ struct user_struct *user);
void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
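With this change, io_wq_create() takes the maximum number of bounded workers as
its first argument (renamed from "concurrency" to "bounded") plus the owning
user_struct. A hedged sketch of the caller side follows; the concurrency value
and the ctx->sqo_mm / ctx->user fields are assumptions about the io_uring setup
path, not something this header change defines:

	/* Illustrative only: create the shared io-wq and bail out on error. */
	struct io_wq *wq;

	wq = io_wq_create(concurrency, ctx->sqo_mm, ctx->user);
	if (IS_ERR(wq))
		return PTR_ERR(wq);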