author:    Keith Busch <kbusch@kernel.org>  2025-02-27 14:39:16 -0800
committer: Jens Axboe <axboe@kernel.dk>     2025-02-28 07:05:46 -0700
commit:    ed9f3112a8a8f6e6919d3b9da2651fa302df7be3
tree:      4d6c3d569625be201e5fb33a44d2f0fb9a224e95 /io_uring/io_uring.c
parent:    ublk: zc register/unregister bvec
io_uring: cache nodes and mapped buffers
Frequent alloc/free cycles on these are pretty costly. Use an io cache to
more efficiently reuse these buffers.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Link: https://lore.kernel.org/r/20250227223916.143006-7-kbusch@meta.com
[axboe: fix imu leak]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
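The hunks below only show the hook points in io_uring.c; the helpers themselves land in io_uring/rsrc.c, which this page does not show. A plausible sketch of what they do, modeled on the io_alloc_cache calls visible in the diff (the field names node_cache and imu_cache and the cached bvec count are illustrative assumptions, not taken from this page):

	/* Sketch only: two per-ring caches, one for rsrc nodes and one
	 * for fixed-size mapped buffers (imu). Field names and the bvec
	 * count are assumptions for illustration.
	 */
	#define IO_CACHED_BVECS_SEGS	32

	bool io_rsrc_cache_init(struct io_ring_ctx *ctx)
	{
		const int imu_cache_size = struct_size_t(struct io_mapped_ubuf,
							 bvec, IO_CACHED_BVECS_SEGS);
		bool ret;

		ret = io_alloc_cache_init(&ctx->node_cache, IO_ALLOC_CACHE_MAX,
					  sizeof(struct io_rsrc_node), 0);
		ret |= io_alloc_cache_init(&ctx->imu_cache, IO_ALLOC_CACHE_MAX,
					   imu_cache_size, 0);
		return ret;
	}

	void io_rsrc_cache_free(struct io_ring_ctx *ctx)
	{
		io_alloc_cache_free(&ctx->node_cache, kfree);
		io_alloc_cache_free(&ctx->imu_cache, kfree);
	}

Note the error convention implied by the diff: io_alloc_cache_init() returns true on failure, which is why the caller can OR the results together and bail out once at the end.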
Diffstat (limited to 'io_uring/io_uring.c')
-rw-r--r--	io_uring/io_uring.c	| 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index f2bdb1eab577..ccc343f61a57 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -291,6 +291,7 @@ static void io_free_alloc_caches(struct io_ring_ctx *ctx)
 	io_alloc_cache_free(&ctx->uring_cache, kfree);
 	io_alloc_cache_free(&ctx->msg_cache, kfree);
 	io_futex_cache_free(ctx);
+	io_rsrc_cache_free(ctx);
 }
 
 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
@@ -338,6 +339,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
 				   sizeof(struct io_kiocb), 0);
 	ret |= io_futex_cache_init(ctx);
+	ret |= io_rsrc_cache_init(ctx);
 	if (ret)
 		goto free_ref;
 	init_completion(&ctx->ref_comp);
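For context on how these caches get used, the fast path enabled by this change looks roughly like the following (a sketch built on the existing io_alloc_cache_get()/io_alloc_cache_put() primitives; the node fields and exact signatures are illustrative, since the rsrc.c side of the commit is not shown here):

	/* Sketch: try the per-ring cache first, fall back to the slab
	 * allocator; on free, stash the node back in the cache unless
	 * the cache is already full.
	 */
	struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx, int type)
	{
		struct io_rsrc_node *node;

		node = io_alloc_cache_get(&ctx->node_cache);
		if (!node)
			node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (node) {
			node->type = type;
			node->refs = 1;
		}
		return node;
	}

	void io_rsrc_node_free(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
	{
		if (!io_alloc_cache_put(&ctx->node_cache, node))
			kfree(node);
	}

This is the same pattern the ring already applies to its uring_cache and msg_cache entries above: steady-state workloads recycle a bounded pool (up to IO_ALLOC_CACHE_MAX entries) instead of paying for kmalloc()/kfree() on every register/unregister cycle.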