Diffstat (limited to 'io_uring')
 -rw-r--r--  io_uring/io_uring.c  14
 -rw-r--r--  io_uring/net.c       59
 -rw-r--r--  io_uring/net.h        1
 -rw-r--r--  io_uring/notif.c     83
 -rw-r--r--  io_uring/notif.h     54
 -rw-r--r--  io_uring/opdef.c     12
 -rw-r--r--  io_uring/rsrc.c      55
 -rw-r--r--  io_uring/rsrc.h       4
 8 files changed, 52 insertions, 230 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 77616279000b..f9be9b7eb654 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -2640,7 +2640,6 @@ static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
io_unregister_personality(ctx, index);
if (ctx->rings)
io_poll_remove_all(ctx, NULL, true);
- io_notif_unregister(ctx);
mutex_unlock(&ctx->uring_lock);
/* failed during ring init, it couldn't have issued any requests */
@@ -3839,15 +3838,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_register_file_alloc_range(ctx, arg);
break;
- case IORING_REGISTER_NOTIFIERS:
- ret = io_notif_register(ctx, arg, nr_args);
- break;
- case IORING_UNREGISTER_NOTIFIERS:
- ret = -EINVAL;
- if (arg || nr_args)
- break;
- ret = io_notif_unregister(ctx);
- break;
default:
ret = -EINVAL;
break;
@@ -3933,8 +3923,8 @@ static int __init io_uring_init(void)
BUILD_BUG_SQE_ELEM(42, __u16, personality);
BUILD_BUG_SQE_ELEM(44, __s32, splice_fd_in);
BUILD_BUG_SQE_ELEM(44, __u32, file_index);
- BUILD_BUG_SQE_ELEM(44, __u16, notification_idx);
- BUILD_BUG_SQE_ELEM(46, __u16, addr_len);
+ BUILD_BUG_SQE_ELEM(44, __u16, addr_len);
+ BUILD_BUG_SQE_ELEM(46, __u16, __pad3[0]);
BUILD_BUG_SQE_ELEM(48, __u64, addr3);
BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
BUILD_BUG_SQE_ELEM(56, __u64, __pad2);
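
The SQE layout checks above reflect the reworked layout: notification_idx is gone, addr_len now sits at byte 44 (sharing the union with splice_fd_in/file_index), and bytes 46-47 become the reserved __pad3. A minimal userspace sketch that mirrors these checks, assuming the post-patch <linux/io_uring.h> UAPI header; it only restates the offsets asserted in the hunk above.

/* Illustrative build-time layout check against the post-patch UAPI header. */
#include <stddef.h>
#include <linux/io_uring.h>

_Static_assert(offsetof(struct io_uring_sqe, addr_len) == 44,
	       "addr_len takes the offset formerly used by notification_idx");
_Static_assert(offsetof(struct io_uring_sqe, addr3) == 48,
	       "addr3 stays at offset 48");
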
diff --git a/io_uring/net.c b/io_uring/net.c
index 0af8a02df580..7047c1342541 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -65,12 +65,12 @@ struct io_sendzc {
struct file *file;
void __user *buf;
size_t len;
- u16 slot_idx;
unsigned msg_flags;
unsigned flags;
unsigned addr_len;
void __user *addr;
size_t done_io;
+ struct io_kiocb *notif;
};
#define IO_APOLL_MULTI_POLLED (REQ_F_APOLL_MULTISHOT | REQ_F_POLLED)
@@ -879,17 +879,31 @@ out_free:
return ret;
}
+void io_sendzc_cleanup(struct io_kiocb *req)
+{
+ struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
+
+ zc->notif->flags |= REQ_F_CQE_SKIP;
+ io_notif_flush(zc->notif);
+ zc->notif = NULL;
+}
+
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
struct io_ring_ctx *ctx = req->ctx;
+ struct io_kiocb *notif;
- if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3))
+ if (READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3) ||
+ READ_ONCE(sqe->__pad3[0]))
+ return -EINVAL;
+ /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
+ if (req->flags & REQ_F_CQE_SKIP)
return -EINVAL;
zc->flags = READ_ONCE(sqe->ioprio);
if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
- IORING_RECVSEND_FIXED_BUF | IORING_RECVSEND_NOTIF_FLUSH))
+ IORING_RECVSEND_FIXED_BUF))
return -EINVAL;
if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
unsigned idx = READ_ONCE(sqe->buf_index);
@@ -900,11 +914,17 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
req->imu = READ_ONCE(ctx->user_bufs[idx]);
io_req_set_rsrc_node(req, ctx, 0);
}
+ notif = zc->notif = io_alloc_notif(ctx);
+ if (!notif)
+ return -ENOMEM;
+ notif->cqe.user_data = req->cqe.user_data;
+ notif->cqe.res = 0;
+ notif->cqe.flags = IORING_CQE_F_NOTIF;
+ req->flags |= REQ_F_NEED_CLEANUP;
zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
zc->len = READ_ONCE(sqe->len);
zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
- zc->slot_idx = READ_ONCE(sqe->notification_idx);
if (zc->msg_flags & MSG_DONTWAIT)
req->flags |= REQ_F_NOWAIT;
@@ -956,7 +976,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
shinfo->nr_frags = frag;
from->bvec += bi.bi_idx;
from->nr_segs -= bi.bi_idx;
- from->count = bi.bi_size;
+ from->count -= copied;
from->iov_offset = bi.bi_bvec_done;
skb->data_len += copied;
@@ -976,33 +996,20 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
{
struct sockaddr_storage __address, *addr = NULL;
- struct io_ring_ctx *ctx = req->ctx;
struct io_sendzc *zc = io_kiocb_to_cmd(req, struct io_sendzc);
- struct io_notif_slot *notif_slot;
- struct io_kiocb *notif;
struct msghdr msg;
struct iovec iov;
struct socket *sock;
- unsigned msg_flags;
+ unsigned msg_flags, cflags;
int ret, min_ret = 0;
if (!(req->flags & REQ_F_POLLED) &&
(zc->flags & IORING_RECVSEND_POLL_FIRST))
return -EAGAIN;
-
- if (issue_flags & IO_URING_F_UNLOCKED)
- return -EAGAIN;
sock = sock_from_file(req->file);
if (unlikely(!sock))
return -ENOTSOCK;
- notif_slot = io_get_notif_slot(ctx, zc->slot_idx);
- if (!notif_slot)
- return -EINVAL;
- notif = io_get_notif(ctx, notif_slot);
- if (!notif)
- return -ENOMEM;
-
msg.msg_name = NULL;
msg.msg_control = NULL;
msg.msg_controllen = 0;
@@ -1033,7 +1040,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
&msg.msg_iter);
if (unlikely(ret))
return ret;
- ret = io_notif_account_mem(notif, zc->len);
+ ret = io_notif_account_mem(zc->notif, zc->len);
if (unlikely(ret))
return ret;
}
@@ -1045,7 +1052,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
msg.msg_flags = msg_flags;
- msg.msg_ubuf = &io_notif_to_data(notif)->uarg;
+ msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
msg.sg_from_iter = io_sg_from_iter;
ret = sock_sendmsg(sock, &msg);
@@ -1060,18 +1067,22 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
req->flags |= REQ_F_PARTIAL_IO;
return io_setup_async_addr(req, addr, issue_flags);
}
+ if (ret < 0 && !zc->done_io)
+ zc->notif->flags |= REQ_F_CQE_SKIP;
if (ret == -ERESTARTSYS)
ret = -EINTR;
req_set_fail(req);
- } else if (zc->flags & IORING_RECVSEND_NOTIF_FLUSH) {
- io_notif_slot_flush_submit(notif_slot, 0);
}
if (ret >= 0)
ret += zc->done_io;
else if (zc->done_io)
ret = zc->done_io;
- io_req_set_res(req, ret, 0);
+
+ io_notif_flush(zc->notif);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ cflags = ret >= 0 ? IORING_CQE_F_MORE : 0;
+ io_req_set_res(req, ret, cflags);
return IOU_OK;
}
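
With per-request notifications, a single IORING_OP_SEND_ZC submission can now post two CQEs sharing the request's user_data: the send completion (flagged IORING_CQE_F_MORE when a notification will follow) and, once the network stack has released the pages, a notification CQE flagged IORING_CQE_F_NOTIF. A minimal reaping sketch under that model, assuming a liburing release that already ships io_uring_prep_send_zc():

#include <liburing.h>

/*
 * Illustrative only: send one buffer with SEND_ZC and wait for both CQEs.
 * Assumes no other requests are in flight; real code should match
 * cqe->user_data instead of trusting ordering.
 */
static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int sent = 0, done = 0, ret;

	if (!sqe)
		return -EBUSY;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	io_uring_sqe_set_data64(sqe, 0xcafe);	/* both CQEs carry this value */
	io_uring_submit(ring);

	while (!done) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF) {
			done = 1;		/* kernel is done with buf, safe to reuse */
		} else {
			sent = cqe->res;
			if (!(cqe->flags & IORING_CQE_F_MORE))
				done = 1;	/* failed send: no notification follows */
		}
		io_uring_cqe_seen(ring, cqe);
	}
	return sent;
}
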
diff --git a/io_uring/net.h b/io_uring/net.h
index f91f56c6eeac..d744a0a874e7 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -55,6 +55,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc(struct io_kiocb *req, unsigned int issue_flags);
int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+void io_sendzc_cleanup(struct io_kiocb *req);
void io_netmsg_cache_free(struct io_cache_entry *entry);
#else
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 96f076b175e0..38d77165edc3 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -42,8 +42,7 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
}
}
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot)
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_kiocb *notif;
@@ -59,101 +58,23 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
io_get_task_refs(1);
notif->rsrc_node = NULL;
io_req_set_rsrc_node(notif, ctx, 0);
- notif->cqe.user_data = slot->tag;
- notif->cqe.flags = slot->seq++;
- notif->cqe.res = 0;
nd = io_notif_to_data(notif);
nd->account_pages = 0;
nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
nd->uarg.callback = io_uring_tx_zerocopy_callback;
- /* master ref owned by io_notif_slot, will be dropped on flush */
refcount_set(&nd->uarg.refcnt, 1);
return notif;
}
-void io_notif_slot_flush(struct io_notif_slot *slot)
+void io_notif_flush(struct io_kiocb *notif)
__must_hold(&slot->notif->ctx->uring_lock)
{
- struct io_kiocb *notif = slot->notif;
struct io_notif_data *nd = io_notif_to_data(notif);
- slot->notif = NULL;
-
/* drop slot's master ref */
if (refcount_dec_and_test(&nd->uarg.refcnt)) {
notif->io_task_work.func = __io_notif_complete_tw;
io_req_task_work_add(notif);
}
}
-
-__cold int io_notif_unregister(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- int i;
-
- if (!ctx->notif_slots)
- return -ENXIO;
-
- for (i = 0; i < ctx->nr_notif_slots; i++) {
- struct io_notif_slot *slot = &ctx->notif_slots[i];
- struct io_kiocb *notif = slot->notif;
- struct io_notif_data *nd;
-
- if (!notif)
- continue;
- nd = io_notif_to_data(notif);
- slot->notif = NULL;
- if (!refcount_dec_and_test(&nd->uarg.refcnt))
- continue;
- notif->io_task_work.func = __io_notif_complete_tw;
- io_req_task_work_add(notif);
- }
-
- kvfree(ctx->notif_slots);
- ctx->notif_slots = NULL;
- ctx->nr_notif_slots = 0;
- return 0;
-}
-
-__cold int io_notif_register(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int size)
- __must_hold(&ctx->uring_lock)
-{
- struct io_uring_notification_slot __user *slots;
- struct io_uring_notification_slot slot;
- struct io_uring_notification_register reg;
- unsigned i;
-
- if (ctx->nr_notif_slots)
- return -EBUSY;
- if (size != sizeof(reg))
- return -EINVAL;
- if (copy_from_user(&reg, arg, sizeof(reg)))
- return -EFAULT;
- if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
- return -EINVAL;
- if (reg.resv || reg.resv2 || reg.resv3)
- return -EINVAL;
-
- slots = u64_to_user_ptr(reg.data);
- ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
- GFP_KERNEL_ACCOUNT);
- if (!ctx->notif_slots)
- return -ENOMEM;
-
- for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
- struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
-
- if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
- io_notif_unregister(ctx);
- return -EFAULT;
- }
- if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
- io_notif_unregister(ctx);
- return -EINVAL;
- }
- notif_slot->tag = slot.tag;
- }
- return 0;
-}
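
What remains of the kernel-side API is an alloc/flush pair: io_alloc_notif() creates a private notification request holding the single submitter reference on nd->uarg, and io_notif_flush() drops that reference once the send has been issued, letting the completion task_work run after the last skb reference is gone. A condensed sketch of how an opcode drives the pair, modelled on io_sendzc_prep()/io_sendzc() above; the my_zc_*() wrappers are illustrative, not kernel functions.

static struct io_kiocb *my_zc_setup(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	struct io_kiocb *notif = io_alloc_notif(ctx);	/* takes the submitter uarg ref */

	if (!notif)
		return NULL;
	/* the notification posts as a second CQE reusing the request's user_data */
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;	/* lets ->cleanup() flush on early failure */
	return notif;
}

static void my_zc_finish(struct io_kiocb *notif, int send_ret)
{
	if (send_ret < 0)
		notif->flags |= REQ_F_CQE_SKIP;	/* nothing queued: suppress the notif CQE */
	/*
	 * Drop the submitter reference taken in io_alloc_notif(); the completion
	 * task_work only runs once the skb references taken via
	 * io_uring_tx_zerocopy_callback() are gone as well.
	 */
	io_notif_flush(notif);
}
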
diff --git a/io_uring/notif.h b/io_uring/notif.h
index 80f6445e0c2b..5b4d710c8ca5 100644
--- a/io_uring/notif.h
+++ b/io_uring/notif.h
@@ -8,7 +8,6 @@
#include "rsrc.h"
#define IO_NOTIF_SPLICE_BATCH 32
-#define IORING_MAX_NOTIF_SLOTS (1U << 15)
struct io_notif_data {
struct file *file;
@@ -16,63 +15,14 @@ struct io_notif_data {
unsigned long account_pages;
};
-struct io_notif_slot {
- /*
- * Current/active notifier. A slot holds only one active notifier at a
- * time and keeps one reference to it. Flush releases the reference and
- * lazily replaces it with a new notifier.
- */
- struct io_kiocb *notif;
-
- /*
- * Default ->user_data for this slot notifiers CQEs
- */
- u64 tag;
- /*
- * Notifiers of a slot live in generations, we create a new notifier
- * only after flushing the previous one. Track the sequential number
- * for all notifiers and copy it into notifiers's cqe->cflags
- */
- u32 seq;
-};
-
-int io_notif_register(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int size);
-int io_notif_unregister(struct io_ring_ctx *ctx);
-
-void io_notif_slot_flush(struct io_notif_slot *slot);
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot);
+void io_notif_flush(struct io_kiocb *notif);
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx);
static inline struct io_notif_data *io_notif_to_data(struct io_kiocb *notif)
{
return io_kiocb_to_cmd(notif, struct io_notif_data);
}
-static inline struct io_kiocb *io_get_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot)
-{
- if (!slot->notif)
- slot->notif = io_alloc_notif(ctx, slot);
- return slot->notif;
-}
-
-static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
- unsigned idx)
- __must_hold(&ctx->uring_lock)
-{
- if (idx >= ctx->nr_notif_slots)
- return NULL;
- idx = array_index_nospec(idx, ctx->nr_notif_slots);
- return &ctx->notif_slots[idx];
-}
-
-static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
- unsigned int issue_flags)
-{
- io_notif_slot_flush(slot);
-}
-
static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)
{
struct io_ring_ctx *ctx = notif->ctx;
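
io_notif_account_mem() is the piece that charges non-registered send buffers against the ring's pinned-memory accounting; sends from buffers registered up front take the IORING_RECVSEND_FIXED_BUF path in net.c and skip that per-request charge. A userspace sketch, assuming liburing's io_uring_prep_send_zc_fixed() helper and its trailing buf_index parameter:

#include <liburing.h>
#include <sys/uio.h>

/* Illustrative: register one buffer, then send from it zero-copy by index. */
static int send_zc_registered(struct io_uring *ring, int sockfd,
			      void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct io_uring_sqe *sqe;
	int ret;

	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		return ret;
	sqe = io_uring_get_sqe(ring);
	if (!sqe)
		return -EBUSY;
	/* buf_index 0 refers to the iovec registered above */
	io_uring_prep_send_zc_fixed(sqe, sockfd, buf, len, 0, 0, 0);
	return io_uring_submit(ring);
}
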
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 41410126c1c6..c61494e0a602 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -246,13 +246,12 @@ const struct io_op_def io_op_defs[] = {
.prep = io_close_prep,
.issue = io_close,
},
- [IORING_OP_RSRC_UPDATE] = {
+ [IORING_OP_FILES_UPDATE] = {
.audit_skip = 1,
.iopoll = 1,
- .name = "RSRC_UPDATE",
- .prep = io_rsrc_update_prep,
- .issue = io_rsrc_update,
- .ioprio = 1,
+ .name = "FILES_UPDATE",
+ .prep = io_files_update_prep,
+ .issue = io_files_update,
},
[IORING_OP_STATX] = {
.audit_skip = 1,
@@ -471,7 +470,7 @@ const struct io_op_def io_op_defs[] = {
.issue = io_uring_cmd,
.prep_async = io_uring_cmd_prep_async,
},
- [IORING_OP_SENDZC_NOTIF] = {
+ [IORING_OP_SEND_ZC] = {
.name = "SENDZC_NOTIF",
.needs_file = 1,
.unbound_nonreg_file = 1,
@@ -484,6 +483,7 @@ const struct io_op_def io_op_defs[] = {
.prep = io_sendzc_prep,
.issue = io_sendzc,
.prep_async = io_sendzc_prep_async,
+ .cleanup = io_sendzc_cleanup,
#else
.prep = io_eopnotsupp_prep,
#endif
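
Because the user-visible opcode value changes here (IORING_OP_SENDZC_NOTIF becomes IORING_OP_SEND_ZC, and IORING_OP_FILES_UPDATE returns to its direct handler), applications that must run on kernels from either side of this patch can feature-probe at startup. A sketch using liburing's probe API (assumed available):

#include <liburing.h>
#include <stdbool.h>

static bool send_zc_supported(void)
{
	struct io_uring_probe *probe = io_uring_get_probe();
	bool ok;

	if (!probe)
		return false;
	ok = io_uring_opcode_supported(probe, IORING_OP_SEND_ZC) != 0;
	io_uring_free_probe(probe);
	return ok;
}
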
diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
index 71359a4d0bd4..cf3272113214 100644
--- a/io_uring/rsrc.c
+++ b/io_uring/rsrc.c
@@ -15,14 +15,12 @@
#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"
-#include "notif.h"
struct io_rsrc_update {
struct file *file;
u64 arg;
u32 nr_args;
u32 offset;
- int type;
};
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
@@ -655,7 +653,7 @@ __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
return -EINVAL;
}
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
@@ -669,7 +667,6 @@ int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (!up->nr_args)
return -EINVAL;
up->arg = READ_ONCE(sqe->addr);
- up->type = READ_ONCE(sqe->ioprio);
return 0;
}
@@ -712,7 +709,7 @@ static int io_files_update_with_index_alloc(struct io_kiocb *req,
return ret;
}
-static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
struct io_ring_ctx *ctx = req->ctx;
@@ -741,54 +738,6 @@ static int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
return IOU_OK;
}
-static int io_notif_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
- struct io_ring_ctx *ctx = req->ctx;
- unsigned len = up->nr_args;
- unsigned idx_end, idx = up->offset;
- int ret = 0;
-
- io_ring_submit_lock(ctx, issue_flags);
- if (unlikely(check_add_overflow(idx, len, &idx_end))) {
- ret = -EOVERFLOW;
- goto out;
- }
- if (unlikely(idx_end > ctx->nr_notif_slots)) {
- ret = -EINVAL;
- goto out;
- }
-
- for (; idx < idx_end; idx++) {
- struct io_notif_slot *slot = &ctx->notif_slots[idx];
-
- if (!slot->notif)
- continue;
- if (up->arg)
- slot->tag = up->arg;
- io_notif_slot_flush_submit(slot, issue_flags);
- }
-out:
- io_ring_submit_unlock(ctx, issue_flags);
- if (ret < 0)
- req_set_fail(req);
- io_req_set_res(req, ret, 0);
- return IOU_OK;
-}
-
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags)
-{
- struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
-
- switch (up->type) {
- case IORING_RSRC_UPDATE_FILES:
- return io_files_update(req, issue_flags);
- case IORING_RSRC_UPDATE_NOTIF:
- return io_notif_update(req, issue_flags);
- }
- return -EINVAL;
-}
-
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
struct io_rsrc_node *node, void *rsrc)
{
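
With the IORING_OP_RSRC_UPDATE multiplexer and its sqe->ioprio type selector gone, asynchronous registered-file updates go back through plain IORING_OP_FILES_UPDATE, which liburing has wrapped since well before this patch. A minimal sketch:

#include <liburing.h>

static int queue_files_update(struct io_uring *ring, int *fds,
			      unsigned nr_fds, int offset)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -EBUSY;	/* SQ ring full; submit and retry in real code */
	/* replaces nr_fds registered-file slots starting at 'offset' */
	io_uring_prep_files_update(sqe, fds, nr_fds, offset);
	io_uring_sqe_set_data64(sqe, 0);
	return io_uring_submit(ring);
}
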
diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h
index f3a9a177941f..9bce15665444 100644
--- a/io_uring/rsrc.h
+++ b/io_uring/rsrc.h
@@ -167,8 +167,8 @@ static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
return &data->tags[table_idx][off];
}
-int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags);
-int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_files_update(struct io_kiocb *req, unsigned int issue_flags);
+int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int __io_account_mem(struct user_struct *user, unsigned long nr_pages);