author	Linus Torvalds <torvalds@linux-foundation.org>	2022-09-02 16:37:01 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-09-02 16:37:01 -0700
commit	cec53f4c8df0b3f45796127a31c10b86ec125f55 (patch)
tree	f1765cb76b1115c0b0ad58c52003b0db2024686a /io_uring/notif.c
parent	Merge tag '6.0-rc3-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6 (diff)
parent	selftests/net: return back io_uring zc send tests (diff)
Merge tag 'io_uring-6.0-2022-09-02' of git://git.kernel.dk/linux-block
Pull io_uring fixes from Jens Axboe:

 - A single fix for over-eager retries for networking (Pavel)

 - Revert the notification slot support for zerocopy sends.

   It turns out that even after more than a year of development and
   testing, there's not full agreement on whether just using plain
   ordered notifications is Good Enough to avoid the complexity of
   using the notification slots. Because of that, we decided that it's
   best left to a future final decision.

   We can always bring back this feature, but we can't really change it
   or remove it once we've released 6.0 with it enabled. The reverts
   leave the usual CQE notifications as the primary interface for
   knowing when data was sent, and when it was acked. (Pavel)

* tag 'io_uring-6.0-2022-09-02' of git://git.kernel.dk/linux-block:
  selftests/net: return back io_uring zc send tests
  io_uring/net: simplify zerocopy send user API
  io_uring/notif: remove notif registration
  Revert "io_uring: rename IORING_OP_FILES_UPDATE"
  Revert "io_uring: add zc notification flush requests"
  selftests/net: temporarily disable io_uring zc test
  io_uring/net: fix overexcessive retries
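For context, the user-visible result of the reverts is the plain two-CQE flow for IORING_OP_SEND_ZC: one CQE carrying the send result, and a second CQE flagged IORING_CQE_F_NOTIF once the kernel no longer needs the buffer. The sketch below is illustrative only and not part of this commit; it assumes liburing >= 2.3 (for io_uring_prep_send_zc()), an already-initialized ring, and a hypothetical connected socket fd `sockfd`.

/*
 * Minimal sketch: submit one IORING_OP_SEND_ZC and reap its two CQEs.
 * The result CQE carries the byte count (and IORING_CQE_F_MORE while
 * the notification is still pending); the CQE flagged IORING_CQE_F_NOTIF
 * signals that the buffer may be reused.
 */
#include <liburing.h>
#include <stdio.h>

static int send_zc_once(struct io_uring *ring, int sockfd,
			const void *buf, size_t len)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int i, ret;

	if (!sqe)
		return -1;
	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
	sqe->user_data = 0x1234;	/* arbitrary tag, shared by both CQEs */

	ret = io_uring_submit(ring);
	if (ret < 0)
		return ret;

	/* expect two completions: the send result and the notification */
	for (i = 0; i < 2; i++) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		if (cqe->flags & IORING_CQE_F_NOTIF)
			printf("buffer released, safe to reuse\n");
		else
			printf("send result: %d\n", cqe->res);
		io_uring_cqe_seen(ring, cqe);
	}
	return 0;
}

Note that the sketch keys off IORING_CQE_F_NOTIF to tell the two completions apart rather than relying on their order, which keeps the handling simple even when several zerocopy sends are in flight.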
Diffstat (limited to 'io_uring/notif.c')
-rw-r--r--	io_uring/notif.c	83
1 file changed, 2 insertions(+), 81 deletions(-)
diff --git a/io_uring/notif.c b/io_uring/notif.c
index 96f076b175e0..38d77165edc3 100644
--- a/io_uring/notif.c
+++ b/io_uring/notif.c
@@ -42,8 +42,7 @@ static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
}
}
-struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
- struct io_notif_slot *slot)
+struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
__must_hold(&ctx->uring_lock)
{
struct io_kiocb *notif;
@@ -59,101 +58,23 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
io_get_task_refs(1);
notif->rsrc_node = NULL;
io_req_set_rsrc_node(notif, ctx, 0);
- notif->cqe.user_data = slot->tag;
- notif->cqe.flags = slot->seq++;
- notif->cqe.res = 0;
nd = io_notif_to_data(notif);
nd->account_pages = 0;
nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
nd->uarg.callback = io_uring_tx_zerocopy_callback;
- /* master ref owned by io_notif_slot, will be dropped on flush */
refcount_set(&nd->uarg.refcnt, 1);
return notif;
}
-void io_notif_slot_flush(struct io_notif_slot *slot)
+void io_notif_flush(struct io_kiocb *notif)
__must_hold(&slot->notif->ctx->uring_lock)
{
- struct io_kiocb *notif = slot->notif;
struct io_notif_data *nd = io_notif_to_data(notif);
- slot->notif = NULL;
-
/* drop slot's master ref */
if (refcount_dec_and_test(&nd->uarg.refcnt)) {
notif->io_task_work.func = __io_notif_complete_tw;
io_req_task_work_add(notif);
}
}
-
-__cold int io_notif_unregister(struct io_ring_ctx *ctx)
- __must_hold(&ctx->uring_lock)
-{
- int i;
-
- if (!ctx->notif_slots)
- return -ENXIO;
-
- for (i = 0; i < ctx->nr_notif_slots; i++) {
- struct io_notif_slot *slot = &ctx->notif_slots[i];
- struct io_kiocb *notif = slot->notif;
- struct io_notif_data *nd;
-
- if (!notif)
- continue;
- nd = io_notif_to_data(notif);
- slot->notif = NULL;
- if (!refcount_dec_and_test(&nd->uarg.refcnt))
- continue;
- notif->io_task_work.func = __io_notif_complete_tw;
- io_req_task_work_add(notif);
- }
-
- kvfree(ctx->notif_slots);
- ctx->notif_slots = NULL;
- ctx->nr_notif_slots = 0;
- return 0;
-}
-
-__cold int io_notif_register(struct io_ring_ctx *ctx,
- void __user *arg, unsigned int size)
- __must_hold(&ctx->uring_lock)
-{
- struct io_uring_notification_slot __user *slots;
- struct io_uring_notification_slot slot;
- struct io_uring_notification_register reg;
- unsigned i;
-
- if (ctx->nr_notif_slots)
- return -EBUSY;
- if (size != sizeof(reg))
- return -EINVAL;
- if (copy_from_user(&reg, arg, sizeof(reg)))
- return -EFAULT;
- if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
- return -EINVAL;
- if (reg.resv || reg.resv2 || reg.resv3)
- return -EINVAL;
-
- slots = u64_to_user_ptr(reg.data);
- ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
- GFP_KERNEL_ACCOUNT);
- if (!ctx->notif_slots)
- return -ENOMEM;
-
- for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
- struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
-
- if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
- io_notif_unregister(ctx);
- return -EFAULT;
- }
- if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
- io_notif_unregister(ctx);
- return -EINVAL;
- }
- notif_slot->tag = slot.tag;
- }
- return 0;
-}