author     Pavel Begunkov <asml.silence@gmail.com>   2022-09-08 13:20:32 +0100
committer  Jens Axboe <axboe@kernel.dk>              2022-09-21 10:30:43 -0600
commit     cd9021e88fddf0d9fa9704564153af2bdb5dc13c
tree       e2599891c185678958d9db1ca2859934de6169d3 /io_uring
parent     io_uring/net: io_async_msghdr caches for sendzc
io_uring/net: add non-bvec sg chunking callback
Add a sg_from_iter() for when we initiate non-bvec zerocopy sends, which
helps us to remove some extra steps from io_sg_from_iter(). The only thing
the new function has to do before giving control away to
__zerocopy_sg_from_iter() is to check if the skb has managed frags and
downgrade them if so.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/cda3dea0d36f7931f63a70f350130f085ac3f3dd.1662639236.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
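Put differently: after this patch the chunking callback is chosen once, at
buffer-import time, instead of io_sg_from_iter() re-checking the iterator
type on every call. A minimal sketch of the effective selection, condensed
from the io_sendzc() hunks below; the IORING_RECVSEND_FIXED_BUF test sits
in surrounding context these hunks only partially quote, so treat that
detail as an assumption:

	/*
	 * Sketch only, not part of the patch. A registered buffer is
	 * imported as a bvec iterator, so io_sg_from_iter()'s managed-frag
	 * chunking applies; a plain user pointer becomes an iovec iterator
	 * and goes through io_sg_from_iter_iovec(), which downgrades any
	 * managed frags and defers to __zerocopy_sg_from_iter().
	 */
	if (zc->flags & IORING_RECVSEND_FIXED_BUF)
		msg.sg_from_iter = io_sg_from_iter;
	else
		msg.sg_from_iter = io_sg_from_iter_iovec;

This keeps the bvec fast path free of the downgrade and iterator-type
checks it previously performed on every invocation.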
Diffstat (limited to 'io_uring')
 io_uring/net.c | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/io_uring/net.c b/io_uring/net.c
index 07f6b9e93c00..9b76cebc0a65 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -948,6 +948,13 @@ int io_sendzc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return 0;
 }
 
+static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
+				 struct iov_iter *from, size_t length)
+{
+	skb_zcopy_downgrade_managed(skb);
+	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
+}
+
 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 			   struct iov_iter *from, size_t length)
 {
@@ -958,13 +965,10 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
 	ssize_t copied = 0;
 	unsigned long truesize = 0;
 
-	if (!shinfo->nr_frags)
+	if (!frag)
 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
-
-	if (!skb_zcopy_managed(skb) || !iov_iter_is_bvec(from)) {
-		skb_zcopy_downgrade_managed(skb);
+	else if (unlikely(!skb_zcopy_managed(skb)))
 		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
-	}
 
 	bi.bi_size = min(from->count, length);
 	bi.bi_bvec_done = from->iov_offset;
@@ -1045,6 +1049,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 					  (u64)(uintptr_t)zc->buf, zc->len);
 		if (unlikely(ret))
 			return ret;
+		msg.sg_from_iter = io_sg_from_iter;
 	} else {
 		ret = import_single_range(WRITE, zc->buf, zc->len, &iov,
 					  &msg.msg_iter);
@@ -1053,6 +1058,7 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 		ret = io_notif_account_mem(zc->notif, zc->len);
 		if (unlikely(ret))
 			return ret;
+		msg.sg_from_iter = io_sg_from_iter_iovec;
 	}
 
 	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
@@ -1063,7 +1069,6 @@ int io_sendzc(struct io_kiocb *req, unsigned int issue_flags)
 
 	msg.msg_flags = msg_flags;
 	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
-	msg.sg_from_iter = io_sg_from_iter;
 	ret = sock_sendmsg(sock, &msg);
 
 	if (unlikely(ret < min_ret)) {