Diffstat (limited to 'net/ipv4/tcp.c')
-rw-r--r--    net/ipv4/tcp.c    75
1 file changed, 28 insertions, 47 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 38c2bcb8dd5d..3075723c729b 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -835,47 +835,29 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                        int large_allowed)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        u32 xmit_size_goal, old_size_goal;
-
-        xmit_size_goal = mss_now;
-
-        if (large_allowed && sk_can_gso(sk)) {
-                u32 gso_size, hlen;
-
-                /* Maybe we should/could use sk->sk_prot->max_header here ? */
-                hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
-                       inet_csk(sk)->icsk_ext_hdr_len +
-                       tp->tcp_header_len;
-
-                /* Goal is to send at least one packet per ms,
-                 * not one big TSO packet every 100 ms.
-                 * This preserves ACK clocking and is consistent
-                 * with tcp_tso_should_defer() heuristic.
-                 */
-                gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
-                gso_size = max_t(u32, gso_size,
-                                 sysctl_tcp_min_tso_segs * mss_now);
-
-                xmit_size_goal = min_t(u32, gso_size,
-                                       sk->sk_gso_max_size - 1 - hlen);
-
-                xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-
-                /* We try hard to avoid divides here */
-                old_size_goal = tp->xmit_size_goal_segs * mss_now;
-
-                if (likely(old_size_goal <= xmit_size_goal &&
-                           old_size_goal + mss_now > xmit_size_goal)) {
-                        xmit_size_goal = old_size_goal;
-                } else {
-                        tp->xmit_size_goal_segs =
-                                min_t(u16, xmit_size_goal / mss_now,
-                                      sk->sk_gso_max_segs);
-                        xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
-                }
+        u32 new_size_goal, size_goal, hlen;
+
+        if (!large_allowed || !sk_can_gso(sk))
+                return mss_now;
+
+        /* Maybe we should/could use sk->sk_prot->max_header here ? */
+        hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+               inet_csk(sk)->icsk_ext_hdr_len +
+               tp->tcp_header_len;
+
+        new_size_goal = sk->sk_gso_max_size - 1 - hlen;
+        new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);
+
+        /* We try hard to avoid divides here */
+        size_goal = tp->gso_segs * mss_now;
+        if (unlikely(new_size_goal < size_goal ||
+                     new_size_goal >= size_goal + mss_now)) {
+                tp->gso_segs = min_t(u16, new_size_goal / mss_now,
+                                     sk->sk_gso_max_segs);
+                size_goal = tp->gso_segs * mss_now;
         }
 
-        return max(xmit_size_goal, mss_now);
+        return max(size_goal, mss_now);
 }
 
 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
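Note on the hunk above: the "avoid divides" trick keeps size_goal as a cached multiple of mss_now (tp->gso_segs * mss_now) and only divides when the freshly computed byte goal falls outside the current one-MSS bucket. The standalone sketch below illustrates that caching idea only; cached_segs is a hypothetical stand-in for tp->gso_segs, and this is not kernel code.

/* Standalone sketch of the divide-avoidance caching: the goal is kept as
 * cached_segs * mss and only recomputed when the new byte goal leaves the
 * interval [goal, goal + mss).
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t cached_segs;    /* plays the role of tp->gso_segs */

static uint32_t size_goal(uint32_t new_goal, uint32_t mss, uint16_t max_segs)
{
        uint32_t goal = (uint32_t)cached_segs * mss;

        if (new_goal < goal || new_goal >= goal + mss) {
                uint32_t segs = new_goal / mss; /* the only divide, taken rarely */

                cached_segs = segs > max_segs ? max_segs : (uint16_t)segs;
                goal = (uint32_t)cached_segs * mss;
        }
        return goal > mss ? goal : mss;         /* never below one MSS */
}

int main(void)
{
        /* Two goals in the same MSS bucket: only the first call divides. */
        printf("%u\n", (unsigned)size_goal(60000, 1448, 45));
        printf("%u\n", (unsigned)size_goal(60100, 1448, 45));
        return 0;
}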
@@ -1085,7 +1067,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 size_t size)
 {
-        struct iovec *iov;
+        const struct iovec *iov;
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
         int iovlen, flags, err, copied = 0;
@@ -1136,8 +1118,8 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
         mss_now = tcp_send_mss(sk, &size_goal, flags);
 
         /* Ok commence sending. */
-        iovlen = msg->msg_iovlen;
-        iov = msg->msg_iov;
+        iovlen = msg->msg_iter.nr_segs;
+        iov = msg->msg_iter.iov;
         copied = 0;
 
         err = -EPIPE;
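The two hunks above switch tcp_sendmsg() from the old msg->msg_iov/msg_iovlen fields to the iov_iter embedded in the msghdr (msg->msg_iter). The userspace-visible interface is unchanged: a sendmsg() caller still hands the kernel an array of struct iovec, as in this minimal sketch (fd is assumed to be a connected TCP socket; illustration only).

/* Userspace view of the scatter-gather interface consumed above: sendmsg()
 * still passes an iovec array; only the in-kernel representation changed.
 */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_two_chunks(int fd, const char *hdr, const char *body)
{
        struct iovec iov[2] = {
                { .iov_base = (void *)hdr,  .iov_len = strlen(hdr)  },
                { .iov_base = (void *)body, .iov_len = strlen(body) },
        };
        struct msghdr msg = {
                .msg_iov    = iov,      /* becomes msg_iter.iov in-kernel */
                .msg_iovlen = 2,        /* becomes msg_iter.nr_segs */
        };

        return sendmsg(fd, &msg, 0);
}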
@@ -1349,7 +1331,7 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
 
                 if (len > 0) {
                         if (!(flags & MSG_TRUNC))
-                                err = memcpy_toiovec(msg->msg_iov, &c, 1);
+                                err = memcpy_to_msg(msg, &c, 1);
                         len = 1;
                 } else
                         msg->msg_flags |= MSG_TRUNC;
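Here memcpy_to_msg() copies the single TCP urgent byte into the caller's buffers; from userspace this is the MSG_OOB receive path. A minimal illustration (assumes SO_OOBINLINE is not enabled on fd; sketch only):

/* Userspace view of the urgent-byte copy above: with SO_OOBINLINE disabled,
 * the out-of-band byte is fetched with MSG_OOB.
 */
#include <sys/socket.h>

static ssize_t read_urgent_byte(int fd, char *out)
{
        return recv(fd, out, 1, MSG_OOB);  /* 1 on success, -1 if no OOB byte is pending */
}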
@@ -1377,7 +1359,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 
         /* XXX -- need to support SO_PEEK_OFF */
 
         skb_queue_walk(&sk->sk_write_queue, skb) {
-                err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
+                err = skb_copy_datagram_msg(skb, 0, msg, skb->len);
                 if (err)
                         break;
@@ -1729,7 +1711,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
                         user_recv = current;
                         tp->ucopy.task = user_recv;
-                        tp->ucopy.iov = msg->msg_iov;
+                        tp->ucopy.msg = msg;
                 }
 
                 tp->ucopy.len = len;
@@ -1833,8 +1815,7 @@ do_prequeue:
                 }
 
                 if (!(flags & MSG_TRUNC)) {
-                        err = skb_copy_datagram_iovec(skb, offset,
-                                                      msg->msg_iov, used);
+                        err = skb_copy_datagram_msg(skb, offset, msg, used);
                         if (err) {
                                 /* Exception. Bailout! */
                                 if (!copied)
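On the receive side, skb_copy_datagram_msg() fills whatever buffers the caller attached to the msghdr, so a userspace recvmsg() with one or more iovecs still receives the data as before, and MSG_PEEK (checked throughout the code above) leaves the data queued for a later read. A small sketch follows; fd is again assumed to be a connected TCP socket, and this is an illustration rather than anything tied to this tree.

/* Receive-side counterpart: recvmsg() scatters the copied bytes across the
 * supplied iovecs.  MSG_PEEK leaves the data in the receive queue, so it can
 * be read again afterwards.
 */
#include <stddef.h>
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t peek_then_read(int fd, char *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
        ssize_t peeked;

        peeked = recvmsg(fd, &msg, MSG_PEEK);   /* data stays queued */
        if (peeked <= 0)
                return peeked;

        return recvmsg(fd, &msg, 0);            /* now actually consume it */
}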