Diffstat:
 net/ipv4/syncookies.c            |  8
 net/ipv4/tcp.c                   |  4
 net/ipv4/tcp_bbr.c               | 22
 net/ipv4/tcp_input.c             | 96
 net/ipv4/tcp_ipv4.c              | 17
 net/ipv4/tcp_lp.c                | 12
 net/ipv4/tcp_minisocks.c         |  4
 net/ipv4/tcp_output.c            | 16
 net/ipv4/tcp_rate.c              | 16
 net/ipv4/tcp_recovery.c          | 23
 net/ipv4/tcp_timer.c             |  8
 net/ipv6/syncookies.c            |  2
 net/ipv6/tcp_ipv6.c              |  4
 net/netfilter/nf_synproxy_core.c |  2
 14 files changed, 116 insertions(+), 118 deletions(-)
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0257d965f111..6426250a58ea 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
* Since subsequent timestamps use the normal tcp_time_stamp value, we
* must make sure that the resulting initial timestamp is <= tcp_time_stamp.
*/
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
{
struct inet_request_sock *ireq;
- u32 ts, ts_now = tcp_time_stamp;
+ u32 ts, ts_now = tcp_time_stamp_raw();
u32 options = 0;
ireq = inet_rsk(req);
@@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
ts <<= TSBITS;
ts |= options;
}
- return ts;
+ return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
}
@@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->tfo_listener = false;
ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
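
A standalone sketch (not part of the patch) of the scaling the syncookies hunk relies on: cookie_init_timestamp() now returns a value on the u64 microsecond clock, built from a tick count at TCP_TS_HZ resolution (1000 Hz in this series, so one tick is 1000 us). The helper name and sample value below are illustrative.

#include <stdint.h>
#include <stdio.h>

#define TCP_TS_HZ    1000U          /* TS option ticks per second (1 ms) */
#define USEC_PER_SEC 1000000U

static uint64_t ts_to_usec_stamp(uint32_t ts)
{
        /* Widen before multiplying so a large 32-bit tick count
         * cannot overflow: one tick is 1000 us here.
         */
        return (uint64_t)ts * (USEC_PER_SEC / TCP_TS_HZ);
}

int main(void)
{
        uint32_t ts = 3000;     /* 3000 ticks == 3 s at 1 ms/tick */

        printf("%llu\n", (unsigned long long)ts_to_usec_stamp(ts)); /* 3000000 */
        return 0;
}
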
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 850054800526..b5d18484746d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2706,7 +2706,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
if (!tp->repair)
err = -EPERM;
else
- tp->tsoffset = val - tcp_time_stamp;
+ tp->tsoffset = val - tcp_time_stamp_raw();
break;
case TCP_REPAIR_WINDOW:
err = tcp_repair_set_window(tp, optval, optlen);
@@ -3072,7 +3072,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
break;
case TCP_TIMESTAMP:
- val = tcp_time_stamp + tp->tsoffset;
+ val = tcp_time_stamp_raw() + tp->tsoffset;
break;
case TCP_NOTSENT_LOWAT:
val = tp->notsent_lowat;
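
The TCP_TIMESTAMP hunks above keep the same user-visible contract on the new raw clock: setsockopt stores the difference to the raw clock, getsockopt adds it back. A small illustrative sketch of that round trip, with all arithmetic wrapping in u32 as in the kernel; function names are made up:

#include <stdint.h>
#include <stdio.h>

static uint32_t tsoffset_on_set(uint32_t user_val, uint32_t raw_now)
{
        return user_val - raw_now;      /* stored offset, wraps in u32 */
}

static uint32_t tsval_on_get(uint32_t raw_now, uint32_t tsoffset)
{
        return raw_now + tsoffset;      /* user-visible timestamp */
}

int main(void)
{
        uint32_t raw = 123456789, user_val = 42;
        uint32_t off = tsoffset_on_set(user_val, raw);

        /* With no clock advance, getsockopt returns what was set: */
        printf("%u\n", tsval_on_get(raw, off));         /* 42 */
        return 0;
}
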
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 40dc4fc5f6ac..dbcc9352a48f 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -91,7 +91,7 @@ struct bbr {
struct minmax bw; /* Max recent delivery rate in pkts/uS << 24 */
u32 rtt_cnt; /* count of packet-timed rounds elapsed */
u32 next_rtt_delivered; /* scb->tx.delivered at end of round */
- struct skb_mstamp cycle_mstamp; /* time of this cycle phase start */
+ u64 cycle_mstamp; /* time of this cycle phase start */
u32 mode:3, /* current bbr_mode in state machine */
prev_ca_state:3, /* CA state on previous ACK */
packet_conservation:1, /* use packet conservation? */
@@ -411,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
bool is_full_length =
- skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+ tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
bbr->min_rtt_us;
u32 inflight, bw;
@@ -497,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct bbr *bbr = inet_csk_ca(sk);
- bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+ bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
bbr->lt_last_delivered = tp->delivered;
bbr->lt_last_lost = tp->lost;
bbr->lt_rtt_cnt = 0;
@@ -551,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
struct bbr *bbr = inet_csk_ca(sk);
u32 lost, delivered;
u64 bw;
- s32 t;
+ u32 t;
if (bbr->lt_use_bw) { /* already using long-term rate, lt_bw? */
if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -603,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
return;
/* Find average delivery rate in this sampling interval. */
- t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
- if (t < 1)
- return; /* interval is less than one jiffy, so wait */
- t = jiffies_to_usecs(t);
- /* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
- if (t < 1) {
+ t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+ if ((s32)t < 1)
+ return; /* interval is less than one ms, so wait */
+ /* Check if we can multiply without overflow */
+ if (t >= ~0U / USEC_PER_MSEC) {
bbr_reset_lt_bw_sampling(sk); /* interval too long; reset */
return;
}
+ t *= USEC_PER_MSEC;
bw = (u64)delivered * BW_UNIT;
do_div(bw, t);
bbr_lt_bw_interval_done(sk, bw);
@@ -825,7 +825,7 @@ static void bbr_init(struct sock *sk)
bbr->idle_restart = 0;
bbr->full_bw = 0;
bbr->full_bw_cnt = 0;
- bbr->cycle_mstamp.v64 = 0;
+ bbr->cycle_mstamp = 0;
bbr->cycle_idx = 0;
bbr_reset_lt_bw_sampling(sk);
bbr_reset_startup_mode(sk);
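
The reworked interval code in bbr_lt_bw_sampling() now tracks the sampling interval in milliseconds and guards the ms-to-us multiply against 32-bit overflow instead of relying on jiffies_to_usecs(). A compilable sketch of just that guard, with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000U

/* Returns the interval in usec, or 0 if the sample must be discarded. */
static uint32_t interval_ms_to_us(uint32_t t_ms)
{
        if ((int32_t)t_ms < 1)
                return 0;               /* less than one ms: wait */
        if (t_ms >= ~0U / USEC_PER_MSEC)
                return 0;               /* multiply would overflow: reset */
        return t_ms * USEC_PER_MSEC;
}

int main(void)
{
        printf("%u\n", interval_ms_to_us(250));         /* 250000 */
        printf("%u\n", interval_ms_to_us(5000000));     /* 0: too long */
        return 0;
}
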
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 10e6775464f6..9a5a9e8eda89 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -441,7 +441,7 @@ void tcp_init_buffer_space(struct sock *sk)
tcp_sndbuf_expand(sk);
tp->rcvq_space.space = tp->rcv_wnd;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rcvq_space.time = tp->tcp_mstamp;
tp->rcvq_space.seq = tp->copied_seq;
@@ -555,11 +555,11 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
u32 delta_us;
- if (tp->rcv_rtt_est.time.v64 == 0)
+ if (tp->rcv_rtt_est.time == 0)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
- delta_us = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcv_rtt_est.time);
+ delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time);
tcp_rcv_rtt_update(tp, delta_us, 1);
new_measure:
@@ -571,13 +571,15 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
+
if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq -
- TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
- tcp_rcv_rtt_update(tp,
- jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr),
- 0);
+ TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ tcp_rcv_rtt_update(tp, delta_us, 0);
+ }
}
/*
@@ -590,7 +592,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
int time;
int copied;
- time = skb_mstamp_us_delta(&tp->tcp_mstamp, &tp->rcvq_space.time);
+ time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time);
if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0)
return;
@@ -1134,8 +1136,8 @@ struct tcp_sacktag_state {
* that was SACKed. RTO needs the earliest RTT to stay conservative,
* but congestion control should still get an accurate delay signal.
*/
- struct skb_mstamp first_sackt;
- struct skb_mstamp last_sackt;
+ u64 first_sackt;
+ u64 last_sackt;
struct rate_sample *rate;
int flag;
};
@@ -1200,7 +1202,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
int dup_sack, int pcount,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1242,9 +1244,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
- if (state->first_sackt.v64 == 0)
- state->first_sackt = *xmit_time;
- state->last_sackt = *xmit_time;
+ if (state->first_sackt == 0)
+ state->first_sackt = xmit_time;
+ state->last_sackt = xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1304,7 +1306,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (skb == tp->lost_skb_hint)
@@ -1356,8 +1358,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
tcp_advance_highest_sack(sk, skb);
tcp_skb_collapse_tstamp(prev, skb);
- if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp.v64))
- TCP_SKB_CB(prev)->tx.delivered_mstamp.v64 = 0;
+ if (unlikely(TCP_SKB_CB(prev)->tx.delivered_mstamp))
+ TCP_SKB_CB(prev)->tx.delivered_mstamp = 0;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
@@ -1587,7 +1589,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
- &skb->skb_mstamp);
+ skb->skb_mstamp);
tcp_rate_skb_delivered(sk, skb, state->rate);
if (!before(TCP_SKB_CB(skb)->seq,
@@ -2936,9 +2938,12 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
- flag & FLAG_ACKED)
- seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp -
- tp->rx_opt.rcv_tsecr);
+ flag & FLAG_ACKED) {
+ u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr;
+ u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ);
+
+ seq_rtt_us = ca_rtt_us = delta_us;
+ }
if (seq_rtt_us < 0)
return false;
@@ -2960,12 +2965,8 @@ void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
long rtt_us = -1L;
- if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) {
- struct skb_mstamp now;
-
- skb_mstamp_get(&now);
- rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack);
- }
+ if (req && !req->num_retrans && tcp_rsk(req)->snt_synack)
+ rtt_us = tcp_stamp_us_delta(tcp_clock_us(), tcp_rsk(req)->snt_synack);
tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us);
}
@@ -3003,7 +3004,7 @@ void tcp_rearm_rto(struct sock *sk)
struct sk_buff *skb = tcp_write_queue_head(sk);
const u32 rto_time_stamp =
tcp_skb_timestamp(skb) + rto;
- s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
+ s32 delta = (s32)(rto_time_stamp - tcp_jiffies32);
/* delta may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
@@ -3060,9 +3061,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
struct tcp_sacktag_state *sack)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct skb_mstamp first_ackt, last_ackt;
+ u64 first_ackt, last_ackt;
struct tcp_sock *tp = tcp_sk(sk);
- struct skb_mstamp *now = &tp->tcp_mstamp;
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
bool fully_acked = true;
@@ -3075,7 +3075,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
bool rtt_update;
int flag = 0;
- first_ackt.v64 = 0;
+ first_ackt = 0;
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3106,8 +3106,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
flag |= FLAG_RETRANS_DATA_ACKED;
} else if (!(sacked & TCPCB_SACKED_ACKED)) {
last_ackt = skb->skb_mstamp;
- WARN_ON_ONCE(last_ackt.v64 == 0);
- if (!first_ackt.v64)
+ WARN_ON_ONCE(last_ackt == 0);
+ if (!first_ackt)
first_ackt = last_ackt;
last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
@@ -3122,7 +3122,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb))
tcp_rack_advance(tp, sacked, scb->end_seq,
- &skb->skb_mstamp);
+ skb->skb_mstamp);
}
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3165,13 +3165,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
- seq_rtt_us = skb_mstamp_us_delta(now, &first_ackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &last_ackt);
+ if (likely(first_ackt) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
+ seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt);
}
- if (sack->first_sackt.v64) {
- sack_rtt_us = skb_mstamp_us_delta(now, &sack->first_sackt);
- ca_rtt_us = skb_mstamp_us_delta(now, &sack->last_sackt);
+ if (sack->first_sackt) {
+ sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt);
+ ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt);
}
sack->rate->rtt_us = ca_rtt_us; /* RTT of last (S)ACKed packet, or -1 */
rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
@@ -3201,7 +3201,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
} else if (skb && rtt_update && sack_rtt_us >= 0 &&
- sack_rtt_us > skb_mstamp_us_delta(now, &skb->skb_mstamp)) {
+ sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3553,7 +3553,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
- sack_state.first_sackt.v64 = 0;
+ sack_state.first_sackt = 0;
sack_state.rate = &rs;
/* We very likely will need to access write queue head. */
@@ -5356,7 +5356,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
{
struct tcp_sock *tp = tcp_sk(sk);
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
if (unlikely(!sk->sk_rx_dst))
inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb);
/*
@@ -5672,7 +5672,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
- tcp_time_stamp)) {
+ tcp_time_stamp(tp))) {
NET_INC_STATS(sock_net(sk),
LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
@@ -5917,7 +5917,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_SYN_SENT:
tp->rx_opt.saw_tstamp = 0;
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
queued = tcp_rcv_synsent_state_process(sk, skb, th);
if (queued >= 0)
return queued;
@@ -5929,7 +5929,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
return 0;
}
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
tp->rx_opt.saw_tstamp = 0;
req = tp->fastopen_rsk;
if (req) {
@@ -6202,7 +6202,7 @@ static void tcp_openreq_init(struct request_sock *req,
req->cookie_ts = 0;
tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
- skb_mstamp_get(&tcp_rsk(req)->snt_synack);
+ tcp_rsk(req)->snt_synack = tcp_clock_us();
tcp_rsk(req)->last_oow_ack_time = 0;
req->mss = rx_opt->mss_clamp;
req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
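
Several hunks above (tcp_rcv_rtt_measure_ts(), tcp_ack_update_rtt()) compute an RTT sample from the echoed timestamp by subtracting in TCP_TS_HZ ticks and then scaling to microseconds. A standalone sketch of that arithmetic, assuming TCP_TS_HZ == 1000; the function name is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define TCP_TS_HZ    1000U
#define USEC_PER_SEC 1000000U

static uint32_t rtt_us_from_tsecr(uint32_t ts_now, uint32_t rcv_tsecr)
{
        uint32_t delta = ts_now - rcv_tsecr;    /* wrap-safe in u32 */

        return delta * (USEC_PER_SEC / TCP_TS_HZ);
}

int main(void)
{
        /* Echoed TS is 40 ticks (40 ms) old -> 40000 us RTT sample. */
        printf("%u\n", rtt_us_from_tsecr(1040, 1000));
        return 0;
}
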
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d8fe25db79f2..191b2f78b19d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
struct sock *sk;
struct sk_buff *skb;
struct request_sock *fastopen;
- __u32 seq, snd_una;
- __u32 remaining;
+ u32 seq, snd_una;
+ s32 remaining;
+ u32 delta_us;
int err;
struct net *net = dev_net(icmp_skb->dev);
@@ -483,12 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
+ delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
remaining = icsk->icsk_rto -
- min(icsk->icsk_rto,
- tcp_time_stamp - tcp_skb_timestamp(skb));
+ usecs_to_jiffies(delta_us);
- if (remaining) {
+ if (remaining > 0) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
} else {
@@ -812,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v4_send_ack(sk, skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
@@ -840,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_v4_send_ack(sk, skb, seq,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent,
0,
tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
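
The tcp_v4_err() hunk measures the elapsed time in microseconds from the u64 stamps, converts it to jiffies, and keeps the leftover RTO signed so an already-expired timer shows up as a negative value rather than a huge unsigned one. An illustrative sketch; the HZ value and the round-up conversion only mimic the real kernel helpers:

#include <stdint.h>
#include <stdio.h>

#define HZ 1000U        /* illustrative; kernels vary */

static uint32_t usecs_to_jiffies_sketch(uint32_t us)
{
        /* round up, like the kernel helper */
        return (us + (1000000U / HZ) - 1) / (1000000U / HZ);
}

static int32_t rto_remaining(uint32_t rto_jiffies,
                             uint64_t now_us, uint64_t sent_us)
{
        uint32_t delta_us = (uint32_t)(now_us - sent_us);

        return (int32_t)(rto_jiffies - usecs_to_jiffies_sketch(delta_us));
}

int main(void)
{
        /* 200 ms RTO, head sent 350 ms ago: remaining is negative. */
        printf("%d\n", rto_remaining(200, 350000, 0));  /* -150 */
        return 0;
}
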
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index ef3122abb373..ae10ed64fe13 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -37,7 +37,7 @@
#include <net/tcp.h>
/* resolution of owd */
-#define LP_RESOL 1000
+#define LP_RESOL TCP_TS_HZ
/**
* enum tcp_lp_state
@@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
tp->rx_opt.rcv_tsecr == lp->local_ref_time)
goto out;
- m = HZ * (tp->rx_opt.rcv_tsval -
- lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
- lp->local_ref_time);
+ m = TCP_TS_HZ *
+ (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+ (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
if (m < 0)
m = -m;
@@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
if (lp->flag & LP_VALID_RHZ) {
owd =
tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
- tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+ tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
if (owd < 0)
owd = -owd;
}
@@ -264,7 +264,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
- u32 now = tcp_time_stamp;
+ u32 now = tcp_time_stamp(tp);
u32 delta;
if (sample->rtt_us > 0)
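
tcp_lp's remote-HZ estimator now scales by TCP_TS_HZ rather than the local HZ. A standalone sketch of the core ratio, with an explicit divide-by-zero guard standing in for the local_ref_time check the kernel code performs earlier; inputs are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TCP_TS_HZ 1000U

static int32_t remote_hz_estimate(uint32_t tsval, uint32_t remote_ref,
                                  uint32_t tsecr, uint32_t local_ref)
{
        int32_t m;

        if (tsecr == local_ref)         /* no local time elapsed yet */
                return -1;
        m = (int32_t)(TCP_TS_HZ * (tsval - remote_ref) /
                      (tsecr - local_ref));
        return m < 0 ? -m : m;
}

int main(void)
{
        /* Remote ticked 100 times while our 1 kHz clock ticked 1000,
         * so the remote HZ estimate is 100.
         */
        printf("%d\n", remote_hz_estimate(1100, 1000, 2000, 1000));
        return 0;
}
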
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6504f1082bdf..d0642df73044 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
newtp->tlp_high_seq = 0;
- newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+ newtp->lsndtime = tcp_jiffies32;
newsk->sk_txhash = treq->txhash;
newtp->last_oow_ack_time = 0;
newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
- newtp->rack.mstamp.v64 = 0;
+ newtp->rack.mstamp = 0;
newtp->rack.advanced = 0;
__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 65472e931a0b..478f75baee31 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1962,7 +1962,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
head = tcp_write_queue_head(sk);
- age = skb_mstamp_us_delta(&tp->tcp_mstamp, &head->skb_mstamp);
+ age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
/* If next ACK is likely to come too late (half srtt), do not defer */
if (age < (tp->srtt_us >> 4))
goto send_now;
@@ -2279,7 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
max_segs = tcp_tso_segs(sk, mss_now);
- skb_mstamp_get(&tp->tcp_mstamp);
+ tcp_mstamp_refresh(tp);
while ((skb = tcp_send_head(sk))) {
unsigned int limit;
@@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
skb_reserve(skb, MAX_TCP_HEADER);
tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
TCPHDR_ACK | TCPHDR_RST);
- skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+ tcp_mstamp_refresh(tcp_sk(sk));
/* Send it off. */
if (tcp_transmit_skb(sk, skb, 0, priority))
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3191,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+ skb->skb_mstamp = cookie_init_timestamp(req);
else
#endif
- skb_mstamp_get(&skb->skb_mstamp);
+ skb->skb_mstamp = tcp_clock_us();
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
@@ -3453,8 +3453,8 @@ int tcp_connect(struct sock *sk)
return -ENOBUFS;
tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
- skb_mstamp_get(&tp->tcp_mstamp);
- tp->retrans_stamp = tp->tcp_mstamp.stamp_jiffies;
+ tcp_mstamp_refresh(tp);
+ tp->retrans_stamp = tcp_time_stamp(tp);
tcp_connect_queue_skb(sk, buff);
tcp_ecn_send_syn(sk, buff);
@@ -3615,7 +3615,7 @@ void tcp_send_window_probe(struct sock *sk)
{
if (sk->sk_state == TCP_ESTABLISHED) {
tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
- skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+ tcp_mstamp_refresh(tcp_sk(sk));
tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
}
}
diff --git a/net/ipv4/tcp_rate.c b/net/ipv4/tcp_rate.c
index c6a9fa894646..ad99569d4c1e 100644
--- a/net/ipv4/tcp_rate.c
+++ b/net/ipv4/tcp_rate.c
@@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
- if (!scb->tx.delivered_mstamp.v64)
+ if (!scb->tx.delivered_mstamp)
return;
if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
rs->is_retrans = scb->sacked & TCPCB_RETRANS;
/* Find the duration of the "send phase" of this window: */
- rs->interval_us = skb_mstamp_us_delta(
- &skb->skb_mstamp,
- &scb->tx.first_tx_mstamp);
+ rs->interval_us = tcp_stamp_us_delta(
+ skb->skb_mstamp,
+ scb->tx.first_tx_mstamp);
/* Record send time of most recently ACKed packet: */
tp->first_tx_mstamp = skb->skb_mstamp;
@@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
* we don't need to reset since it'll be freed soon.
*/
if (scb->sacked & TCPCB_SACKED_ACKED)
- scb->tx.delivered_mstamp.v64 = 0;
+ scb->tx.delivered_mstamp = 0;
}
/* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
rs->acked_sacked = delivered; /* freshly ACKed or SACKed */
rs->losses = lost; /* freshly marked lost */
/* Return an invalid sample if no timing information is available. */
- if (!rs->prior_mstamp.v64) {
+ if (!rs->prior_mstamp) {
rs->delivered = -1;
rs->interval_us = -1;
return;
@@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
* longer phase.
*/
snd_us = rs->interval_us; /* send phase */
- ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &rs->prior_mstamp); /* ack phase */
+ ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+ rs->prior_mstamp); /* ack phase */
rs->interval_us = max(snd_us, ack_us);
/* Normally we expect interval_us >= min-rtt.
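
A standalone sketch of the interval choice tcp_rate_gen() makes above: a rate sample spans the longer of the send phase and the ack phase, so neither a transmit burst nor an ack burst can inflate the computed delivery rate. Timestamps are u64 microseconds; the values are illustrative:

#include <stdint.h>
#include <stdio.h>

static int64_t sample_interval_us(uint64_t first_tx, uint64_t last_tx,
                                  uint64_t prior_ack, uint64_t now)
{
        int64_t snd_us = (int64_t)(last_tx - first_tx);  /* send phase */
        int64_t ack_us = (int64_t)(now - prior_ack);     /* ack phase */

        return snd_us > ack_us ? snd_us : ack_us;
}

int main(void)
{
        /* Burst sent in 2 ms but acked over 30 ms: use 30 ms. */
        printf("%lld\n",
               (long long)sample_interval_us(0, 2000, 0, 30000));
        return 0;
}
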
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index cd72b3d3879e..fe9a493d0208 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
}
}
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
- const struct skb_mstamp *t2,
- u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
- return skb_mstamp_after(t1, t2) ||
- (t1->v64 == t2->v64 && after(seq1, seq2));
+ return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
scb->sacked & TCPCB_SACKED_ACKED)
continue;
- if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+ if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
tp->rack.end_seq, scb->end_seq)) {
/* Step 3 in draft-cheng-tcpm-rack-00.txt:
* A packet is lost if its elapsed time is beyond
* the recent RTT plus the reordering window.
*/
- u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
- &skb->skb_mstamp);
+ u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+ skb->skb_mstamp);
s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
if (remaining < 0) {
@@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk)
* draft-cheng-tcpm-rack-00.txt
*/
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time)
+ u64 xmit_time)
{
u32 rtt_us;
- if (tp->rack.mstamp.v64 &&
- !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+ if (tp->rack.mstamp &&
+ !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
end_seq, tp->rack.end_seq))
return;
- rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+ rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
if (sacked & TCPCB_RETRANS) {
/* If the sacked packet was retransmitted, it's ambiguous
* whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
return;
}
tp->rack.rtt_us = rtt_us;
- tp->rack.mstamp = *xmit_time;
+ tp->rack.mstamp = xmit_time;
tp->rack.end_seq = end_seq;
tp->rack.advanced = 1;
}
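
With plain u64 microsecond stamps, the RACK ordering test collapses to an ordinary compare, with sequence numbers breaking exact-timestamp ties. A compilable sketch, including the usual wrap-safe sequence comparison:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool after_seq(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) > 0;      /* wrap-safe seq compare */
}

static bool rack_sent_after(uint64_t t1, uint64_t t2,
                            uint32_t seq1, uint32_t seq2)
{
        return t1 > t2 || (t1 == t2 && after_seq(seq1, seq2));
}

int main(void)
{
        /* Equal stamps: the tie is broken by sequence number. */
        printf("%d\n", rack_sent_after(100, 100, 2000, 1000));  /* 1 */
        return 0;
}
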
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 6629f47aa7f0..27a667bce806 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -153,8 +153,8 @@ static bool retransmits_timed_out(struct sock *sk,
unsigned int timeout,
bool syn_set)
{
- unsigned int linear_backoff_thresh, start_ts;
unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+ unsigned int linear_backoff_thresh, start_ts;
if (!inet_csk(sk)->icsk_retransmits)
return false;
@@ -172,7 +172,7 @@ static bool retransmits_timed_out(struct sock *sk,
timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
(boundary - linear_backoff_thresh) * TCP_RTO_MAX;
}
- return (tcp_time_stamp - start_ts) >= timeout;
+ return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
}
/* A write timeout has occurred. Process the after effects. */
@@ -341,7 +341,7 @@ static void tcp_probe_timer(struct sock *sk)
if (!start_ts)
tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
else if (icsk->icsk_user_timeout &&
- (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+ (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
goto abort;
max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -561,7 +561,7 @@ void tcp_write_timer_handler(struct sock *sk)
goto out;
}
- skb_mstamp_get(&tcp_sk(sk)->tcp_mstamp);
+ tcp_mstamp_refresh(tcp_sk(sk));
event = icsk->icsk_pending;
switch (event) {
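
The retransmits_timed_out() fix above is a unit change: start_ts now comes from the 1 ms TCP_TS_HZ clock, so the jiffies-based timeout budget must be converted to milliseconds before comparing. A sketch with an illustrative HZ of 250; the conversion shown is exact only when HZ divides 1000:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 250U         /* illustrative */

static uint32_t jiffies_to_msecs_sketch(uint32_t j)
{
        return j * (1000U / HZ);
}

static bool timed_out(uint32_t now_ms, uint32_t start_ms,
                      uint32_t timeout_jiffies)
{
        return (now_ms - start_ms) >= jiffies_to_msecs_sketch(timeout_jiffies);
}

int main(void)
{
        /* 120 s budget (30000 jiffies at HZ=250), 119 s elapsed. */
        printf("%d\n", timed_out(119000, 0, 30000));    /* 0: not yet */
        return 0;
}
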
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 5abc3692b901..971823359f5b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
ireq->wscale_ok = tcp_opt.wscale_ok;
ireq->tstamp_ok = tcp_opt.saw_tstamp;
req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
- treq->snt_synack.v64 = 0;
+ treq->snt_synack = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
treq->ts_off = 0;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4f4310a36a04..233edfabe1db 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
- tcp_time_stamp + tcptw->tw_ts_offset,
+ tcp_time_stamp_raw() + tcptw->tw_ts_offset,
tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
@@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt,
req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
- tcp_time_stamp + tcp_rsk(req)->ts_off,
+ tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index a504e87c6ddf..49bd8bb16b18 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
struct synproxy_options *opts)
{
opts->tsecr = opts->tsval;
- opts->tsval = tcp_time_stamp & ~0x3f;
+ opts->tsval = tcp_time_stamp_raw() & ~0x3f;
if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
opts->tsval |= opts->wscale;
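
A standalone sketch of the synproxy timestamp-cookie layout this hunk preserves on the new raw clock: the low 6 bits of tsval are cleared and reused to carry option state such as the window scale, which the peer echoes back in tsecr. The helper below is illustrative, not the module's API:

#include <stdint.h>
#include <stdio.h>

static uint32_t synproxy_tsval(uint32_t raw_ts, uint8_t wscale)
{
        /* Keep the clock in the high bits, stash options in the low 6. */
        return (raw_ts & ~0x3fU) | (wscale & 0x3fU);
}

int main(void)
{
        printf("0x%x\n", synproxy_tsval(0x12345678, 7));  /* 0x12345647 */
        return 0;
}
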