author     Eric Dumazet <edumazet@google.com>       2018-09-28 10:28:44 -0700
committer  David S. Miller <davem@davemloft.net>    2018-10-01 23:18:51 -0700
commit     fb420d5d91c1274d5966917725e71f27ed092a85 (patch)
tree       f4a5a44cf2c8c53e9af214bc686c5a25736bca5e /net/sched
parent     selftests/tls: Fix recv(MSG_PEEK) & splice() test cases (diff)
tcp/fq: move back to CLOCK_MONOTONIC
In the recent TCP/EDT patch series, I switched TCP and sch_fq clocks from MONOTONIC to TAI, to match the choice made earlier for the sch_etf packet scheduler.

But sure enough, this broke some setups where the TAI clock jumps forward (by almost 50 years...), as reported by Leonard Crestez.

If we want to converge later, we'll probably need to add an skb field to differentiate the clock bases, or a socket option.

In the meantime, a UDP application will need to use the CLOCK_MONOTONIC base for its SCM_TXTIME timestamps if using the fq packet scheduler.

Fixes: 72b0094f9182 ("tcp: switch tcp_clock_ns() to CLOCK_TAI base")
Fixes: 142537e41923 ("net_sched: sch_fq: switch to CLOCK_TAI")
Fixes: fd2bca2aa789 ("tcp: switch internal pacing timer to CLOCK_TAI")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Leonard Crestez <leonard.crestez@nxp.com>
Tested-by: Leonard Crestez <leonard.crestez@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
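To illustrate that last point, here is a minimal userspace sketch (not part of this patch) of pacing a UDP packet with SO_TXTIME/SCM_TXTIME against CLOCK_MONOTONIC. It assumes v4.19+ kernel headers that provide struct sock_txtime and SCM_TXTIME; send_paced() and delay_ns are hypothetical names, and error handling is trimmed:

/* Sketch only: pace one UDP packet via SO_TXTIME/SCM_TXTIME using
 * CLOCK_MONOTONIC, the base sch_fq expects after this change.
 * send_paced() is a hypothetical helper, not kernel or patch code.
 */
#include <linux/net_tstamp.h>	/* struct sock_txtime, SO_TXTIME */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <time.h>
#include <stdint.h>

static ssize_t send_paced(int fd, const void *buf, size_t len,
			  const struct sockaddr_in *dst, uint64_t delay_ns)
{
	struct sock_txtime cfg = { .clockid = CLOCK_MONOTONIC };
	char control[CMSG_SPACE(sizeof(uint64_t))] = { 0 };
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cm;
	struct timespec ts;
	uint64_t txtime;

	/* Ask the kernel to honor per-packet transmit times, expressed
	 * against CLOCK_MONOTONIC so they match sch_fq's clock.
	 */
	if (setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg)) < 0)
		return -1;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	txtime = ts.tv_sec * 1000000000ULL + ts.tv_nsec + delay_ns;

	/* Attach the transmit time as an SCM_TXTIME control message. */
	cm = CMSG_FIRSTHDR(&msg);
	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type = SCM_TXTIME;
	cm->cmsg_len = CMSG_LEN(sizeof(txtime));
	memcpy(CMSG_DATA(cm), &txtime, sizeof(txtime));

	return sendmsg(fd, &msg, 0);
}

With the option set, sch_fq releases the packet once its clock reaches the requested time; after this patch that clock is CLOCK_MONOTONIC, so a TAI-based stamp would point almost 50 years into the future.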
Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/sch_fq.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 628a2cdcfc6f..338222a6c664 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -412,7 +412,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_get_tai_ns();
+	u64 now = ktime_get_ns();
 	struct fq_flow_head *head;
 	struct sk_buff *skb;
 	struct fq_flow *f;
@@ -776,7 +776,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->fq_trees_log = ilog2(1024);
 	q->orphan_mask = 1024 - 1;
 	q->low_rate_threshold = 550000 / 8;
-	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
+	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
 
 	if (opt)
 		err = fq_change(sch, opt, extack);
@@ -831,7 +831,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	st.flows_plimit = q->stat_flows_plimit;
 	st.pkts_too_long = q->stat_pkts_too_long;
 	st.allocation_errors = q->stat_allocation_errors;
-	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
+	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
 	st.flows = q->flows;
 	st.inactive_flows = q->inactive_flows;
 	st.throttled_flows = q->throttled_flows;