author    Pablo Neira Ayuso <pablo@netfilter.org>  2019-08-09 11:01:35 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>  2019-08-09 14:41:21 +0200
commit    1e5b2471bcc4838df298080ae1ec042c2cbc9ce9 (patch)
tree      afb60cb11b763df36f19564e1d85e93ff7f3ee31 /net
parent    netfilter: nf_flow_table: conntrack picks up expired flows (diff)
netfilter: nf_flow_table: teardown flow timeout race
Flows that are in teardown state (due to RST / FIN TCP packet) still have their offload flag set on. Hence, the conntrack garbage collector may race to undo the timeout adjustment that the fixup routine performs, leaving the conntrack entry in place with the internal offload timeout (one day).

Update teardown flow state to ESTABLISHED and set tracking to liberal, then once the offload bit is cleared, adjust timeout if it is more than the default fixup timeout (conntrack might already have set a lower timeout from the packet path).

Fixes: da5984e51063 ("netfilter: nf_flow_table: add support for sending flows back to the slow path")
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
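For illustration only, here is a minimal userspace sketch (plain C, not the kernel code; the jiffies variable and flow_timeout_delta() below are stand-ins) of the wraparound-safe signed-delta comparison that the new nf_flow_timeout_delta() helper performs: the unsigned subtraction is cast to a signed 32-bit value, so a deadline that already lies in the past yields a negative delta even if the jiffies counter wrapped in between.

/*
 * Userspace sketch of the wraparound-safe timeout delta used by the
 * nf_flow_timeout_delta() helper added in this patch. Stand-in names,
 * not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t jiffies;	/* stand-in for the kernel's jiffies counter */

static int32_t flow_timeout_delta(uint32_t timeout)
{
	return (int32_t)(timeout - jiffies);
}

int main(void)
{
	jiffies = 0xfffffff0u;			/* close to the 32-bit wrap */
	uint32_t deadline = jiffies + 120;	/* "120 ticks from now", wraps to a small value */

	jiffies += 60;	/* 60 ticks later: 60 ticks still remain */
	printf("delta = %d (flow still valid)\n", flow_timeout_delta(deadline));

	jiffies += 120;	/* 120 more ticks: deadline passed 60 ticks ago */
	printf("delta = %d (flow expired)\n", flow_timeout_delta(deadline));
	return 0;
}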
Diffstat (limited to 'net')
 net/netfilter/nf_flow_table_core.c | 34 +++++++++++++++++++++++++---------
 1 file changed, 25 insertions(+), 9 deletions(-)
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 68a24471ffee..80a8f9ae4c93 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -111,15 +111,16 @@ static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
#define NF_FLOWTABLE_TCP_PICKUP_TIMEOUT (120 * HZ)
#define NF_FLOWTABLE_UDP_PICKUP_TIMEOUT (30 * HZ)
-static void flow_offload_fixup_ct(struct nf_conn *ct)
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - (u32)jiffies);
+}
+
+static void flow_offload_fixup_ct_timeout(struct nf_conn *ct)
{
const struct nf_conntrack_l4proto *l4proto;
+ int l4num = nf_ct_protonum(ct);
unsigned int timeout;
- int l4num;
-
- l4num = nf_ct_protonum(ct);
- if (l4num == IPPROTO_TCP)
- flow_offload_fixup_tcp(&ct->proto.tcp);
l4proto = nf_ct_l4proto_find(l4num);
if (!l4proto)
@@ -132,7 +133,20 @@ static void flow_offload_fixup_ct(struct nf_conn *ct)
else
return;
- ct->timeout = nfct_time_stamp + timeout;
+ if (nf_flow_timeout_delta(ct->timeout) > (__s32)timeout)
+ ct->timeout = nfct_time_stamp + timeout;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+ if (nf_ct_protonum(ct) == IPPROTO_TCP)
+ flow_offload_fixup_tcp(&ct->proto.tcp);
+}
+
+static void flow_offload_fixup_ct(struct nf_conn *ct)
+{
+ flow_offload_fixup_ct_state(ct);
+ flow_offload_fixup_ct_timeout(ct);
}
void flow_offload_free(struct flow_offload *flow)
@@ -210,7 +224,7 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
static inline bool nf_flow_has_expired(const struct flow_offload *flow)
{
- return (__s32)(flow->timeout - (u32)jiffies) <= 0;
+ return nf_flow_timeout_delta(flow->timeout) <= 0;
}
static void flow_offload_del(struct nf_flowtable *flow_table,
@@ -230,6 +244,8 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
if (nf_flow_has_expired(flow))
flow_offload_fixup_ct(e->ct);
+ else if (flow->flags & FLOW_OFFLOAD_TEARDOWN)
+ flow_offload_fixup_ct_timeout(e->ct);
flow_offload_free(flow);
}
@@ -241,7 +257,7 @@ void flow_offload_teardown(struct flow_offload *flow)
flow->flags |= FLOW_OFFLOAD_TEARDOWN;
e = container_of(flow, struct flow_offload_entry, flow);
- flow_offload_fixup_ct(e->ct);
+ flow_offload_fixup_ct_state(e->ct);
}
EXPORT_SYMBOL_GPL(flow_offload_teardown);
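As a rough illustration of the clamping behaviour introduced in flow_offload_fixup_ct_timeout(), here is a hedged userspace sketch (all names are made up for the example; DAY and TCP_PICKUP merely approximate the one-day offload timeout and NF_FLOWTABLE_TCP_PICKUP_TIMEOUT): the fixup only lowers a conntrack timeout that still carries the large offload value, so a shorter timeout already installed from the packet path is left untouched and the race with the conntrack garbage collector no longer re-extends the entry.

/*
 * Userspace sketch (hypothetical names, not the kernel implementation)
 * of the conditional timeout fixup: only shrink the timeout when more
 * than the pickup timeout remains.
 */
#include <stdint.h>
#include <stdio.h>

#define DAY		(86400u)	/* stand-in for the one-day offload timeout, in seconds */
#define TCP_PICKUP	(120u)		/* stand-in for NF_FLOWTABLE_TCP_PICKUP_TIMEOUT, in seconds */

static uint32_t now;			/* stand-in for jiffies / nfct_time_stamp */

static int32_t timeout_delta(uint32_t timeout)
{
	return (int32_t)(timeout - now);
}

static void fixup_timeout(uint32_t *ct_timeout, uint32_t pickup)
{
	/* Only adjust when more than the pickup timeout remains. */
	if (timeout_delta(*ct_timeout) > (int32_t)pickup)
		*ct_timeout = now + pickup;
}

int main(void)
{
	uint32_t ct_timeout;

	/* Case 1: entry still holds the offload timeout -> clamped to pickup. */
	ct_timeout = now + DAY;
	fixup_timeout(&ct_timeout, TCP_PICKUP);
	printf("offloaded entry: %d seconds left\n", timeout_delta(ct_timeout));

	/* Case 2: packet path already set a shorter timeout -> kept as-is. */
	ct_timeout = now + 30;
	fixup_timeout(&ct_timeout, TCP_PICKUP);
	printf("already-shortened entry: %d seconds left\n", timeout_delta(ct_timeout));
	return 0;
}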