author    Florian Westphal <fw@strlen.de>    2016-08-25 15:33:34 +0200
committer Pablo Neira Ayuso <pablo@netfilter.org>    2016-08-30 11:43:09 +0200
commit    c023c0e4a0eb7f0dfebe29b41801bf7373bf4f1d (patch)
tree      305690a520a7cc64387314baa74eca78a0b323ab /net
parent    netfilter: conntrack: add gc worker to remove timed-out entries (diff)
netfilter: conntrack: resched gc again if eviction rate is high
If we evicted a large fraction of the scanned conntrack entries, re-schedule the next gc cycle for immediate execution.

This triggers during tests where load is high and then drops to zero, leaving many connections in TW/CLOSE state with < 30 second timeouts. Without this change it takes several minutes until the conntrack count comes back to normal.

Signed-off-by: Florian Westphal <fw@strlen.de>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
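Before the diff, a rough picture of the heuristic the patch adds: the gc worker already counts how many entries it evicted in a pass; the patch additionally counts how many it scanned, and if 90% or more of the scanned entries were already expired it re-queues itself with a zero delay instead of waiting a full GC_INTERVAL. Below is a minimal userspace sketch of that decision; GC_INTERVAL_MS, next_gc_delay() and the sample numbers are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Illustrative stand-in for the kernel's GC_INTERVAL; the real value and
 * units live in net/netfilter/nf_conntrack_core.c. */
#define GC_INTERVAL_MS 5000UL

/* Hypothetical helper: pick the delay before the next gc pass based on how
 * many of the scanned entries turned out to be expired. */
static unsigned long next_gc_delay(unsigned int scanned, unsigned int expired_count)
{
	/* Integer percentage; the ternary avoids a divide-by-zero when the
	 * pass scanned no entries at all. */
	unsigned int ratio = scanned ? expired_count * 100 / scanned : 0;

	/* 90% or more of the scanned entries were already dead: the table is
	 * draining fast, so run the next pass immediately. */
	if (ratio >= 90)
		return 0;

	return GC_INTERVAL_MS;
}

int main(void)
{
	printf("%lu\n", next_gc_delay(1000, 950)); /* prints 0: immediate re-run */
	printf("%lu\n", next_gc_delay(1000, 100)); /* prints 5000: normal interval */
	return 0;
}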
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/nf_conntrack_core.c | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index f95a9e999f09..7c66ce401ce9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -945,6 +945,7 @@ static void gc_worker(struct work_struct *work)
 {
 	unsigned int i, goal, buckets = 0, expired_count = 0;
 	unsigned long next_run = GC_INTERVAL;
+	unsigned int ratio, scanned = 0;
 	struct conntrack_gc_work *gc_work;
 
 	gc_work = container_of(work, struct conntrack_gc_work, dwork.work);
@@ -969,6 +970,7 @@ static void gc_worker(struct work_struct *work)
 		hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
+			scanned++;
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
 				expired_count++;
@@ -988,6 +990,10 @@ static void gc_worker(struct work_struct *work)
 	if (gc_work->exiting)
 		return;
 
+	ratio = scanned ? expired_count * 100 / scanned : 0;
+	if (ratio >= 90)
+		next_run = 0;
+
 	gc_work->last_bucket = i;
 	schedule_delayed_work(&gc_work->dwork, next_run);
 }
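In the patch itself the same guard appears inline: ratio is computed with integer arithmetic as a whole percentage, the ternary protects against a pass that scanned no entries, and setting next_run to 0 makes the subsequent schedule_delayed_work() queue the worker for immediate execution instead of waiting the default GC_INTERVAL.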