author		Matt Dunwoodie <ncon@noconroy.net>	2021-04-21 11:48:58 +1000
committer	Matt Dunwoodie <ncon@noconroy.net>	2021-04-21 11:48:58 +1000
commit		23dc8e4926e9af068b9e361e8390eda4c1d3c2f7 (patch)
tree		c8c7f435b82ab7d6ee7727a6a48d917f53cfbaa0
parent		global: update timer-type comments (diff)
wg_cookie: ensure gc is called regularly
Previously we relied on gc being called when adding a new entry, which
could leave us in a gc "blind spot". With this change, we schedule a
callout to run gc whenever we have entries in the table. The callout
will continue to run every ELEMENT_TIMEOUT seconds until the table is
empty.

Access to rl_gc is locked by rl_lock, so we will never have any threads
racing to callout_{pending,stop,reset}.

The alternative (which Linux does currently) is just to run the callout
every ELEMENT_TIMEOUT (1) second even when no entries are in the table.
However, the callout solution proposed here seems simple enough.

Signed-off-by: Matt Dunwoodie <ncon@noconroy.net>
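For readers unfamiliar with callout(9), here is a minimal sketch (not part of the
patch) of the locking pattern the commit message describes: the callout is
initialized against the same rwlock that guards the state, so the handler always
runs with the lock write-held and callout_pending()/callout_stop()/callout_reset()
are never raced by other threads. The names my_state, my_gc_handler and
MY_INTERVAL are illustrative stand-ins, not identifiers from this repository.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/callout.h>

#define MY_INTERVAL	1	/* seconds; stands in for ELEMENT_TIMEOUT */

struct my_state {
	struct rwlock	s_lock;	/* guards everything below */
	struct callout	s_gc;	/* periodic cleanup, runs under s_lock */
	size_t		s_num;	/* number of live entries */
};

static void
my_gc_handler(void *arg)
{
	struct my_state *s = arg;

	/* callout_init_rw() ties s_gc to s_lock, so the callout
	 * framework takes the write lock before calling us. */
	rw_assert(&s->s_lock, RA_WLOCKED);

	/* ... reap expired entries here, decrementing s->s_num ... */

	/* Re-arm only while there is still something left to expire;
	 * once s_num reaches zero the callout stops rescheduling. */
	if (s->s_num > 0 && !callout_pending(&s->s_gc))
		callout_reset(&s->s_gc, MY_INTERVAL * hz, my_gc_handler, s);
}

static void
my_state_init(struct my_state *s)
{
	rw_init(&s->s_lock, "my_state_lock");
	/* Associate the callout with the rwlock; the handler runs wlocked. */
	callout_init_rw(&s->s_gc, &s->s_lock, 0);
	s->s_num = 0;
}

static void
my_state_deinit(struct my_state *s)
{
	rw_wlock(&s->s_lock);
	/* Cancel any pending run before tearing the state down. */
	callout_stop(&s->s_gc);
	rw_wunlock(&s->s_lock);
	rw_destroy(&s->s_lock);
}

The patch below follows the same shape: callout_init_rw() in ratelimit_init(),
callout_stop() before the forced gc in ratelimit_deinit(), and a conditional
callout_reset() in ratelimit_gc_schedule().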
-rw-r--r--	src/wg_cookie.c	71
-rw-r--r--	src/wg_cookie.h	2
2 files changed, 44 insertions, 29 deletions
diff --git a/src/wg_cookie.c b/src/wg_cookie.c
index 0a9d988..dc65012 100644
--- a/src/wg_cookie.c
+++ b/src/wg_cookie.c
@@ -26,7 +26,9 @@ static void cookie_checker_make_cookie(struct cookie_checker *,
 		    uint8_t[COOKIE_COOKIE_SIZE], struct sockaddr *);
 static int	ratelimit_init(struct ratelimit *, uma_zone_t);
 static void	ratelimit_deinit(struct ratelimit *);
-static void	ratelimit_gc(struct ratelimit *, int);
+static void	ratelimit_gc_callout(void *);
+static void	ratelimit_gc_schedule(struct ratelimit *);
+static void	ratelimit_gc(struct ratelimit *, bool);
 static int	ratelimit_allow(struct ratelimit *, struct sockaddr *);
 static uint64_t	siphash13(const uint8_t [SIPHASH_KEY_LENGTH], const void *, size_t);
@@ -273,6 +275,7 @@ static int
 ratelimit_init(struct ratelimit *rl, uma_zone_t zone)
 {
 	rw_init(&rl->rl_lock, "ratelimit_lock");
+	callout_init_rw(&rl->rl_gc, &rl->rl_lock, 0);
 	arc4random_buf(rl->rl_secret, sizeof(rl->rl_secret));
 	rl->rl_table = hashinit_flags(RATELIMIT_SIZE, M_DEVBUF,
 	    &rl->rl_table_mask, M_NOWAIT);
@@ -285,47 +288,58 @@ static void
 ratelimit_deinit(struct ratelimit *rl)
 {
 	rw_wlock(&rl->rl_lock);
-	ratelimit_gc(rl, 1);
+	callout_stop(&rl->rl_gc);
+	ratelimit_gc(rl, true);
 	hashdestroy(rl->rl_table, M_DEVBUF, rl->rl_table_mask);
 	rw_wunlock(&rl->rl_lock);
 }
 
 static void
-ratelimit_gc(struct ratelimit *rl, int force)
+ratelimit_gc_callout(void *_rl)
+{
+	/* callout will wlock rl_lock for us */
+	ratelimit_gc(_rl, false);
+}
+
+static void
+ratelimit_gc_schedule(struct ratelimit *rl)
+{
+	/* Trigger another GC if needed. There is no point calling GC if there
+	 * are no entries in the table. We also want to ensure that GC occurs
+	 * on a regular interval, so don't override a currently pending GC.
+	 *
+	 * In the case of a forced ratelimit_gc, there will be no entries left
+	 * so we will not schedule another GC. */
+	if (rl->rl_table_num > 0 && !callout_pending(&rl->rl_gc))
+		callout_reset(&rl->rl_gc, ELEMENT_TIMEOUT * hz,
+		    ratelimit_gc_callout, rl);
+}
+
+static void
+ratelimit_gc(struct ratelimit *rl, bool force)
 {
 	size_t i;
 	struct ratelimit_entry *r, *tr;
-	sbintime_t expiry, now;
+	sbintime_t expiry;
 
 	rw_assert(&rl->rl_lock, RA_WLOCKED);
 
-	if (force) {
-		for (i = 0; i < RATELIMIT_SIZE; i++) {
-			LIST_FOREACH_SAFE(r, &rl->rl_table[i], r_entry, tr) {
+	if (rl->rl_table_num == 0)
+		return;
+
+	expiry = getsbinuptime() - ELEMENT_TIMEOUT * SBT_1S;
+
+	for (i = 0; i < RATELIMIT_SIZE; i++) {
+		LIST_FOREACH_SAFE(r, &rl->rl_table[i], r_entry, tr) {
+			if (r->r_last_time < expiry || force) {
 				rl->rl_table_num--;
 				LIST_REMOVE(r, r_entry);
 				uma_zfree(rl->rl_zone, r);
 			}
 		}
-		return;
 	}
 
-	if ((cookie_timer_expired(rl->rl_last_gc, ELEMENT_TIMEOUT, 0) &&
-	    rl->rl_table_num > 0)) {
-		now = getsbinuptime();
-		expiry = now - ELEMENT_TIMEOUT * SBT_1S;
-		rl->rl_last_gc = now;
-
-		for (i = 0; i < RATELIMIT_SIZE; i++) {
-			LIST_FOREACH_SAFE(r, &rl->rl_table[i], r_entry, tr) {
-				if (r->r_last_time < expiry) {
-					rl->rl_table_num--;
-					LIST_REMOVE(r, r_entry);
-					uma_zfree(rl->rl_zone, r);
-				}
-			}
-		}
-	}
+	ratelimit_gc_schedule(rl);
 }
 
 static int
@@ -388,10 +402,8 @@ ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa)
 		}
 	}
 
-	/* If we get to here, we didn't have an entry for the endpoint. */
-	ratelimit_gc(rl, 0);
-
-	/* Hard limit on number of entries */
+	/* If we get to here, we didn't have an entry for the endpoint, let's
+	 * add one if we have space. */
 	if (rl->rl_table_num >= RATELIMIT_SIZE_MAX)
 		goto error;
@@ -413,6 +425,9 @@ ratelimit_allow(struct ratelimit *rl, struct sockaddr *sa)
 	r->r_last_time = getsbinuptime();
 	r->r_tokens = TOKEN_MAX - INITIATION_COST;
+
+	/* If we've added a new entry, let's trigger GC. */
+	ratelimit_gc_schedule(rl);
 ok:
 	ret = 0;
 error:
diff --git a/src/wg_cookie.h b/src/wg_cookie.h
index 42271ca..690c650 100644
--- a/src/wg_cookie.h
+++ b/src/wg_cookie.h
@@ -62,10 +62,10 @@ struct ratelimit {
 	uma_zone_t			rl_zone;
 
 	struct rwlock			rl_lock;
+	struct callout			rl_gc;
 	LIST_HEAD(, ratelimit_entry)	*rl_table;
 	u_long				rl_table_mask;
 	size_t				rl_table_num;
-	sbintime_t			rl_last_gc;	/* sbinuptime */
 };
 
 struct cookie_maker {