summary refs log tree commit diff stats
path: root/sys/net
diff options
context:
space:
mode:
author: mcbride <mcbride@openbsd.org> 2006-05-28 02:04:15 +0000
committer: mcbride <mcbride@openbsd.org> 2006-05-28 02:04:15 +0000
commit: 79a6b5c50f727f266168f5d54eeebcce5598f1aa (patch)
tree: f67918cc76f5aee4095a66fd0679c2026eee7e2d /sys/net
parent: add ERANGE error detection, found when looking at bgpd's parse.y (diff)
download: wireguard-openbsd-79a6b5c50f727f266168f5d54eeebcce5598f1aa.tar.xz
wireguard-openbsd-79a6b5c50f727f266168f5d54eeebcce5598f1aa.zip
Only preemptively increase the replay counter for outbound TDBs.
Another ipsec failover fix from nathanael at polymorpheus dot com. ok hshoexer@
Diffstat (limited to 'sys/net')
-rw-r--r--  sys/net/if_pfsync.c  43
-rw-r--r--  sys/net/if_pfsync.h  4
2 files changed, 24 insertions, 23 deletions
diff --git a/sys/net/if_pfsync.c b/sys/net/if_pfsync.c
index 7f907254a71..9f43fc4be85 100644
--- a/sys/net/if_pfsync.c
+++ b/sys/net/if_pfsync.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_pfsync.c,v 1.64 2006/05/13 05:23:45 mcbride Exp $ */
+/* $OpenBSD: if_pfsync.c,v 1.65 2006/05/28 02:04:15 mcbride Exp $ */
/*
* Copyright (c) 2002 Michael Shalayeff
@@ -1548,24 +1548,7 @@ pfsync_update_net_tdb(struct pfsync_tdb *pt)
s = spltdb();
tdb = gettdb(pt->spi, &pt->dst, pt->sproto);
if (tdb) {
- /*
- * When a failover happens, the master's rpl is probably above
- * what we see here (we may be up to a second late), so
- * increase it a bit to manage most such situations.
- *
- * For now, just add an offset that is likely to be larger
- * than the number of packets we can see in one second. The RFC
- * just says the next packet must have a higher seq value.
- *
- * XXX What is a good algorithm for this? We could use
- * a rate-determined increase, but to know it, we would have
- * to extend struct tdb.
- * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
- * will soon be replaced anyway. For now, just don't handle
- * this edge case.
- */
-#define RPL_INCR 16384
- pt->rpl = ntohl(pt->rpl) + RPL_INCR;
+ pt->rpl = ntohl(pt->rpl);
pt->cur_bytes = betoh64(pt->cur_bytes);
/* Neither replay nor byte counter should ever decrease. */
@@ -1591,7 +1574,7 @@ pfsync_update_net_tdb(struct pfsync_tdb *pt)
/* One of our local tdbs have been updated, need to sync rpl with others */
int
-pfsync_update_tdb(struct tdb *tdb)
+pfsync_update_tdb(struct tdb *tdb, int output)
{
struct ifnet *ifp = &pfsyncif.sc_if;
struct pfsync_softc *sc = ifp->if_softc;
@@ -1667,7 +1650,25 @@ pfsync_update_tdb(struct tdb *tdb)
pt->sproto = tdb->tdb_sproto;
}
- pt->rpl = htonl(tdb->tdb_rpl);
+ /*
+ * When a failover happens, the master's rpl is probably above
+ * what we see here (we may be up to a second late), so
+ * increase it a bit for outbound tdbs to manage most such
+ * situations.
+ *
+ * For now, just add an offset that is likely to be larger
+ * than the number of packets we can see in one second. The RFC
+ * just says the next packet must have a higher seq value.
+ *
+ * XXX What is a good algorithm for this? We could use
+ * a rate-determined increase, but to know it, we would have
+ * to extend struct tdb.
+ * XXX pt->rpl can wrap over MAXINT, but if so the real tdb
+ * will soon be replaced anyway. For now, just don't handle
+ * this edge case.
+ */
+#define RPL_INCR 16384
+ pt->rpl = htonl(tdb->tdb_rpl + (output ? RPL_INCR : 0));
pt->cur_bytes = htobe64(tdb->tdb_cur_bytes);
if (h->count == sc->sc_maxcount ||
diff --git a/sys/net/if_pfsync.h b/sys/net/if_pfsync.h
index 62902ddc1dc..46459655d75 100644
--- a/sys/net/if_pfsync.h
+++ b/sys/net/if_pfsync.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_pfsync.h,v 1.28 2005/11/04 08:24:14 mcbride Exp $ */
+/* $OpenBSD: if_pfsync.h,v 1.29 2006/05/28 02:04:15 mcbride Exp $ */
/*
* Copyright (c) 2001 Michael Shalayeff
@@ -330,7 +330,7 @@ int pfsync_pack_state(u_int8_t, struct pf_state *, int);
pfsync_pack_state(PFSYNC_ACT_DEL, (st), \
PFSYNC_FLAG_COMPRESS); \
} while (0)
-int pfsync_update_tdb(struct tdb *);
+int pfsync_update_tdb(struct tdb *, int);
#endif
#endif /* _NET_IF_PFSYNC_H_ */