author    Jason A. Donenfeld <Jason@zx2c4.com>  2021-01-29 18:54:19 +0100
committer Jason A. Donenfeld <Jason@zx2c4.com>  2021-01-29 18:57:03 +0100
commit    de51129e33a5fe4fad3da172539e9be640d39211
tree      b444289dfd35d941c4d11441f336401b9ce81a61 /device
parent    device: use new model queues for handshakes
device: use int64 instead of atomic.Value for time stamp
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
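
The change this commit makes is mechanical but worth spelling out: a deadline that was stored as a time.Time inside an atomic.Value becomes a plain int64 of Unix nanoseconds, read and written with sync/atomic. A minimal standalone sketch of the pattern, with illustrative names (deadline, Extend, Expired) that are not taken from wireguard-go:

// Sketch of the pattern this commit adopts: a deadline kept as Unix
// nanoseconds in an int64 so it can be accessed with sync/atomic.
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

var deadline int64 // Unix nanoseconds; accessed only via sync/atomic

// Extend atomically moves the deadline d into the future.
func Extend(d time.Duration) {
	atomic.StoreInt64(&deadline, time.Now().Add(d).UnixNano())
}

// Expired atomically reports whether the deadline has passed.
func Expired() bool {
	return atomic.LoadInt64(&deadline) <= time.Now().UnixNano()
}

func main() {
	Extend(time.Second)
	fmt.Println(Expired()) // false: the deadline is one second away
}

This avoids the interface allocation that atomic.Value.Store incurs on every call and the type assertion on every Load; the price is the 64-bit alignment requirement that the new test below verifies.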
Diffstat (limited to 'device')
-rw-r--r--  device/alignment_test.go (renamed from device/peer_test.go)  24
-rw-r--r--  device/device.go                                              16
2 files changed, 27 insertions, 13 deletions
diff --git a/device/peer_test.go b/device/alignment_test.go
index 0020423..5587cbe 100644
--- a/device/peer_test.go
+++ b/device/alignment_test.go
@@ -41,3 +41,27 @@ func TestPeerAlignment(t *testing.T) {
 	checkAlignment(t, "Peer.stats", unsafe.Offsetof(p.stats))
 	checkAlignment(t, "Peer.isRunning", unsafe.Offsetof(p.isRunning))
 }
+
+
+// TestDeviceAlignment checks that atomically-accessed fields are
+// aligned to 64-bit boundaries, as required by the atomic package.
+//
+// Unfortunately, violating this rule on 32-bit platforms results in a
+// hard segfault at runtime.
+func TestDeviceAlignment(t *testing.T) {
+	var d Device
+
+	typ := reflect.TypeOf(&d).Elem()
+	t.Logf("Device type size: %d, with fields:", typ.Size())
+	for i := 0; i < typ.NumField(); i++ {
+		field := typ.Field(i)
+		t.Logf("\t%30s\toffset=%3v\t(type size=%3d, align=%d)",
+			field.Name,
+			field.Offset,
+			field.Type.Size(),
+			field.Type.Align(),
+		)
+	}
+
+	checkAlignment(t, "Device.rate.underLoadUntil", unsafe.Offsetof(d.rate.underLoadUntil))
+}
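
checkAlignment is defined elsewhere in this file, so the hunk never shows it. A hypothetical reconstruction consistent with the call sites above (the real helper may differ, particularly in its failure message):

// Sketch of the helper invoked above; it would sit in the same _test.go
// file. Atomic 64-bit operations require 8-byte-aligned addresses, and
// 32-bit platforms only guarantee that alignment at the start of an
// allocation, so each atomically-accessed field's offset must be checked.
func checkAlignment(t *testing.T, name string, offset uintptr) {
	t.Helper()
	if offset%8 != 0 {
		t.Errorf("%s: offset %d is not 64-bit aligned", name, offset)
	}
}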
diff --git a/device/device.go b/device/device.go
index fd88855..bac361e 100644
--- a/device/device.go
+++ b/device/device.go
@@ -62,7 +62,7 @@ type Device struct {
 	cookieChecker CookieChecker
 
 	rate struct {
-		underLoadUntil atomic.Value
+		underLoadUntil int64
 		limiter        ratelimiter.Ratelimiter
 	}
 
@@ -245,20 +245,15 @@ func (device *Device) Down() {
 }
 
 func (device *Device) IsUnderLoad() bool {
-
 	// check if currently under load
-
 	now := time.Now()
 	underLoad := len(device.queue.handshake.c) >= UnderLoadQueueSize
 	if underLoad {
-		device.rate.underLoadUntil.Store(now.Add(UnderLoadAfterTime))
+		atomic.StoreInt64(&device.rate.underLoadUntil, now.Add(UnderLoadAfterTime).UnixNano())
 		return true
 	}
-
 	// check if recently under load
-
-	until := device.rate.underLoadUntil.Load().(time.Time)
-	return until.After(now)
+	return atomic.LoadInt64(&device.rate.underLoadUntil) > now.UnixNano()
 }
 
 func (device *Device) SetPrivateKey(sk NoisePrivateKey) error {
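
The rewritten comparison preserves the old behavior: until.After(now) and until.UnixNano() > now.UnixNano() agree for any instant representable in int64 nanoseconds (roughly the years 1678 through 2262), which comfortably covers an under-load deadline a few seconds away. A self-contained check, assuming nothing from wireguard-go:

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	until := now.Add(time.Second) // stand-in for the under-load deadline

	// Both forms ask the same question: is the deadline still ahead?
	fmt.Println(until.After(now))                  // true
	fmt.Println(until.UnixNano() > now.UnixNano()) // true
}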
@@ -327,14 +322,9 @@ func NewDevice(tunDevice tun.Device, logger *Logger) *Device {
 		mtu = DefaultMTU
 	}
 	device.tun.mtu = int32(mtu)
-
 	device.peers.keyMap = make(map[NoisePublicKey]*Peer)
-
 	device.rate.limiter.Init()
-	device.rate.underLoadUntil.Store(time.Time{})
-
 	device.indexTable.Init()
-
 	device.PopulatePools()
 
 	// create queues
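
The dropped initialization in NewDevice is a consequence of the type change, not an unrelated cleanup: the zero value of an int64 is 0, i.e. the Unix epoch in nanoseconds, which is always in the past, so a freshly allocated Device already reads as not under load. An atomic.Value, by contrast, returns nil from Load until the first Store, and the old .(time.Time) assertion would have panicked without the explicit Store(time.Time{}) seed. A sketch of the zero-value behavior, using only the standard library:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

func main() {
	var underLoadUntil int64 // zero value: the epoch, safely in the past
	stillUnderLoad := atomic.LoadInt64(&underLoadUntil) > time.Now().UnixNano()
	fmt.Println(stillUnderLoad) // false, with no explicit initialization
}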