 device/pools_test.go | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 62 insertions(+), 11 deletions(-)
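
This change migrates the pool tests from the package-level sync/atomic functions on bare integers (atomic.AddInt32, atomic.LoadUint32, atomic.CompareAndSwapUint32) to the atomic types introduced in Go 1.19 (atomic.Int32, atomic.Uint32), and adds two benchmarks that give WaitPool a baseline: one with no blocking bound and one against a plain sync.Pool. As a minimal standalone sketch (not part of the patch; none of these names appear in it), the two styles compare as follows:

// Sketch, assuming Go 1.19+.
package main

import "sync/atomic"

func main() {
	// Old style: a bare integer addressed by pointer; nothing prevents a
	// stray non-atomic read or write of the same variable elsewhere.
	var oldCounter int32
	_ = atomic.AddInt32(&oldCounter, -1)

	// New style: the methods are the only access path, and go vet's
	// copylocks check flags accidental copies of the value.
	var newCounter atomic.Int32
	_ = newCounter.Add(-1)
}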
diff --git a/device/pools_test.go b/device/pools_test.go
index 17e2298..82d7493 100644
--- a/device/pools_test.go
+++ b/device/pools_test.go
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: MIT
*
- * Copyright (C) 2019-2021 WireGuard LLC. All Rights Reserved.
+ * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
*/

package device
@@ -17,29 +17,31 @@ import (
func TestWaitPool(t *testing.T) {
t.Skip("Currently disabled")
var wg sync.WaitGroup
- trials := int32(100000)
+ var trials atomic.Int32
+ startTrials := int32(100000)
if raceEnabled {
// This test can be very slow with -race.
- trials /= 10
+ startTrials /= 10
}
+ trials.Store(startTrials)
workers := runtime.NumCPU() + 2
if workers-4 <= 0 {
t.Skip("Not enough cores")
}
p := NewWaitPool(uint32(workers-4), func() any { return make([]byte, 16) })
wg.Add(workers)
- max := uint32(0)
+ var max atomic.Uint32
updateMax := func() {
- count := atomic.LoadUint32(&p.count)
+ count := p.count.Load()
if count > p.max {
t.Errorf("count (%d) > max (%d)", count, p.max)
}
for {
- old := atomic.LoadUint32(&max)
+ old := max.Load()
if count <= old {
break
}
- if atomic.CompareAndSwapUint32(&max, old, count) {
+ if max.CompareAndSwap(old, count) {
break
}
}
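
The loop above is a lock-free monotonic maximum: each worker observes the pool's current count and publishes it only if it exceeds every value recorded so far, retrying whenever CompareAndSwap loses a race. A self-contained sketch of the same pattern (the helper name observeMax is hypothetical, not from the patch):

package main

import "sync/atomic"

// observeMax records count into max only if it exceeds the current value,
// retrying when another goroutine wins the CompareAndSwap race.
func observeMax(max *atomic.Uint32, count uint32) {
	for {
		old := max.Load()
		if count <= old {
			return // the recorded maximum already covers this observation
		}
		if max.CompareAndSwap(old, count) {
			return // published a new maximum
		}
		// Lost the race between Load and CompareAndSwap; reload and retry.
	}
}

func main() {
	var max atomic.Uint32
	observeMax(&max, 42)
}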
@@ -47,7 +49,7 @@ func TestWaitPool(t *testing.T) {
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
- for atomic.AddInt32(&trials, -1) > 0 {
+ for trials.Add(-1) > 0 {
updateMax()
x := p.Get()
updateMax()
@@ -59,14 +61,15 @@ func TestWaitPool(t *testing.T) {
}()
}
wg.Wait()
- if max != p.max {
+ if max.Load() != p.max {
t.Errorf("Actual maximum count (%d) != ideal maximum count (%d)", max, p.max)
}
}

func BenchmarkWaitPool(b *testing.B) {
var wg sync.WaitGroup
- trials := int32(b.N)
+ var trials atomic.Int32
+ trials.Store(int32(b.N))
workers := runtime.NumCPU() + 2
if workers-4 <= 0 {
b.Skip("Not enough cores")
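
The existing benchmark and the two added in the next hunk share one idiom: a single atomic.Int32 seeded with b.N is drained by every worker, so the total number of timed iterations stays near b.N regardless of how many goroutines run. A compilable sketch of that skeleton (runCountdown and the package name are hypothetical, not in the patch):

package sketch

import (
	"sync"
	"sync/atomic"
	"testing"
)

// runCountdown drains one shared counter across workers: each trials.Add(-1)
// claims an iteration, so roughly b.N units of work run in total.
func runCountdown(b *testing.B, workers int, work func()) {
	var wg sync.WaitGroup
	var trials atomic.Int32
	trials.Store(int32(b.N))
	wg.Add(workers)
	b.ResetTimer()
	for i := 0; i < workers; i++ {
		go func() {
			defer wg.Done()
			for trials.Add(-1) > 0 {
				work()
			}
		}()
	}
	wg.Wait()
}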
@@ -77,7 +80,55 @@ func BenchmarkWaitPool(b *testing.B) {
for i := 0; i < workers; i++ {
go func() {
defer wg.Done()
- for atomic.AddInt32(&trials, -1) > 0 {
+ for trials.Add(-1) > 0 {
+ x := p.Get()
+ time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
+ p.Put(x)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkWaitPoolEmpty(b *testing.B) {
+ var wg sync.WaitGroup
+ var trials atomic.Int32
+ trials.Store(int32(b.N))
+ workers := runtime.NumCPU() + 2
+ if workers-4 <= 0 {
+ b.Skip("Not enough cores")
+ }
+ p := NewWaitPool(0, func() any { return make([]byte, 16) })
+ wg.Add(workers)
+ b.ResetTimer()
+ for i := 0; i < workers; i++ {
+ go func() {
+ defer wg.Done()
+ for trials.Add(-1) > 0 {
+ x := p.Get()
+ time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
+ p.Put(x)
+ }
+ }()
+ }
+ wg.Wait()
+}
+
+func BenchmarkSyncPool(b *testing.B) {
+ var wg sync.WaitGroup
+ var trials atomic.Int32
+ trials.Store(int32(b.N))
+ workers := runtime.NumCPU() + 2
+ if workers-4 <= 0 {
+ b.Skip("Not enough cores")
+ }
+ p := sync.Pool{New: func() any { return make([]byte, 16) }}
+ wg.Add(workers)
+ b.ResetTimer()
+ for i := 0; i < workers; i++ {
+ go func() {
+ defer wg.Done()
+ for trials.Add(-1) > 0 {
x := p.Get()
time.Sleep(time.Duration(rand.Intn(100)) * time.Microsecond)
p.Put(x)
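
Taken together, the three benchmarks bracket WaitPool's cost: BenchmarkWaitPool exercises a bounded pool (capacity workers-4, so Get can block), BenchmarkWaitPoolEmpty constructs the pool with a capacity of 0, i.e. no blocking bound, isolating WaitPool's bookkeeping overhead, and BenchmarkSyncPool measures a plain sync.Pool as the baseline. One way to run just these benchmarks from a checkout, using standard go test flags:

go test -run '^$' -bench 'WaitPool|SyncPool' ./device/

Here -run '^$' keeps ordinary tests from executing, while the -bench regexp selects all three benchmarks by name.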