...

package primitives_test

import (
	"fmt"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

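// The microbenchmarks below compare the relative cost of Go synchronization
// and dispatch primitives. Run them with "go test -bench=."; absolute numbers
// vary by platform and Go version.

// BenchmarkSelectClosed measures a non-blocking select on a closed channel:
// the receive case is always ready, so default is never taken.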
func BenchmarkSelectClosed(b *testing.B) {
	c := make(chan struct{})
	close(c)
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		select {
		case <-c:
			x++
		default:
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

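// BenchmarkSelectOpen measures a non-blocking select on an open, empty
// channel: the receive never fires, so the default case is always taken.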
func BenchmarkSelectOpen(b *testing.B) {
	c := make(chan struct{})
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		select {
		case <-c:
		default:
			x++
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

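// BenchmarkAtomicBool measures reading a boolean flag stored in an int32 via
// atomic.LoadInt32.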
func BenchmarkAtomicBool(b *testing.B) {
	c := int32(0)
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if atomic.LoadInt32(&c) == 0 {
			x++
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

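// BenchmarkAtomicValueLoad measures atomic.Value.Load plus the type assertion
// needed to get the stored int back.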
func BenchmarkAtomicValueLoad(b *testing.B) {
	c := atomic.Value{}
	c.Store(0)
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if c.Load().(int) == 0 {
			x++
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

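// BenchmarkAtomicValueStore measures atomic.Value.Store of an int.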
func BenchmarkAtomicValueStore(b *testing.B) {
	c := atomic.Value{}
	v := 123
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Store(v)
	}
	b.StopTimer()
}

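// BenchmarkMutex measures an uncontended sync.Mutex Lock/Unlock pair.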
func BenchmarkMutex(b *testing.B) {
	c := sync.Mutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Lock()
		x++
		c.Unlock()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

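// BenchmarkRWMutex measures an uncontended sync.RWMutex RLock/RUnlock pair.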
func BenchmarkRWMutex(b *testing.B) {
	c := sync.RWMutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.RLock()
		x++
		c.RUnlock()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

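// BenchmarkRWMutexW measures an uncontended sync.RWMutex write-side
// Lock/Unlock pair, for comparison with the read-side benchmark above.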
func BenchmarkRWMutexW(b *testing.B) {
	c := sync.RWMutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Lock()
		x++
		c.Unlock()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

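// BenchmarkMutexWithDefer is BenchmarkMutex with the Unlock deferred, to show
// the overhead defer adds to a critical section.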
func BenchmarkMutexWithDefer(b *testing.B) {
	c := sync.Mutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		func() {
			c.Lock()
			defer c.Unlock()
			x++
		}()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

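// BenchmarkMutexWithClosureDefer defers a closure that unlocks, adding one
// more level of indirection than BenchmarkMutexWithDefer.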
func BenchmarkMutexWithClosureDefer(b *testing.B) {
	c := sync.Mutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		func() {
			c.Lock()
			defer func() { c.Unlock() }()
			x++
		}()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

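// BenchmarkMutexWithoutDefer keeps the same per-iteration closure as the two
// defer benchmarks but unlocks directly, isolating the cost of defer itself.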
func BenchmarkMutexWithoutDefer(b *testing.B) {
	c := sync.Mutex{}
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		func() {
			c.Lock()
			x++
			c.Unlock()
		}()
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

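// BenchmarkAtomicAddInt64 measures atomic.AddInt64 from a single goroutine.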
func BenchmarkAtomicAddInt64(b *testing.B) {
	var c int64
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		atomic.AddInt64(&c, 1)
	}
	b.StopTimer()
	if c != int64(b.N) {
		b.Fatalf("got %d, want %d", c, b.N)
	}
}

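// BenchmarkAtomicTimeValueStore measures atomic.Value.Store of a time.Time,
// a multi-word struct that must be boxed into an interface value.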
func BenchmarkAtomicTimeValueStore(b *testing.B) {
	var c atomic.Value
	t := time.Now()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Store(t)
	}
	b.StopTimer()
}

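// BenchmarkAtomic16BValueStore measures atomic.Value.Store of a 16-byte
// struct.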
func BenchmarkAtomic16BValueStore(b *testing.B) {
	var c atomic.Value
	t := struct {
		a int64
		b int64
	}{
		123, 123,
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Store(t)
	}
	b.StopTimer()
}

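// BenchmarkAtomic32BValueStore measures atomic.Value.Store of a 32-byte
// struct.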
func BenchmarkAtomic32BValueStore(b *testing.B) {
	var c atomic.Value
	t := struct {
		a int64
		b int64
		c int64
		d int64
	}{
		123, 123, 123, 123,
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.Store(t)
	}
	b.StopTimer()
}

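// BenchmarkAtomicPointerStore measures atomic.StorePointer of an
// unsafe.Pointer to an int.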
func BenchmarkAtomicPointerStore(b *testing.B) {
	t := 123
	var up unsafe.Pointer
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		atomic.StorePointer(&up, unsafe.Pointer(&t))
	}
	b.StopTimer()
}

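// BenchmarkAtomicTimePointerStore is the pointer-based counterpart of
// BenchmarkAtomicTimeValueStore: storing a pointer avoids boxing the struct.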
func BenchmarkAtomicTimePointerStore(b *testing.B) {
	t := time.Now()
	var up unsafe.Pointer
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		atomic.StorePointer(&up, unsafe.Pointer(&t))
	}
	b.StopTimer()
}

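// BenchmarkStoreContentionWithAtomic measures atomic.StorePointer with all
// GOMAXPROCS goroutines storing to the same location.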
func BenchmarkStoreContentionWithAtomic(b *testing.B) {
	t := 123
	var c unsafe.Pointer
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			atomic.StorePointer(&c, unsafe.Pointer(&t))
		}
	})
}

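// BenchmarkStoreContentionWithMutex performs the same contended store as
// above but guarded by a sync.Mutex instead of an atomic operation.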
func BenchmarkStoreContentionWithMutex(b *testing.B) {
	t := 123
	var mu sync.Mutex
	var c int

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			c = t
			mu.Unlock()
		}
	})
	_ = c // keep c live so the stores are not optimized away
}

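// dummyStruct is the shared value updated by BenchmarkStructStoreContention.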
type dummyStruct struct {
	a int64
	b time.Time
}

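// BenchmarkStructStoreContention compares two ways of updating a shared
// struct under contention: a CAS retry loop on an unsafe.Pointer versus a
// sync.Mutex. The sub-benchmark name encodes the amount of local spin work
// between updates (j) and the parallelism multiplier (i).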
func BenchmarkStructStoreContention(b *testing.B) {
	d := dummyStruct{}
	dp := unsafe.Pointer(&d)
	t := time.Now()
	for _, j := range []int{100000000, 10000, 0} {
		for _, i := range []int{100000, 10} {
			b.Run(fmt.Sprintf("CAS/%v/%v", j, i), func(b *testing.B) {
				b.SetParallelism(i)
				b.RunParallel(func(pb *testing.PB) {
					n := &dummyStruct{
						b: t,
					}
					for pb.Next() {
						// Simulate j iterations of local work between updates.
						for y := 0; y < j; y++ {
						}
						// Retry until the swap succeeds; on success, recycle
						// the displaced struct as the next candidate to avoid
						// allocating on every update.
						for {
							v := (*dummyStruct)(atomic.LoadPointer(&dp))
							n.a = v.a + 1
							if atomic.CompareAndSwapPointer(&dp, unsafe.Pointer(v), unsafe.Pointer(n)) {
								n = v
								break
							}
						}
					}
				})
			})
		}
	}

	var mu sync.Mutex
	for _, j := range []int{100000000, 10000, 0} {
		for _, i := range []int{100000, 10} {
			b.Run(fmt.Sprintf("Mutex/%v/%v", j, i), func(b *testing.B) {
				b.SetParallelism(i)
				b.RunParallel(func(pb *testing.PB) {
					for pb.Next() {
						// Same local spin as the CAS variant.
						for y := 0; y < j; y++ {
						}
						mu.Lock()
						d.a++
						d.b = t
						mu.Unlock()
					}
				})
			})
		}
	}
}

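// myFooer implements fooer; both exist to feed the type-assertion benchmarks
// below.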
type myFooer struct{}

func (myFooer) Foo() {}

type fooer interface {
	Foo()
}

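// BenchmarkInterfaceTypeAssertion measures asserting a value held in an any
// to an interface type.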
func BenchmarkInterfaceTypeAssertion(b *testing.B) {
	runInterfaceTypeAssertion(b, myFooer{})
}

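// runInterfaceTypeAssertion does b.N interface type assertions. Taking fer as
// an any parameter (rather than using myFooer{} inline) presumably keeps the
// concrete type opaque, so the assertion cannot be resolved at compile time.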
func runInterfaceTypeAssertion(b *testing.B, fer any) {
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, ok := fer.(fooer); ok {
			x++
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

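// BenchmarkStructTypeAssertion measures asserting a value held in an any to
// its concrete struct type.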
func BenchmarkStructTypeAssertion(b *testing.B) {
	runStructTypeAssertion(b, myFooer{})
}

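// runStructTypeAssertion mirrors runInterfaceTypeAssertion but asserts to the
// concrete myFooer type instead of an interface.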
func runStructTypeAssertion(b *testing.B, fer any) {
	x := 0
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if _, ok := fer.(myFooer); ok {
			x++
		}
	}
	b.StopTimer()
	if x != b.N {
		b.Fatalf("got %d, want %d", x, b.N)
	}
}

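// BenchmarkWaitGroupAddDone measures contended WaitGroup.Add(1) calls; each
// goroutine then issues a matching number of Done calls so the counter
// returns to zero.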
func BenchmarkWaitGroupAddDone(b *testing.B) {
	wg := sync.WaitGroup{}
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for ; pb.Next(); i++ {
			wg.Add(1)
		}
		for ; i > 0; i-- {
			wg.Done()
		}
	})
}

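// BenchmarkRLockUnlock measures contended RLock acquisition; each goroutine
// releases all of its read locks after its pb.Next loop finishes.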
func BenchmarkRLockUnlock(b *testing.B) {
	mu := sync.RWMutex{}
	b.RunParallel(func(pb *testing.PB) {
		i := 0
		for ; pb.Next(); i++ {
			mu.RLock()
		}
		for ; i > 0; i-- {
			mu.RUnlock()
		}
	})
}

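// ifNop, alwaysNop, and concreteNop support the two benchmarks below, which
// compare a no-op call made through an interface against a concrete method
// call that branches on an atomic flag.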
type ifNop interface {
	nop()
}

type alwaysNop struct{}

func (alwaysNop) nop() {}

type concreteNop struct {
	isNop atomic.Bool
	i     int
}

func (c *concreteNop) nop() {
	if c.isNop.Load() {
		return
	}
	c.i++
}

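// BenchmarkInterfaceNop measures calling a no-op method through an interface
// value in parallel.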
func BenchmarkInterfaceNop(b *testing.B) {
	n := ifNop(alwaysNop{})
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			n.nop()
		}
	})
}

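// BenchmarkConcreteNop measures calling a concrete no-op method whose body
// checks an atomic.Bool before returning, in parallel.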
func BenchmarkConcreteNop(b *testing.B) {
	n := &concreteNop{}
	n.isNop.Store(true)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			n.nop()
		}
	})
}