7 package cache
8
9 import (
10 "math/rand"
11 "runtime"
12 "sync"
13 "sync/atomic"
14 "testing"
15 "time"
16 "unsafe"
17
18 "github.com/stretchr/testify/assert"
19 "github.com/stretchr/testify/require"
20 )
21
// int32o is an int32 used as a strict single-owner reference counter:
// the only legal transitions are 0 -> 1 (acquire) and 1 -> 0 (Release).
// Any other transition indicates a double-acquire or double-release and
// panics immediately so the stress tests fail loudly.
type int32o int32

// acquire takes ownership; panics if the counter was not previously zero.
func (o *int32o) acquire() {
	if n := atomic.AddInt32((*int32)(o), 1); n != 1 {
		panic("BUG: invalid ref")
	}
}

// Release drops ownership; panics if the counter was not exactly one.
func (o *int32o) Release() {
	if n := atomic.AddInt32((*int32)(o), -1); n != 0 {
		panic("BUG: invalid ref")
	}
}
35
36 type releaserFunc struct {
37 fn func()
38 value Value
39 }
40
41 func (r releaserFunc) Release() {
42 if r.fn != nil {
43 r.fn()
44 }
45 }
46
47 func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
48 return c.Get(ns, key, func() (int, Value) {
49 if relf != nil {
50 return charge, releaserFunc{relf, value}
51 }
52 return charge, value
53 })
54 }
55
56 func shuffleNodes(nodes mNodes) mNodes {
57 shuffled := append(mNodes(nil), nodes...)
58 rand.Shuffle(len(shuffled), func(i, j int) {
59 shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
60 })
61 return shuffled
62 }
63
64 func generateSortedNodes(nNS, minNKey, maxNKey int) mNodes {
65 var generated mNodes
66 for i := 0; i < nNS; i++ {
67 nKey := minNKey
68 if maxNKey-minNKey > 0 {
69 nKey += rand.Intn(maxNKey - minNKey)
70 }
71 for j := 0; j < nKey; j++ {
72 generated = append(generated, &Node{ns: uint64(i), key: uint64(j)})
73 }
74 }
75 return generated
76 }
77
78 func TestNodesSort(t *testing.T) {
79 testFunc := func(nNS, minNKey, maxNKey int) func(t *testing.T) {
80 return func(t *testing.T) {
81 sorted := generateSortedNodes(nNS, minNKey, maxNKey)
82 for i := 0; i < 3; i++ {
83 shuffled := shuffleNodes(sorted)
84 require.NotEqual(t, sorted, shuffled)
85 shuffled.sort()
86 require.Equal(t, sorted, shuffled)
87 }
88 for i, x := range sorted {
89 r := sorted.search(x.ns, x.key)
90 require.Equal(t, i, r)
91 }
92 }
93 }
94
95 t.Run("SingleNS", testFunc(1, 100, 100))
96 t.Run("MultipleNS", testFunc(10, 1, 10))
97
98 t.Run("SearchInexact", func(t *testing.T) {
99 data := mNodes{
100 &Node{ns: 0, key: 2},
101 &Node{ns: 0, key: 3},
102 &Node{ns: 0, key: 4},
103 &Node{ns: 2, key: 1},
104 &Node{ns: 2, key: 2},
105 &Node{ns: 2, key: 3},
106 }
107 require.Equal(t, 0, data.search(0, 1))
108 require.Equal(t, 0, data.search(0, 2))
109 require.Equal(t, 3, data.search(0, 5))
110 require.Equal(t, 3, data.search(1, 1001000))
111 require.Equal(t, 5, data.search(2, 3))
112 require.Equal(t, 6, data.search(2, 4))
113 require.Equal(t, 6, data.search(10, 10))
114 })
115 }
116
// TestCacheMap stress-tests concurrent cache access. For each parameter
// set, `concurrent` getter goroutines repeatedly Get objects (the int32o
// refcounts verify the setter runs at most once per live object) while a
// scavenger goroutine drains randomly parked handles. A separate
// goroutine churns a dedicated namespace to force the table to grow and
// shrink during the run. At the end, every refcount must be back to zero
// and the cache must be completely empty.
func TestCacheMap(t *testing.T) {
	runtime.GOMAXPROCS(runtime.NumCPU())

	type cacheMapTestParams struct {
		nObjects, nHandles, concurrent, repeat int
	}

	var params []cacheMapTestParams
	if testing.Short() {
		params = []cacheMapTestParams{
			{1000, 100, 20, 3},
			{10000, 300, 50, 10},
		}
	} else {
		params = []cacheMapTestParams{
			{10000, 400, 50, 3},
			{100000, 1000, 100, 10},
		}
	}

	var (
		objects [][]int32o         // objects[id][i]: refcount of object i in param set id
		handles [][]unsafe.Pointer // handles[id][j]: slot that may hold a parked *Handle
	)

	for _, x := range params {
		objects = append(objects, make([]int32o, x.nObjects))
		handles = append(handles, make([]unsafe.Pointer, x.nHandles))
	}

	c := NewCache(nil)

	wg := new(sync.WaitGroup)
	var done int32 // set to 1 once all getter goroutines have finished

	for id, param := range params {
		// Re-declare loop variables so each goroutine captures its own copy.
		id := id
		param := param
		objects := objects[id]
		handles := handles[id]
		for job := 0; job < param.concurrent; job++ {
			wg.Add(1)
			go func() {
				defer wg.Done()

				r := rand.New(rand.NewSource(time.Now().UnixNano()))
				for j := len(objects) * param.repeat; j >= 0; j-- {
					if t.Failed() {
						return
					}

					i := r.Intn(len(objects))
					h := c.Get(uint64(id), uint64(i), func() (int, Value) {
						// The setter runs only when the node is absent;
						// acquire panics if it ever runs twice for a live object.
						o := &objects[i]
						o.acquire()
						return 1, o
					})
					if !assert.True(t, h.Value().(*int32o) == &objects[i]) {
						return
					}
					if !assert.True(t, objects[i] == 1) {
						return
					}
					// Try to park the handle in a random slot; if the slot
					// is already occupied, release the handle immediately.
					if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
						h.Release()
					}
				}
			}()
		}

		// Scavenger: keeps draining random handle slots until the getters
		// are done (signalled via `done`).
		go func() {
			r := rand.New(rand.NewSource(time.Now().UnixNano()))

			for atomic.LoadInt32(&done) == 0 {
				i := r.Intn(len(handles))
				h := (*Handle)(atomic.LoadPointer(&handles[i]))
				if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
					h.Release()
				}
				time.Sleep(time.Millisecond)
			}
		}()
	}

	// Grow/shrink churn: repeatedly fills and releases 100000 entries in a
	// dedicated namespace to exercise table resizing concurrently.
	growShrinkStop := make(chan bool, 1)
	go func() {
		handles := make([]*Handle, 100000)
		for atomic.LoadInt32(&done) == 0 {
			for i := range handles {
				handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
					return 1, 1
				})
			}
			for _, h := range handles {
				h.Release()
			}
		}
		growShrinkStop <- true
	}()

	wg.Wait()
	atomic.StoreInt32(&done, 1)

	// Drain any handles still parked in the slots; CAS guards against a
	// scavenger that has not yet observed `done`.
	activeCount := 0
	for _, handle := range handles {
		for i := range handle {
			h := (*Handle)(atomic.LoadPointer(&handle[i]))
			if h != nil && atomic.CompareAndSwapPointer(&handle[i], unsafe.Pointer(h), nil) {
				activeCount++
				h.Release()
			}
		}
	}
	t.Logf("active_handles=%d", activeCount)

	// Every object's refcount must have returned to zero.
	for id, object := range objects {
		for i, o := range object {
			require.EqualValues(t, 0, o, "invalid object ref: %d-%03d", id, i)
		}
	}

	<-growShrinkStop

	require.Zero(t, c.Nodes())
	require.Zero(t, c.Size())
	t.Logf("STATS: %#v", c.GetStats())
}
248
249 func TestCacheMap_NodesAndSize(t *testing.T) {
250 c := NewCache(nil)
251 require.Zero(t, c.Capacity())
252 require.Zero(t, c.Nodes())
253 require.Zero(t, c.Size())
254 set(c, 0, 1, 1, 1, nil)
255 set(c, 0, 2, 2, 2, nil)
256 set(c, 1, 1, 3, 3, nil)
257 set(c, 2, 1, 4, 1, nil)
258 require.Equal(t, 4, c.Nodes())
259 require.Equal(t, 7, c.Size())
260 }
261
262 func TestLRUCache_Capacity(t *testing.T) {
263 c := NewCache(NewLRU(10))
264 require.Equal(t, 10, c.Capacity())
265 set(c, 0, 1, 1, 1, nil).Release()
266 set(c, 0, 2, 2, 2, nil).Release()
267 set(c, 1, 1, 3, 3, nil).Release()
268 set(c, 2, 1, 4, 1, nil).Release()
269 set(c, 2, 2, 5, 1, nil).Release()
270 set(c, 2, 3, 6, 1, nil).Release()
271 set(c, 2, 4, 7, 1, nil).Release()
272 set(c, 2, 5, 8, 1, nil).Release()
273 require.Equal(t, 7, c.Nodes())
274 require.Equal(t, 10, c.Size())
275 c.SetCapacity(9)
276 require.Equal(t, 9, c.Capacity())
277 require.Equal(t, 6, c.Nodes())
278 require.Equal(t, 8, c.Size())
279 }
280
281 func TestCacheMap_NilValue(t *testing.T) {
282 c := NewCache(NewLRU(10))
283 h := c.Get(0, 0, func() (size int, value Value) {
284 return 1, nil
285 })
286 require.Nil(t, h)
287 require.Zero(t, c.Nodes())
288 require.Zero(t, c.Size())
289 }
290
291 func TestLRUCache_GetLatency(t *testing.T) {
292 runtime.GOMAXPROCS(runtime.NumCPU())
293
294 const (
295 concurrentSet = 30
296 concurrentGet = 3
297 duration = 3 * time.Second
298 delay = 3 * time.Millisecond
299 maxKey = 100000
300 )
301
302 var (
303 set, getHit, getAll int32
304 getMaxLatency, getDuration int64
305 )
306
307 c := NewCache(NewLRU(5000))
308 wg := &sync.WaitGroup{}
309 until := time.Now().Add(duration)
310 for i := 0; i < concurrentSet; i++ {
311 wg.Add(1)
312 go func(i int) {
313 defer wg.Done()
314 r := rand.New(rand.NewSource(time.Now().UnixNano()))
315 for time.Now().Before(until) {
316 c.Get(0, uint64(r.Intn(maxKey)), func() (int, Value) {
317 time.Sleep(delay)
318 atomic.AddInt32(&set, 1)
319 return 1, 1
320 }).Release()
321 }
322 }(i)
323 }
324 for i := 0; i < concurrentGet; i++ {
325 wg.Add(1)
326 go func(i int) {
327 defer wg.Done()
328 r := rand.New(rand.NewSource(time.Now().UnixNano()))
329 for {
330 mark := time.Now()
331 if mark.Before(until) {
332 h := c.Get(0, uint64(r.Intn(maxKey)), nil)
333 latency := int64(time.Since(mark))
334 m := atomic.LoadInt64(&getMaxLatency)
335 if latency > m {
336 atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
337 }
338 atomic.AddInt64(&getDuration, latency)
339 if h != nil {
340 atomic.AddInt32(&getHit, 1)
341 h.Release()
342 }
343 atomic.AddInt32(&getAll, 1)
344 } else {
345 break
346 }
347 }
348 }(i)
349 }
350
351 wg.Wait()
352 getAvgLatency := time.Duration(getDuration) / time.Duration(getAll)
353 t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
354 set, getHit, getAll, time.Duration(getMaxLatency), getAvgLatency)
355
356 require.LessOrEqual(t, getAvgLatency, delay/3)
357 t.Logf("STATS: %#v", c.GetStats())
358 }
359
360 func TestLRUCache_HitMiss(t *testing.T) {
361 cases := []struct {
362 key uint64
363 value string
364 }{
365 {1, "vvvvvvvvv"},
366 {100, "v1"},
367 {0, "v2"},
368 {12346, "v3"},
369 {777, "v4"},
370 {999, "v5"},
371 {7654, "v6"},
372 {2, "v7"},
373 {3, "v8"},
374 {9, "v9"},
375 }
376
377 setFin := 0
378 c := NewCache(NewLRU(1000))
379 for i, x := range cases {
380 set(c, 0, x.key, x.value, len(x.value), func() {
381 setFin++
382 }).Release()
383 for j, y := range cases {
384 h := c.Get(0, y.key, nil)
385 if j <= i {
386
387 require.NotNilf(t, h, "case '%d' iteration '%d' should hit", i, j)
388 require.Equalf(t, y.value, h.Value().(releaserFunc).value, "case '%d' iteration '%d' should have valid value", i, j)
389 } else {
390
391 require.Nilf(t, h, "case '%d' iteration '%d' should miss", i, j)
392 }
393 if h != nil {
394 h.Release()
395 }
396 }
397 }
398
399 for i, x := range cases {
400 finalizerOk := false
401 c.Delete(0, x.key, func() {
402 finalizerOk = true
403 })
404
405 require.True(t, finalizerOk)
406
407 for j, y := range cases {
408 h := c.Get(0, y.key, nil)
409 if j > i {
410
411 require.NotNilf(t, h, "case '%d' iteration '%d' should hit", i, j)
412 require.Equalf(t, y.value, h.Value().(releaserFunc).value, "case '%d' iteration '%d' should have valid value", i, j)
413 } else {
414
415 require.Nilf(t, h, "case '%d' iteration '%d' should miss", i, j)
416 }
417 if h != nil {
418 h.Release()
419 }
420 }
421 }
422
423 require.Equal(t, len(cases), setFin, "some set finalizer may not be executed")
424 }
425
// TestLRUCache_Eviction checks the interplay of LRU eviction with pinned
// handles: an entry whose handle is still held (key 1) survives capacity
// pressure, a recently touched entry (key 2) survives, and an oversized
// insert evicts the cold unpinned entries.
func TestLRUCache_Eviction(t *testing.T) {
	c := NewCache(NewLRU(12))
	// Keep the handle for key 1 so the entry stays pinned.
	o1 := set(c, 0, 1, 1, 1, nil)
	set(c, 0, 2, 2, 1, nil).Release()
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	set(c, 0, 5, 5, 1, nil).Release()
	// Touch key 2 to refresh its recency.
	if h := c.Get(0, 2, nil); h != nil {
		h.Release()
	}
	// Charge-10 insert with capacity 12 forces eviction of the least
	// recently used unpinned entries (keys 3 and 4, per the asserts below).
	set(c, 0, 9, 9, 10, nil).Release()

	// Keys 9, 2, 5 are resident; key 1 is resident because it is pinned.
	for _, key := range []uint64{9, 2, 5, 1} {
		h := c.Get(0, key, nil)
		require.NotNilf(t, h, "miss for key '%d'", key)
		require.Equalf(t, int(key), h.Value(), "invalid value for key '%d'", key)
		h.Release()
	}
	// Unpin key 1; the entries just looked up remain retrievable.
	o1.Release()
	for _, key := range []uint64{1, 2, 5} {
		h := c.Get(0, key, nil)
		require.NotNilf(t, h, "miss for key '%d'", key)
		require.Equalf(t, int(key), h.Value(), "invalid value for key '%d'", key)
		h.Release()
	}
	// Keys 3, 4 (evicted by the big insert) and 9 must now be gone.
	// NOTE(review): the eviction of 9 here depends on the lru backend's
	// recency ordering after the lookups above — confirm against lru.go.
	for _, key := range []uint64{3, 4, 9} {
		h := c.Get(0, key, nil)
		if !assert.Nilf(t, h, "hit for key '%d'", key) {
			// On an unexpected hit, still validate and release the handle
			// so subsequent checks run cleanly.
			require.Equalf(t, int(key), h.Value(), "invalid value for key '%d'", key)
			h.Release()
		}
	}
}
459
460 func TestLRUCache_Evict(t *testing.T) {
461 lru := NewLRU(6).(*lru)
462 c := NewCache(lru)
463 set(c, 0, 1, 1, 1, nil).Release()
464 set(c, 0, 2, 2, 1, nil).Release()
465 set(c, 1, 1, 3, 1, nil).Release()
466 set(c, 1, 2, 4, 1, nil).Release()
467 set(c, 2, 1, 5, 1, nil).Release()
468 set(c, 2, 2, 6, 1, nil).Release()
469
470 v := 1
471 for ns := 0; ns < 3; ns++ {
472 for key := 1; key < 3; key++ {
473 h := c.Get(uint64(ns), uint64(key), nil)
474 require.NotNilf(t, h, "NS=%d key=%d", ns, key)
475 require.Equal(t, v, h.Value())
476 h.Release()
477 v++
478 }
479 }
480
481 require.True(t, c.Evict(0, 1))
482 require.Equal(t, 5, lru.used)
483 require.False(t, c.Evict(0, 1))
484
485 c.EvictNS(1)
486 require.Equal(t, 3, lru.used)
487 require.Nil(t, c.Get(1, 1, nil))
488 require.Nil(t, c.Get(1, 2, nil))
489
490 c.EvictAll()
491 require.Zero(t, lru.used)
492 require.Nil(t, c.Get(0, 1, nil))
493 require.Nil(t, c.Get(0, 2, nil))
494 require.Nil(t, c.Get(1, 1, nil))
495 require.Nil(t, c.Get(1, 2, nil))
496 require.Nil(t, c.Get(2, 1, nil))
497 require.Nil(t, c.Get(2, 2, nil))
498 }
499
// TestLRUCache_Delete exercises Delete semantics: deleting an
// unreferenced entry removes it immediately, while deleting an entry
// with a live handle only marks it — the finalizer fires once the last
// handle is released.
func TestLRUCache_Delete(t *testing.T) {
	delFuncCalled := 0
	delFunc := func() {
		delFuncCalled++
	}

	c := NewCache(NewLRU(2))
	set(c, 0, 1, 1, 1, nil).Release()
	set(c, 0, 2, 2, 1, nil).Release()

	// Entry 1 has no outstanding handle: Delete removes it right away,
	// and a second Delete finds nothing and returns false.
	require.True(t, c.Delete(0, 1, delFunc))
	require.Nil(t, c.Get(0, 1, nil))
	require.False(t, c.Delete(0, 1, delFunc))

	// Entry 2 is pinned by h2; both Delete calls succeed but the node
	// cannot be freed while the handle is held.
	// NOTE(review): the second Delete also returning true on an
	// already-marked, still-referenced node is asserted here — confirm
	// against Cache.Delete's documented contract.
	h2 := c.Get(0, 2, nil)
	require.NotNil(t, h2)
	require.True(t, c.Delete(0, 2, delFunc))
	require.True(t, c.Delete(0, 2, delFunc))

	// Insert past the capacity of 2; Get on key 2 still succeeds while
	// h2 keeps the node alive.
	set(c, 0, 3, 3, 1, nil).Release()
	set(c, 0, 4, 4, 1, nil).Release()
	c.Get(0, 2, nil).Release()

	for key := 2; key <= 4; key++ {
		h := c.Get(0, uint64(key), nil)
		require.NotNil(t, h)
		h.Release()
	}

	// Dropping the last handle finally removes entry 2 and, per the
	// assertion below, all four delFunc registrations have run by now.
	h2.Release()
	require.Nil(t, c.Get(0, 2, nil))
	require.Equal(t, 4, delFuncCalled)
}
533
534 func TestLRUCache_Close(t *testing.T) {
535 relFuncCalled := 0
536 relFunc := func() {
537 relFuncCalled++
538 }
539 delFuncCalled := 0
540 delFunc := func() {
541 delFuncCalled++
542 }
543
544 c := NewCache(NewLRU(2))
545 set(c, 0, 1, 1, 1, relFunc).Release()
546 set(c, 0, 2, 2, 1, relFunc).Release()
547
548 h3 := set(c, 0, 3, 3, 1, relFunc)
549 require.NotNil(t, h3)
550 require.True(t, c.Delete(0, 3, delFunc))
551
552 c.Close(true)
553
554 require.Equal(t, 3, relFuncCalled)
555 require.Equal(t, 1, delFuncCalled)
556 }
557