// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com>
// All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// acquire atomically increments the int32o reference counter and panics
// unless the post-increment value is exactly 1, i.e. the object must have
// been unreferenced before this call.
// NOTE(review): closing braces and possibly other lines are missing — this
// chunk is a partial extraction with original line numbers fused into the text.
21 func (o *int32o) acquire() {
22 if atomic.AddInt32((*int32)(o), 1) != 1 {
23 panic("BUG: invalid ref")
// Release atomically decrements the int32o reference counter and panics
// unless the post-decrement value is exactly 0, i.e. exactly one acquire
// must have been outstanding.
// NOTE(review): closing braces are missing in this extraction.
27 func (o *int32o) Release() {
28 if atomic.AddInt32((*int32)(o), -1) != 0 {
29 panic("BUG: invalid ref")
// releaserFunc pairs a release callback with a cached value; its fields are
// not visible in this extraction (usage below suggests `fn func()` and
// `value Value` — TODO confirm against the full file).
33 type releaserFunc struct {
// Release implements the cache's releaser contract for releaserFunc;
// the body is not visible here (presumably it invokes the stored callback
// if non-nil — TODO confirm).
38 func (r releaserFunc) Release() {
// set is a test helper that inserts (ns, key) into the cache via Get with a
// setter closure, wrapping value and the optional release callback relf in a
// releaserFunc charged at `charge`. Returns the resulting *Handle.
// NOTE(review): intermediate lines and closing braces are missing here.
44 func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle {
45 return c.Get(ns, key, func() (int, Value) {
47 return charge, releaserFunc{relf, value}
// cacheMapTestParams configures one namespace of TestCacheMap:
// object count, handle-slot count, concurrent getter goroutines, and how
// many passes over the object set each goroutine makes.
53 type cacheMapTestParams struct {
54 nobjects, nhandles, concurrent, repeat int
// TestCacheMap stress-tests the cache concurrently: per-namespace getter
// goroutines Get random keys (tracking per-object refcounts via int32o)
// while releaser goroutines race to steal parked handles and release them;
// a final pass verifies every refcount returned to zero.
// NOTE(review): this chunk is a partial extraction — many interior lines
// (conditions, braces, wg.Add/Done calls) are missing, and original line
// numbers are fused into the text below.
// NOTE(review): t.Fatalf is called from non-test goroutines below, which
// the testing package does not support — worth fixing in the full file.
57 func TestCacheMap(t *testing.T) {
58 runtime.GOMAXPROCS(runtime.NumCPU())
60 var params []cacheMapTestParams
// Two alternative parameter sets; the selector between them (presumably a
// testing.Short() check) is not visible here.
62 params = []cacheMapTestParams{
67 params = []cacheMapTestParams{
69 {100000, 1000, 100, 10},
75 handles [][]unsafe.Pointer
// Pre-allocate per-namespace refcounters and handle parking slots.
78 for _, x := range params {
79 objects = append(objects, make([]int32o, x.nobjects))
80 handles = append(handles, make([]unsafe.Pointer, x.nhandles))
85 wg := new(sync.WaitGroup)
// Getter goroutines: each performs len(objects)*repeat random Gets within
// its namespace.
88 for ns, x := range params {
89 for i := 0; i < x.concurrent; i++ {
91 go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) {
93 r := rand.New(rand.NewSource(time.Now().UnixNano()))
95 for j := len(objects) * repeat; j >= 0; j-- {
96 key := uint64(r.Intn(len(objects)))
97 h := c.Get(uint64(ns), key, func() (int, Value) {
// The cached value must be the address of the tracked counter for this key.
102 if v := h.Value().(*int32o); v != &objects[key] {
103 t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v)
105 if objects[key] != 1 {
106 t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key])
// Park the handle in a random slot via CAS; the fallback when the slot is
// already occupied is not visible in this extraction.
108 if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) {
112 }(ns, i, x.repeat, objects[ns], handles[ns])
// Releaser goroutine per namespace: steals parked handles with CAS and
// releases them until `done` is flagged.
115 go func(handles []unsafe.Pointer) {
116 r := rand.New(rand.NewSource(time.Now().UnixNano()))
118 for atomic.LoadInt32(&done) == 0 {
119 i := r.Intn(len(handles))
120 h := (*Handle)(atomic.LoadPointer(&handles[i]))
121 if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) {
124 time.Sleep(time.Millisecond)
// Churn loop: floods a separate namespace (999999999) with Gets to apply
// eviction pressure while the workers run; `handles` here shadows the outer
// slice-of-slices.
130 handles := make([]*Handle, 100000)
131 for atomic.LoadInt32(&done) == 0 {
132 for i := range handles {
133 handles[i] = c.Get(999999999, uint64(i), func() (int, Value) {
137 for _, h := range handles {
// Signal background goroutines to stop, then drain handles still parked.
145 atomic.StoreInt32(&done, 1)
147 for _, handles0 := range handles {
148 for i := range handles0 {
149 h := (*Handle)(atomic.LoadPointer(&handles0[i]))
150 if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) {
// Final invariant: every object's refcount is back to zero (the guarding
// condition line is not visible here).
156 for ns, objects0 := range objects {
157 for i, o := range objects0 {
159 t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o)
// TestCacheMap_NodesAndSize checks that Nodes() and Size() start at zero and
// reflect four insertions across three namespaces afterwards.
// NOTE(review): the cache construction and the `if` conditions guarding each
// Errorf are missing from this extraction; only the error branches are
// visible. The final size expectation (4) may be garbled — the inserted
// charges sum to 1+2+3+1=7; verify against the full file.
165 func TestCacheMap_NodesAndSize(t *testing.T) {
168 t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
171 t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
// Handles are not released here — all four nodes stay referenced.
173 set(c, 0, 1, 1, 1, nil)
174 set(c, 0, 2, 2, 2, nil)
175 set(c, 1, 1, 3, 3, nil)
176 set(c, 2, 1, 4, 1, nil)
178 t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes())
181 t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size())
// TestLRUCache_Capacity fills an LRU cache of capacity 10 past its limit
// (total charge 1+2+3+1*5=11) so that eviction occurs, then shrinks the
// capacity (presumably via SetCapacity, not visible) and re-checks counters.
// NOTE(review): the `if` conditions guarding several Errorf calls and the
// capacity-change call are missing from this extraction.
185 func TestLRUCache_Capacity(t *testing.T) {
186 c := NewCache(NewLRU(10))
187 if c.Capacity() != 10 {
188 t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity())
// Insert and immediately release so entries are evictable.
190 set(c, 0, 1, 1, 1, nil).Release()
191 set(c, 0, 2, 2, 2, nil).Release()
192 set(c, 1, 1, 3, 3, nil).Release()
193 set(c, 2, 1, 4, 1, nil).Release()
194 set(c, 2, 2, 5, 1, nil).Release()
195 set(c, 2, 3, 6, 1, nil).Release()
196 set(c, 2, 4, 7, 1, nil).Release()
197 set(c, 2, 5, 8, 1, nil).Release()
// After exceeding capacity 10 with total charge 11, one node was evicted.
199 t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes())
202 t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size())
// Capacity reduced to 9 (call not visible) — one more eviction expected.
205 if c.Capacity() != 9 {
206 t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity())
209 t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes())
212 t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size())
// TestCacheMap_NilValue verifies that a setter returning a nil value
// (setter body not visible here) yields a nil handle and leaves the node
// and size counters at zero.
// NOTE(review): the setter body and the `if` conditions guarding the error
// branches are missing from this extraction.
216 func TestCacheMap_NilValue(t *testing.T) {
217 c := NewCache(NewLRU(10))
218 h := c.Get(0, 0, func() (size int, value Value) {
222 t.Error("cache handle is non-nil")
225 t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes())
228 t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size())
// TestLRUCache_GetLatency runs concurrent setter and getter goroutines for a
// fixed duration, tracks the max and cumulative Get latency with atomics,
// and fails if the average latency exceeds delay/3.
// NOTE(review): many lines are missing in this extraction — the const block
// header (concurrentSet/concurrentGet/maxkey), the wg.Add/Done plumbing,
// the goroutine literals around lines 251/265, and the loop around `mark`.
// NOTE(review): this is a timing-sensitive test; it can flake on loaded CI.
232 func TestLRUCache_GetLatency(t *testing.T) {
233 runtime.GOMAXPROCS(runtime.NumCPU())
238 duration = 3 * time.Second
239 delay = 3 * time.Millisecond
// Shared counters, mutated only via the atomic package.
244 set, getHit, getAll int32
245 getMaxLatency, getDuration int64
248 c := NewCache(NewLRU(5000))
249 wg := &sync.WaitGroup{}
250 until := time.Now().Add(duration)
// Setter goroutines: populate random keys until the deadline.
251 for i := 0; i < concurrentSet; i++ {
255 r := rand.New(rand.NewSource(time.Now().UnixNano()))
256 for time.Now().Before(until) {
257 c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) {
259 atomic.AddInt32(&set, 1)
// Getter goroutines: measure per-Get latency; nil setter means pure lookup.
265 for i := 0; i < concurrentGet; i++ {
269 r := rand.New(rand.NewSource(time.Now().UnixNano()))
272 if mark.Before(until) {
273 h := c.Get(0, uint64(r.Intn(maxkey)), nil)
274 latency := int64(time.Now().Sub(mark))
// Lossy max update: a concurrent change between Load and CAS is tolerated
// (the CAS simply fails and this sample's max is dropped).
275 m := atomic.LoadInt64(&getMaxLatency)
277 atomic.CompareAndSwapInt64(&getMaxLatency, m, latency)
279 atomic.AddInt64(&getDuration, latency)
281 atomic.AddInt32(&getHit, 1)
284 atomic.AddInt32(&getAll, 1)
// NOTE(review): integer division — getAll == 0 would panic; presumably the
// workload guarantees getAll > 0.
293 getAvglatency := time.Duration(getDuration) / time.Duration(getAll)
294 t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v",
295 set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency)
297 if getAvglatency > delay/3 {
298 t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency)
// TestLRUCache_HitMiss inserts a table of key/value cases one at a time,
// verifying after each insertion that earlier cases hit (with the right
// string value) and later ones miss; it then deletes them one at a time,
// checking the inverse, and finally that every set finalizer ran.
// NOTE(review): the `cases` table, the setfin/delfin counters, the hit/miss
// branch conditions, and handle releases are all missing from this
// extraction.
302 func TestLRUCache_HitMiss(t *testing.T) {
320 c := NewCache(NewLRU(1000))
// Insertion phase: after inserting case i, cases 0..i must hit, i+1.. miss.
321 for i, x := range cases {
322 set(c, 0, x.key, x.value, len(x.value), func() {
325 for j, y := range cases {
326 h := c.Get(0, y.key, nil)
330 t.Errorf("case '%d' iteration '%d' is miss", i, j)
// Values round-trip through releaserFunc; compare the wrapped string.
332 if x := h.Value().(releaserFunc).value.(string); x != y.value {
333 t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
339 t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string))
// Deletion phase: after deleting case i, cases 0..i must miss, i+1.. hit.
348 for i, x := range cases {
350 c.Delete(0, x.key, func() {
355 t.Errorf("case %d delete finalizer not executed", i)
358 for j, y := range cases {
359 h := c.Get(0, y.key, nil)
363 t.Errorf("case '%d' iteration '%d' is miss", i, j)
365 if x := h.Value().(releaserFunc).value.(string); x != y.value {
366 t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value)
372 t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string))
// All set finalizers must have fired once everything is deleted.
381 if setfin != len(cases) {
382 t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin)
// TestLRUCache_Eviction checks LRU ordering: key 1 is held via an
// unreleased handle (o1) so it survives eviction pressure; inserting a
// charge-10 entry (key 9) into the capacity-12 cache evicts the
// least-recently-used released entries (3 and 4), while 5, 2 (refreshed by
// the Get) and 9 remain. Trailing inline comments record the expected LRU
// order at each step.
// NOTE(review): the release of h / o1, the capacity change implied by the
// two later expectation sets, and the hit/miss branch conditions are
// missing from this extraction.
386 func TestLRUCache_Eviction(t *testing.T) {
387 c := NewCache(NewLRU(12))
// o1 pins key 1: a referenced node cannot be evicted.
388 o1 := set(c, 0, 1, 1, 1, nil)
389 set(c, 0, 2, 2, 1, nil).Release()
390 set(c, 0, 3, 3, 1, nil).Release()
391 set(c, 0, 4, 4, 1, nil).Release()
392 set(c, 0, 5, 5, 1, nil).Release()
393 if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2
// Charge 10 forces eviction of the LRU released entries (3 and 4).
396 set(c, 0, 9, 9, 10, nil).Release() // 5,2,9
// Survivors must all hit with their stored int values.
398 for _, key := range []uint64{9, 2, 5, 1} {
399 h := c.Get(0, key, nil)
401 t.Errorf("miss for key '%d'", key)
403 if x := h.Value().(int); x != int(key) {
404 t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
410 for _, key := range []uint64{1, 2, 5} {
411 h := c.Get(0, key, nil)
413 t.Errorf("miss for key '%d'", key)
415 if x := h.Value().(int); x != int(key) {
416 t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
// Evicted keys must now miss.
421 for _, key := range []uint64{3, 4, 9} {
422 h := c.Get(0, key, nil)
424 t.Errorf("hit for key '%d'", key)
425 if x := h.Value().(int); x != int(key) {
426 t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x)
// TestLRUCache_Evict exercises single-key Evict, namespace-wide eviction,
// and evict-all (the EvictNS/EvictAll calls themselves are among the lines
// missing from this extraction): a second Evict of the same key must return
// false, and evicted entries must miss on Get.
433 func TestLRUCache_Evict(t *testing.T) {
434 c := NewCache(NewLRU(6))
// Six entries across three namespaces, all released so they are evictable.
435 set(c, 0, 1, 1, 1, nil).Release()
436 set(c, 0, 2, 2, 1, nil).Release()
437 set(c, 1, 1, 4, 1, nil).Release()
438 set(c, 1, 2, 5, 1, nil).Release()
439 set(c, 2, 1, 6, 1, nil).Release()
440 set(c, 2, 2, 7, 1, nil).Release()
// Sanity: every (ns, key) pair must currently be present.
442 for ns := 0; ns < 3; ns++ {
443 for key := 1; key < 3; key++ {
444 if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
447 t.Errorf("Cache.Get on #%d.%d return nil", ns, key)
// Single-key evict: first call succeeds, second is a no-op returning false.
452 if ok := c.Evict(0, 1); !ok {
453 t.Error("first Cache.Evict on #0.1 return false")
455 if ok := c.Evict(0, 1); ok {
456 t.Error("second Cache.Evict on #0.1 return true")
458 if h := c.Get(0, 1, nil); h != nil {
459 t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value())
// Namespace eviction (call not visible): both ns-1 entries must be gone.
463 if h := c.Get(1, 1, nil); h != nil {
464 t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value())
466 if h := c.Get(1, 2, nil); h != nil {
467 t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value())
// Evict-all (call not visible): nothing may remain in any namespace.
471 for ns := 0; ns < 3; ns++ {
472 for key := 1; key < 3; key++ {
473 if h := c.Get(uint64(ns), uint64(key), nil); h != nil {
474 t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value())
// TestLRUCache_Delete exercises Delete semantics on a tiny (capacity 2) LRU
// cache: deleting an unreferenced key removes it immediately (second Delete
// returns false), while deleting a key held via a live handle (h2) defers
// removal — both Deletes return true — until the handle is released, and the
// delFunc finalizer must run exactly 4 times in total.
// NOTE(review): the delFunc/delFuncCalled declarations, handle releases, and
// several branch conditions are missing from this extraction.
480 func TestLRUCache_Delete(t *testing.T) {
486 c := NewCache(NewLRU(2))
487 set(c, 0, 1, 1, 1, nil).Release()
488 set(c, 0, 2, 2, 1, nil).Release()
// Unreferenced key: first Delete removes it, second finds nothing.
490 if ok := c.Delete(0, 1, delFunc); !ok {
491 t.Error("Cache.Delete on #1 return false")
493 if h := c.Get(0, 1, nil); h != nil {
494 t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value())
496 if ok := c.Delete(0, 1, delFunc); ok {
497 t.Error("Cache.Delete on #1 return true")
// Referenced key: h2 keeps #2 alive, so repeated Deletes both report true.
500 h2 := c.Get(0, 2, nil)
502 t.Error("Cache.Get on #2 return nil")
504 if ok := c.Delete(0, 2, delFunc); !ok {
505 t.Error("(1) Cache.Delete on #2 return false")
507 if ok := c.Delete(0, 2, delFunc); !ok {
508 t.Error("(2) Cache.Delete on #2 return false")
// Fresh insertions overflow capacity; the still-referenced #2 must survive.
511 set(c, 0, 3, 3, 1, nil).Release()
512 set(c, 0, 4, 4, 1, nil).Release()
513 c.Get(0, 2, nil).Release()
515 for key := 2; key <= 4; key++ {
516 if h := c.Get(0, uint64(key), nil); h != nil {
519 t.Errorf("Cache.Get on #%d return nil", key)
// After h2 is released (release not visible here), #2 is finally gone.
524 if h := c.Get(0, 2, nil); h != nil {
525 t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value())
528 if delFuncCalled != 4 {
529 t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled)
// TestLRUCache_Close verifies finalizer accounting around cache shutdown:
// three entries carry relFunc as their release callback, #3 is additionally
// deleted with delFunc while still referenced via h3, and after the cache is
// closed (the Close call and the relFunc/delFunc counter declarations are
// among the lines missing from this extraction) relFunc must have run 3
// times and delFunc exactly once.
533 func TestLRUCache_Close(t *testing.T) {
543 c := NewCache(NewLRU(2))
544 set(c, 0, 1, 1, 1, relFunc).Release()
545 set(c, 0, 2, 2, 1, relFunc).Release()
// h3 keeps #3 referenced; its Release presumably happens after Close.
547 h3 := set(c, 0, 3, 3, 1, relFunc)
549 t.Error("Cache.Get on #3 return nil")
551 if ok := c.Delete(0, 3, delFunc); !ok {
552 t.Error("Cache.Delete on #3 return false")
// Post-Close expectations.
557 if relFuncCalled != 3 {
558 t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled)
560 if delFuncCalled != 1 {
561 t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled)