/*
Copyright 2012 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Tests for groupcache.

package groupcache

import (
	"errors"
	"fmt"
	"hash/crc32"
	"math/rand"
	"reflect"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/golang/protobuf/proto"

	pb "github.com/golang/groupcache/groupcachepb"
	testpb "github.com/golang/groupcache/testpb"
)

var (
	once                    sync.Once
	stringGroup, protoGroup Getter

	stringc = make(chan string)

	dummyCtx Context

	// cacheFills is the number of times stringGroup or
	// protoGroup's Getter have been called. Read using the
	// countFills function.
	cacheFills AtomicInt
)

const (
	stringGroupName = "string-group"
	protoGroupName  = "proto-group"
	testMessageType = "google3/net/groupcache/go/test_proto.TestMessage"
	fromChan        = "from-chan"
	cacheSize       = 1 << 20
)

func testSetup() {
	stringGroup = NewGroup(stringGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetString("ECHO:" + key)
	}))

	protoGroup = NewGroup(protoGroupName, cacheSize, GetterFunc(func(_ Context, key string, dest Sink) error {
		if key == fromChan {
			key = <-stringc
		}
		cacheFills.Add(1)
		return dest.SetProto(&testpb.TestMessage{
			Name: proto.String("ECHO:" + key),
			City: proto.String("SOME-CITY"),
		})
	}))
}

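// Note: both getters in testSetup block reading from stringc when asked
// for the fromChan key; the dup-suppression tests below rely on this to
// hold two callers in flight against the same key at once.
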
// tests that a Getter's Get method is only called once with two
// outstanding callers. This is the string variant.
func TestGetDupSuppressString(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting to read
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan string, 2)
	for i := 0; i < 2; i++ {
		go func() {
			var s string
			if err := stringGroup.Get(dummyCtx, fromChan, StringSink(&s)); err != nil {
				resc <- "ERROR:" + err.Error()
				return
			}
			resc <- s
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "foo"

	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if v != "ECHO:foo" {
				t.Errorf("got %q; want %q", v, "ECHO:foo")
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

// tests that a Getter's Get method is only called once with two
// outstanding callers. This is the proto variant.
func TestGetDupSuppressProto(t *testing.T) {
	once.Do(testSetup)
	// Start two getters. The first should block (waiting to read
	// from stringc) and the second should latch on to the first
	// one.
	resc := make(chan *testpb.TestMessage, 2)
	for i := 0; i < 2; i++ {
		go func() {
			tm := new(testpb.TestMessage)
			if err := protoGroup.Get(dummyCtx, fromChan, ProtoSink(tm)); err != nil {
				tm.Name = proto.String("ERROR:" + err.Error())
			}
			resc <- tm
		}()
	}

	// Wait a bit so both goroutines get merged together via
	// singleflight.
	// TODO(bradfitz): decide whether there are any non-offensive
	// debug/test hooks that could be added to singleflight to
	// make a sleep here unnecessary.
	time.Sleep(250 * time.Millisecond)

	// Unblock the first getter, which should unblock the second
	// as well.
	stringc <- "Fluffy"
	want := &testpb.TestMessage{
		Name: proto.String("ECHO:Fluffy"),
		City: proto.String("SOME-CITY"),
	}
	for i := 0; i < 2; i++ {
		select {
		case v := <-resc:
			if !reflect.DeepEqual(v, want) {
				t.Errorf(" Got: %v\nWant: %v", proto.CompactTextString(v), proto.CompactTextString(want))
			}
		case <-time.After(5 * time.Second):
			t.Errorf("timeout waiting on getter #%d of 2", i+1)
		}
	}
}

func countFills(f func()) int64 {
	fills0 := cacheFills.Get()
	f()
	return cacheFills.Get() - fills0
}

func TestCaching(t *testing.T) {
	once.Do(testSetup)
	fills := countFills(func() {
		for i := 0; i < 10; i++ {
			var s string
			if err := stringGroup.Get(dummyCtx, "TestCaching-key", StringSink(&s)); err != nil {
				t.Fatal(err)
			}
		}
	})
	if fills != 1 {
		t.Errorf("expected 1 cache fill; got %d", fills)
	}
}

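// Note: in TestCaching above, only the first Get can invoke the getter;
// the nine repeats are served from the group's main cache, so countFills
// reports exactly one fill.
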
func TestCacheEviction(t *testing.T) {
	once.Do(testSetup)
	testKey := "TestCacheEviction-key"
	getTestKey := func() {
		var res string
		for i := 0; i < 10; i++ {
			if err := stringGroup.Get(dummyCtx, testKey, StringSink(&res)); err != nil {
				t.Fatal(err)
			}
		}
	}
	fills := countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill; got %d", fills)
	}

	g := stringGroup.(*Group)
	evict0 := g.mainCache.nevict

	// Trash the cache with other keys.
	var bytesFlooded int64
	// cacheSize/len(testKey) is approximate
	for bytesFlooded < cacheSize+1024 {
		var res string
		key := fmt.Sprintf("dummy-key-%d", bytesFlooded)
		stringGroup.Get(dummyCtx, key, StringSink(&res))
		bytesFlooded += int64(len(key) + len(res))
	}
	evicts := g.mainCache.nevict - evict0
	if evicts <= 0 {
		t.Errorf("evicts = %v; want more than 0", evicts)
	}

	// Test that the key is gone.
	fills = countFills(getTestKey)
	if fills != 1 {
		t.Fatalf("expected 1 cache fill after cache trashing; got %d", fills)
	}
}

type fakePeer struct {
	hits int
	fail bool
}

func (p *fakePeer) Get(_ Context, in *pb.GetRequest, out *pb.GetResponse) error {
	p.hits++
	if p.fail {
		return errors.New("simulated error from peer")
	}
	out.Value = []byte("got:" + in.GetKey())
	return nil
}

type fakePeers []ProtoGetter

func (p fakePeers) PickPeer(key string) (peer ProtoGetter, ok bool) {
	if len(p) == 0 {
		return
	}
	n := crc32.Checksum([]byte(key), crc32.IEEETable) % uint32(len(p))
	return p[n], p[n] != nil
}

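// A sketch of the routing above, using one of the test's keys: with the
// four-slot peerList in TestPeers, crc32.Checksum([]byte("key-0"),
// crc32.IEEETable)%4 yields the same slot on every call, so the per-peer
// hit counts asserted below are deterministic. A nil slot means "no
// peer": the group falls back to loading the key locally.
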
// tests that peers (virtual, in-process) are hit, and how much.
func TestPeers(t *testing.T) {
	once.Do(testSetup)
	rand.Seed(123)
	peer0 := &fakePeer{}
	peer1 := &fakePeer{}
	peer2 := &fakePeer{}
	peerList := fakePeers([]ProtoGetter{peer0, peer1, peer2, nil})
	const cacheSize = 0 // disabled
	localHits := 0
	getter := func(_ Context, key string, dest Sink) error {
		localHits++
		return dest.SetString("got:" + key)
	}
	testGroup := newGroup("TestPeers-group", cacheSize, GetterFunc(getter), peerList)
	run := func(name string, n int, wantSummary string) {
		// Reset counters.
		localHits = 0
		for _, p := range []*fakePeer{peer0, peer1, peer2} {
			p.hits = 0
		}

		for i := 0; i < n; i++ {
			key := fmt.Sprintf("key-%d", i)
			want := "got:" + key
			var got string
			err := testGroup.Get(dummyCtx, key, StringSink(&got))
			if err != nil {
				t.Errorf("%s: error on key %q: %v", name, key, err)
				continue
			}
			if got != want {
				t.Errorf("%s: for key %q, got %q; want %q", name, key, got, want)
			}
		}
		summary := func() string {
			return fmt.Sprintf("localHits = %d, peers = %d %d %d", localHits, peer0.hits, peer1.hits, peer2.hits)
		}
		if got := summary(); got != wantSummary {
			t.Errorf("%s: got %q; want %q", name, got, wantSummary)
		}
	}
	resetCacheSize := func(maxBytes int64) {
		g := testGroup
		g.cacheBytes = maxBytes
		g.mainCache = cache{}
		g.hotCache = cache{}
	}

	// Base case; peers all up, with no problems.
	resetCacheSize(1 << 20)
	run("base", 200, "localHits = 49, peers = 51 49 51")

	// Verify the cache was hit. All localHits are gone, and so are
	// some of the peer hits (for the keys randomly selected to be
	// maybe hot, which were mirrored into the local hot cache).
	run("cached_base", 200, "localHits = 0, peers = 49 47 48")
	resetCacheSize(0)

	// With one of the peers being down.
	// TODO(bradfitz): on a peer number being unavailable, the
	// consistent hashing should maybe keep trying others to
	// spread the load out. Currently it fails back to local
	// execution if the first consistent-hash slot is unavailable.
	peerList[0] = nil
	run("one_peer_down", 200, "localHits = 100, peers = 0 49 51")

	// With peer0 present but returning errors.
	peerList[0] = peer0
	peer0.fail = true
	run("peer0_failing", 200, "localHits = 100, peers = 51 49 51")
}

func TestTruncatingByteSliceTarget(t *testing.T) {
	var buf [100]byte
	s := buf[:]
	if err := stringGroup.Get(dummyCtx, "short", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:short"; string(s) != want {
		t.Errorf("short key got %q; want %q", s, want)
	}

	s = buf[:6]
	if err := stringGroup.Get(dummyCtx, "truncated", TruncatingByteSliceSink(&s)); err != nil {
		t.Fatal(err)
	}
	if want := "ECHO:t"; string(s) != want {
		t.Errorf("truncated key got %q; want %q", s, want)
	}
}

func TestAllocatingByteSliceTarget(t *testing.T) {
	var dst []byte
	sink := AllocatingByteSliceSink(&dst)

	inBytes := []byte("some bytes")
	sink.SetBytes(inBytes)
	if want := "some bytes"; string(dst) != want {
		t.Errorf("SetBytes resulted in %q; want %q", dst, want)
	}

	v, err := sink.view()
	if err != nil {
		t.Fatalf("view after SetBytes failed: %v", err)
	}
	if &inBytes[0] == &dst[0] {
		t.Error("inBytes and dst share memory")
	}
	if &inBytes[0] == &v.b[0] {
		t.Error("inBytes and view share memory")
	}
	if &dst[0] == &v.b[0] {
		t.Error("dst and view share memory")
	}
}

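// The three pointer comparisons in TestAllocatingByteSliceTarget pin
// down the copy semantics: the sink must give the caller-visible slice
// and its internal view their own freshly allocated memory, so that a
// caller later mutating inBytes (or dst) cannot corrupt cached data.
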
// orderedFlightGroup allows the caller to force the schedule of when
// orig.Do will be called. This is useful to serialize calls such
// that singleflight cannot dedup them.
type orderedFlightGroup struct {
	mu     sync.Mutex
	stage1 chan bool
	stage2 chan bool
	orig   flightGroup
}

func (g *orderedFlightGroup) Do(key string, fn func() (interface{}, error)) (interface{}, error) {
	<-g.stage1
	<-g.stage2
	g.mu.Lock()
	defer g.mu.Unlock()
	return g.orig.Do(key, fn)
}

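// Each caller of Do above must be released twice, once per stage
// channel, before it reaches orig.Do. TestNoDedup below uses the two
// stages to admit both goroutines past the cache check before either
// one enters singleflight, guaranteeing the getter really runs twice.
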
// TestNoDedup tests invariants on the cache size when singleflight is
// unable to dedup calls.
func TestNoDedup(t *testing.T) {
	const testkey = "testkey"
	const testval = "testval"
	g := newGroup("testgroup", 1024, GetterFunc(func(_ Context, key string, dest Sink) error {
		return dest.SetString(testval)
	}), nil)

	orderedGroup := &orderedFlightGroup{
		stage1: make(chan bool),
		stage2: make(chan bool),
		orig:   g.loadGroup,
	}
	// Replace loadGroup with our wrapper so we can control when
	// loadGroup.Do is entered for each concurrent request.
	g.loadGroup = orderedGroup

	// Issue two identical requests concurrently. Since the cache is
	// empty, both will miss. Both will enter load(), but we will only
	// allow one at a time to enter singleflight.Do, so the callback
	// function will be called twice.
	resc := make(chan string, 2)
	for i := 0; i < 2; i++ {
		go func() {
			var s string
			if err := g.Get(dummyCtx, testkey, StringSink(&s)); err != nil {
				resc <- "ERROR:" + err.Error()
				return
			}
			resc <- s
		}()
	}

	// Ensure both goroutines have entered the Do routine. This implies
	// both concurrent requests have checked the cache, found it empty,
	// and called load().
	orderedGroup.stage1 <- true
	orderedGroup.stage1 <- true
	orderedGroup.stage2 <- true
	orderedGroup.stage2 <- true

	for i := 0; i < 2; i++ {
		if s := <-resc; s != testval {
			t.Errorf("result is %s want %s", s, testval)
		}
	}

	const wantItems = 1
	if g.mainCache.items() != wantItems {
		t.Errorf("mainCache has %d items, want %d", g.mainCache.items(), wantItems)
	}

	// If the singleflight callback doesn't double-check the cache again
	// upon entry, we would increment nbytes twice but the entry would
	// only be in the cache once.
	const wantBytes = int64(len(testkey) + len(testval))
	if g.mainCache.nbytes != wantBytes {
		t.Errorf("cache has %d bytes, want %d", g.mainCache.nbytes, wantBytes)
	}
}

func TestGroupStatsAlignment(t *testing.T) {
	var g Group
	off := unsafe.Offsetof(g.Stats)
	if off%8 != 0 {
		t.Fatal("Stats structure is not 8-byte aligned.")
	}
}

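// The alignment check above matters because the Stats fields are
// AtomicInt values updated with sync/atomic's 64-bit operations, which
// require 8-byte alignment on 32-bit platforms (per the sync/atomic
// documentation on 64-bit alignment).
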
// TODO(bradfitz): port the Google-internal full integration test into here,
// using HTTP requests instead of our RPC system.