...

Source file src/google.golang.org/grpc/balancer/rls/cache_test.go

Documentation: google.golang.org/grpc/balancer/rls

     1  /*
     2   *
     3   * Copyright 2021 gRPC authors.
     4   *
     5   * Licensed under the Apache License, Version 2.0 (the "License");
     6   * you may not use this file except in compliance with the License.
     7   * You may obtain a copy of the License at
     8   *
     9   *     http://www.apache.org/licenses/LICENSE-2.0
    10   *
    11   * Unless required by applicable law or agreed to in writing, software
    12   * distributed under the License is distributed on an "AS IS" BASIS,
    13   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    14   * See the License for the specific language governing permissions and
    15   * limitations under the License.
    16   *
    17   */
    18  
    19  package rls
    20  
    21  import (
    22  	"testing"
    23  	"time"
    24  
    25  	"github.com/google/go-cmp/cmp"
    26  	"github.com/google/go-cmp/cmp/cmpopts"
    27  	"google.golang.org/grpc/internal/backoff"
    28  )
    29  
var (
	// cacheKeys is a fixed set of distinct keys used by the tests below to
	// populate the LRU and the data cache.
	cacheKeys = []cacheKey{
		{path: "0", keys: "a"},
		{path: "1", keys: "b"},
		{path: "2", keys: "c"},
		{path: "3", keys: "d"},
		{path: "4", keys: "e"},
	}

	// longDuration, added to time.Now(), yields a timestamp well in the
	// future; shortDuration, added to the zero time.Time, yields a timestamp
	// in the distant past (i.e. already expired).
	longDuration  = 10 * time.Minute
	shortDuration = 1 * time.Millisecond
	// cacheEntries is (re)populated by initCacheEntries before each test so
	// that mutations made by one test do not leak into the next.
	cacheEntries  []*cacheEntry
)
    43  
// initCacheEntries (re)populates the package-level cacheEntries slice with a
// fixed set of entries covering the states of interest to the tests: valid
// with a future minimum eviction time, valid and in backoff, valid and not in
// backoff, expired, and expired with an expired backoff. Tests call this at
// the start of each run so earlier mutations do not leak between tests.
func initCacheEntries() {
	// All entries have a dummy size of 1 to simplify resize operations.
	cacheEntries = []*cacheEntry{
		{
			// Entry is valid and minimum expiry time has not expired.
			expiryTime:        time.Now().Add(longDuration),
			earliestEvictTime: time.Now().Add(longDuration),
			size:              1,
		},
		{
			// Entry is valid and is in backoff.
			expiryTime:   time.Now().Add(longDuration),
			backoffTime:  time.Now().Add(longDuration),
			backoffState: &backoffState{timer: time.NewTimer(longDuration)},
			size:         1,
		},
		{
			// Entry is valid, and not in backoff.
			expiryTime: time.Now().Add(longDuration),
			size:       1,
		},
		{
			// Entry is invalid (its expiry time is in the distant past).
			expiryTime: time.Time{}.Add(shortDuration),
			size:       1,
		},
		{
			// Entry is invalid and backoff has expired.
			expiryTime:        time.Time{}.Add(shortDuration),
			backoffExpiryTime: time.Time{}.Add(shortDuration),
			size:              1,
		},
	}
}
    78  
    79  func (s) TestLRU_BasicOperations(t *testing.T) {
    80  	initCacheEntries()
    81  	// Create an LRU and add some entries to it.
    82  	lru := newLRU()
    83  	for _, k := range cacheKeys {
    84  		lru.addEntry(k)
    85  	}
    86  
    87  	// Get the least recent entry. This should be the first entry we added.
    88  	if got, want := lru.getLeastRecentlyUsed(), cacheKeys[0]; got != want {
    89  		t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
    90  	}
    91  
    92  	// Iterate through the slice of keys we added earlier, making them the most
    93  	// recent entry, one at a time. The least recent entry at that point should
    94  	// be the next entry from our slice of keys.
    95  	for i, k := range cacheKeys {
    96  		lru.makeRecent(k)
    97  
    98  		lruIndex := (i + 1) % len(cacheKeys)
    99  		if got, want := lru.getLeastRecentlyUsed(), cacheKeys[lruIndex]; got != want {
   100  			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
   101  		}
   102  	}
   103  
   104  	// Iterate through the slice of keys we added earlier, removing them one at
   105  	// a time The least recent entry at that point should be the next entry from
   106  	// our slice of keys, except for the last one because the lru will be empty.
   107  	for i, k := range cacheKeys {
   108  		lru.removeEntry(k)
   109  
   110  		var want cacheKey
   111  		if i < len(cacheKeys)-1 {
   112  			want = cacheKeys[i+1]
   113  		}
   114  		if got := lru.getLeastRecentlyUsed(); got != want {
   115  			t.Fatalf("lru.getLeastRecentlyUsed() = %v, want %v", got, want)
   116  		}
   117  	}
   118  }
   119  
   120  func (s) TestDataCache_BasicOperations(t *testing.T) {
   121  	initCacheEntries()
   122  	dc := newDataCache(5, nil)
   123  	for i, k := range cacheKeys {
   124  		dc.addEntry(k, cacheEntries[i])
   125  	}
   126  	for i, k := range cacheKeys {
   127  		entry := dc.getEntry(k)
   128  		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
   129  			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", k, entry, cacheEntries[i])
   130  		}
   131  	}
   132  }
   133  
   134  func (s) TestDataCache_AddForcesResize(t *testing.T) {
   135  	initCacheEntries()
   136  	dc := newDataCache(1, nil)
   137  
   138  	// The first entry in cacheEntries has a minimum expiry time in the future.
   139  	// This entry would stop the resize operation since we do not evict entries
   140  	// whose minimum expiration time is in the future. So, we do not use that
   141  	// entry in this test. The entry being added has a running backoff timer.
   142  	evicted, ok := dc.addEntry(cacheKeys[1], cacheEntries[1])
   143  	if evicted || !ok {
   144  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", evicted, ok)
   145  	}
   146  
   147  	// Add another entry leading to the eviction of the above entry which has a
   148  	// running backoff timer. The first return value is expected to be true.
   149  	backoffCancelled, ok := dc.addEntry(cacheKeys[2], cacheEntries[2])
   150  	if !backoffCancelled || !ok {
   151  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (true, true)", backoffCancelled, ok)
   152  	}
   153  
   154  	// Add another entry leading to the eviction of the above entry which does not
   155  	// have a running backoff timer. This should evict the above entry, but the
   156  	// first return value is expected to be false.
   157  	backoffCancelled, ok = dc.addEntry(cacheKeys[3], cacheEntries[3])
   158  	if backoffCancelled || !ok {
   159  		t.Fatalf("dataCache.addEntry() returned (%v, %v) want (false, true)", backoffCancelled, ok)
   160  	}
   161  }
   162  
   163  func (s) TestDataCache_Resize(t *testing.T) {
   164  	initCacheEntries()
   165  	dc := newDataCache(5, nil)
   166  	for i, k := range cacheKeys {
   167  		dc.addEntry(k, cacheEntries[i])
   168  	}
   169  
   170  	// The first cache entry (with a key of cacheKeys[0]) that we added has an
   171  	// earliestEvictTime in the future. As part of the resize operation, we
   172  	// traverse the cache in least recently used order, and this will be first
   173  	// entry that we will encounter. And since the earliestEvictTime is in the
   174  	// future, the resize operation will stop, leaving the cache bigger than
   175  	// what was asked for.
   176  	if dc.resize(1) {
   177  		t.Fatalf("dataCache.resize() returned true, want false")
   178  	}
   179  	if dc.currentSize != 5 {
   180  		t.Fatalf("dataCache.size is %d, want 5", dc.currentSize)
   181  	}
   182  
   183  	// Remove the entry with earliestEvictTime in the future and retry the
   184  	// resize operation.
   185  	dc.removeEntryForTesting(cacheKeys[0])
   186  	if !dc.resize(1) {
   187  		t.Fatalf("dataCache.resize() returned false, want true")
   188  	}
   189  	if dc.currentSize != 1 {
   190  		t.Fatalf("dataCache.size is %d, want 1", dc.currentSize)
   191  	}
   192  }
   193  
   194  func (s) TestDataCache_EvictExpiredEntries(t *testing.T) {
   195  	initCacheEntries()
   196  	dc := newDataCache(5, nil)
   197  	for i, k := range cacheKeys {
   198  		dc.addEntry(k, cacheEntries[i])
   199  	}
   200  
   201  	// The last two entries in the cacheEntries list have expired, and will be
   202  	// evicted. The first three should still remain in the cache.
   203  	if !dc.evictExpiredEntries() {
   204  		t.Fatal("dataCache.evictExpiredEntries() returned false, want true")
   205  	}
   206  	if dc.currentSize != 3 {
   207  		t.Fatalf("dataCache.size is %d, want 3", dc.currentSize)
   208  	}
   209  	for i := 0; i < 3; i++ {
   210  		entry := dc.getEntry(cacheKeys[i])
   211  		if !cmp.Equal(entry, cacheEntries[i], cmp.AllowUnexported(cacheEntry{}, backoffState{}), cmpopts.IgnoreUnexported(time.Timer{})) {
   212  			t.Fatalf("Data cache lookup for key %v returned entry %v, want %v", cacheKeys[i], entry, cacheEntries[i])
   213  		}
   214  	}
   215  }
   216  
   217  func (s) TestDataCache_ResetBackoffState(t *testing.T) {
   218  	type fakeBackoff struct {
   219  		backoff.Strategy
   220  	}
   221  
   222  	initCacheEntries()
   223  	dc := newDataCache(5, nil)
   224  	for i, k := range cacheKeys {
   225  		dc.addEntry(k, cacheEntries[i])
   226  	}
   227  
   228  	newBackoffState := &backoffState{bs: &fakeBackoff{}}
   229  	if updatePicker := dc.resetBackoffState(newBackoffState); !updatePicker {
   230  		t.Fatal("dataCache.resetBackoffState() returned updatePicker is false, want true")
   231  	}
   232  
   233  	// Make sure that the entry with no backoff state was not touched.
   234  	if entry := dc.getEntry(cacheKeys[0]); cmp.Equal(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})) {
   235  		t.Fatal("dataCache.resetBackoffState() touched entries without a valid backoffState")
   236  	}
   237  
   238  	// Make sure that the entry with a valid backoff state was reset.
   239  	entry := dc.getEntry(cacheKeys[1])
   240  	if diff := cmp.Diff(entry.backoffState, newBackoffState, cmp.AllowUnexported(backoffState{})); diff != "" {
   241  		t.Fatalf("unexpected diff in backoffState for cache entry after dataCache.resetBackoffState(): %s", diff)
   242  	}
   243  }
   244  

View as plain text