
Source file src/go.etcd.io/etcd/server/v3/mvcc/watchable_store_bench_test.go

Documentation: go.etcd.io/etcd/server/v3/mvcc

// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package mvcc

import (
	"math/rand"
	"os"
	"testing"

	"go.etcd.io/etcd/pkg/v3/traceutil"
	"go.etcd.io/etcd/server/v3/lease"
	betesting "go.etcd.io/etcd/server/v3/mvcc/backend/testing"

	"go.uber.org/zap"
)

func BenchmarkWatchableStorePut(b *testing.B) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

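	// Only the Put loop below is measured: b.ResetTimer discards the setup
	// time above, and b.ReportAllocs records per-operation allocations.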
	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		s.Put(keys[i], vals[i], lease.NoLease)
	}
}

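// NOTE: cleanup and createBytesSlice are test helpers defined elsewhere in
// this package, not in this file. A minimal sketch of their assumed shape,
// for reading convenience only:
//
//	// cleanup closes the store and backend and removes the backend file.
//	func cleanup(s KV, b backend.Backend, path string) {
//		s.Close()
//		b.Close()
//		os.Remove(path)
//	}
//
//	// createBytesSlice returns sliceN slices of bytesN random bytes each.
//	func createBytesSlice(bytesN, sliceN int) [][]byte {
//		rs := make([][]byte, 0, sliceN)
//		for i := 0; i < sliceN; i++ {
//			v := make([]byte, bytesN)
//			if _, err := rand.Read(v); err != nil {
//				panic(err)
//			}
//			rs = append(rs, v)
//		}
//		return rs
//	}
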
// BenchmarkWatchableStoreTxnPut benchmarks the Put operation with
// transaction begin and end, where the transaction involves
// synchronization operations such as mutex locking.
func BenchmarkWatchableStoreTxnPut(b *testing.B) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	s := New(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	// arbitrary number of bytes
	bytesN := 64
	keys := createBytesSlice(bytesN, b.N)
	vals := createBytesSlice(bytesN, b.N)

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		txn := s.Write(traceutil.TODO())
		txn.Put(keys[i], vals[i], lease.NoLease)
		txn.End()
	}
}

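// The benchmarks in this file can be run with the standard Go tooling, e.g.
// (an illustrative invocation; adjust the package path to your checkout):
//
//	go test -run='^$' -bench=BenchmarkWatchableStoreTxnPut -benchmem ./server/mvcc/
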
// BenchmarkWatchableStoreWatchPutSync benchmarks the case of
// many synced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutSync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, true)
}

// BenchmarkWatchableStoreWatchPutUnsync benchmarks the case of
// many unsynced watchers receiving a Put notification.
func BenchmarkWatchableStoreWatchPutUnsync(b *testing.B) {
	benchmarkWatchableStoreWatchPut(b, false)
}

func benchmarkWatchableStoreWatchPut(b *testing.B, synced bool) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})
	defer cleanup(s, be, tmpPath)

	k := []byte("testkey")
	v := []byte("testval")

	rev := int64(0)
	if !synced {
		// non-0 value to keep watchers in unsynced
		rev = 1
	}

	w := s.NewWatchStream()
	defer w.Close()
	watchIDs := make([]WatchID, b.N)
	for i := range watchIDs {
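		// A WatchID of 0 asks the stream to assign an ID automatically;
		// the nil end key limits each watcher to the single key k.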
		watchIDs[i], _ = w.Watch(0, k, nil, rev)
	}

	b.ResetTimer()
	b.ReportAllocs()

	// trigger watchers
	s.Put(k, v, lease.NoLease)
	for range watchIDs {
		<-w.Chan()
	}
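	// non-blocking receive: confirm that no unexpected extra event is
	// pending on the watch channel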
	select {
	case wc := <-w.Chan():
		b.Fatalf("unexpected data %v", wc)
	default:
	}
}

// BenchmarkWatchableStoreUnsyncedCancel benchmarks the performance of the
// cancel function for unsynced watchers in a watchableStore. It creates k*N
// watchers to populate unsynced with a reasonably large number of watchers,
// then measures the time it takes to cancel N watchers out of those k*N.
// The performance is expected to differ depending on the implementation of
// the unsynced member.
// TODO: k is an arbitrary constant. We need to figure out what factor to
// use to simulate real-world use cases.
func BenchmarkWatchableStoreUnsyncedCancel(b *testing.B) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	s := NewStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})

	// Manually create the watchableStore instead of calling newWatchableStore
	// because newWatchableStore starts a syncWatchersLoop goroutine that
	// periodically syncs the watchers in the unsynced map. We want to keep
	// the watchers in unsynced for this benchmark.
	ws := &watchableStore{
		store:    s,
		unsynced: newWatcherGroup(),

		// initialized so that the test does not crash from assigning
		// to a nil map; 'synced' is never populated in this test.
		synced: newWatcherGroup(),
	}

	defer func() {
		ws.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	// (testKey in this test). This increases the rev to 1,
	// and later we set each watcher's startRev to 1 to
	// force the watchers into unsynced.
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := ws.NewWatchStream()

	const k int = 2
	benchSampleN := b.N
	watcherN := k * benchSampleN

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// non-0 value to keep watchers in unsynced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 1)
	}

	// randomly cancel N watchers so the benchmark is not biased toward
	// ordered data structures such as slices.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	// cancel N watchers
	for _, idx := range ix[:benchSampleN] {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}

func BenchmarkWatchableStoreSyncedCancel(b *testing.B) {
	be, tmpPath := betesting.NewDefaultTmpBackend(b)
	s := newWatchableStore(zap.NewExample(), be, &lease.FakeLessor{}, StoreConfig{})

	defer func() {
		s.store.Close()
		os.Remove(tmpPath)
	}()

	// Put a key so that we can spawn watchers on that key
	testKey := []byte("foo")
	testValue := []byte("bar")
	s.Put(testKey, testValue, lease.NoLease)

	w := s.NewWatchStream()

	// put 1 million watchers on the same key
	const watcherN = 1000000

	watchIDs := make([]WatchID, watcherN)
	for i := 0; i < watcherN; i++ {
		// 0 for startRev to keep watchers in synced
		watchIDs[i], _ = w.Watch(0, testKey, nil, 0)
	}

	// randomly cancel watchers so the benchmark is not biased toward
	// ordered data structures such as slices.
	ix := rand.Perm(watcherN)

	b.ResetTimer()
	b.ReportAllocs()

	for _, idx := range ix {
		if err := w.Cancel(watchIDs[idx]); err != nil {
			b.Error(err)
		}
	}
}