...

Source file src/golang.org/x/time/rate/rate_test.go

Documentation: golang.org/x/time/rate

     1  // Copyright 2015 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package rate
     6  
     7  import (
     8  	"context"
     9  	"math"
    10  	"sync"
    11  	"sync/atomic"
    12  	"testing"
    13  	"time"
    14  )
    15  
    16  func TestLimit(t *testing.T) {
    17  	if Limit(10) == Inf {
    18  		t.Errorf("Limit(10) == Inf should be false")
    19  	}
    20  }
    21  
    22  func closeEnough(a, b Limit) bool {
    23  	return (math.Abs(float64(a)/float64(b)) - 1.0) < 1e-9
    24  }
    25  
// TestEvery checks that Every converts a per-event interval into the
// equivalent events-per-second Limit, including Inf for non-positive
// intervals.
func TestEvery(t *testing.T) {
	cases := []struct {
		interval time.Duration
		lim      Limit
	}{
		{0, Inf},
		{-1, Inf},
		{1 * time.Nanosecond, Limit(1e9)},
		{1 * time.Microsecond, Limit(1e6)},
		{1 * time.Millisecond, Limit(1e3)},
		{10 * time.Millisecond, Limit(100)},
		{100 * time.Millisecond, Limit(10)},
		{1 * time.Second, Limit(1)},
		{2 * time.Second, Limit(0.5)},
		{time.Duration(2.5 * float64(time.Second)), Limit(0.4)},
		{4 * time.Second, Limit(0.25)},
		{10 * time.Second, Limit(0.1)},
		{time.Duration(math.MaxInt64), Limit(1e9 / float64(math.MaxInt64))},
	}
	for _, tc := range cases {
		lim := Every(tc.interval)
		// The conversion goes through floating point, so compare
		// approximately rather than with exact equality.
		if !closeEnough(lim, tc.lim) {
			t.Errorf("Every(%v) = %v want %v", tc.interval, lim, tc.lim)
		}
	}
}
    52  
const (
	// d is the base time step of these tests; all fixed timestamps
	// below are whole multiples of d after t0.
	d = 100 * time.Millisecond
)

var (
	// t0..t9 are fixed instants, d apart, used as the fake "now"
	// values passed to the limiter in the table-driven tests.
	t0 = time.Now()
	t1 = t0.Add(time.Duration(1) * d)
	t2 = t0.Add(time.Duration(2) * d)
	t3 = t0.Add(time.Duration(3) * d)
	t4 = t0.Add(time.Duration(4) * d)
	t5 = t0.Add(time.Duration(5) * d)
	t9 = t0.Add(time.Duration(9) * d)
)
    66  
// allow is one step of a table-driven AllowN test: at time t the
// limiter should report toks available tokens, and AllowN(t, n)
// should return ok.
type allow struct {
	t    time.Time // time of the call
	toks float64   // expected tokens available at t, before the call
	n    int       // tokens requested from AllowN
	ok   bool      // expected AllowN result
}
    73  
    74  func run(t *testing.T, lim *Limiter, allows []allow) {
    75  	t.Helper()
    76  	for i, allow := range allows {
    77  		if toks := lim.TokensAt(allow.t); toks != allow.toks {
    78  			t.Errorf("step %d: lim.TokensAt(%v) = %v want %v",
    79  				i, allow.t, toks, allow.toks)
    80  		}
    81  		ok := lim.AllowN(allow.t, allow.n)
    82  		if ok != allow.ok {
    83  			t.Errorf("step %d: lim.AllowN(%v, %v) = %v want %v",
    84  				i, allow.t, allow.n, ok, allow.ok)
    85  		}
    86  	}
    87  }
    88  
// TestLimiterBurst1 steps a 10/s limiter with burst 1 through fixed
// times: one token accrues per d step, and n above the burst never succeeds.
func TestLimiterBurst1(t *testing.T) {
	run(t, NewLimiter(10, 1), []allow{
		{t0, 1, 1, true},
		{t0, 0, 1, false},
		{t0, 0, 1, false},
		{t1, 1, 1, true},
		{t1, 0, 1, false},
		{t1, 0, 1, false},
		{t2, 1, 2, false}, // burst size is 1, so n=2 always fails
		{t2, 1, 1, true},
		{t2, 0, 1, false},
	})
}
   102  
// TestLimiterBurst3 steps a 10/s limiter with burst 3, covering
// multi-token requests and the bucket saturating at the burst size
// (by t9 only 3 tokens are available despite 5 steps elapsing).
func TestLimiterBurst3(t *testing.T) {
	run(t, NewLimiter(10, 3), []allow{
		{t0, 3, 2, true},
		{t0, 1, 2, false},
		{t0, 1, 1, true},
		{t0, 0, 1, false},
		{t1, 1, 4, false},
		{t2, 2, 1, true},
		{t3, 2, 1, true},
		{t4, 2, 1, true},
		{t4, 1, 1, true},
		{t4, 0, 1, false},
		{t4, 0, 1, false},
		{t9, 3, 3, true},
		{t9, 0, 0, true},
	})
}
   120  
// TestLimiterJumpBackwards checks that a call with an earlier time than
// the limiter has already seen does not roll back accumulated tokens.
func TestLimiterJumpBackwards(t *testing.T) {
	run(t, NewLimiter(10, 3), []allow{
		{t1, 3, 1, true}, // start at t1
		{t0, 2, 1, true}, // jump back to t0, two tokens remain
		{t0, 1, 1, true},
		{t0, 0, 1, false},
		{t0, 0, 1, false},
		{t1, 1, 1, true}, // got a token
		{t1, 0, 1, false},
		{t1, 0, 1, false},
		{t2, 1, 1, true}, // got another token
		{t2, 0, 1, false},
		{t2, 0, 1, false},
	})
}
   136  
   137  // Ensure that tokensFromDuration doesn't produce
   138  // rounding errors by truncating nanoseconds.
   139  // See golang.org/issues/34861.
   140  func TestLimiter_noTruncationErrors(t *testing.T) {
   141  	if !NewLimiter(0.7692307692307693, 1).Allow() {
   142  		t.Fatal("expected true")
   143  	}
   144  }
   145  
   146  // testTime is a fake time used for testing.
   147  type testTime struct {
   148  	mu     sync.Mutex
   149  	cur    time.Time   // current fake time
   150  	timers []testTimer // fake timers
   151  }
   152  
   153  // testTimer is a fake timer.
   154  type testTimer struct {
   155  	when time.Time
   156  	ch   chan<- time.Time
   157  }
   158  
   159  // now returns the current fake time.
   160  func (tt *testTime) now() time.Time {
   161  	tt.mu.Lock()
   162  	defer tt.mu.Unlock()
   163  	return tt.cur
   164  }
   165  
   166  // newTimer creates a fake timer. It returns the channel,
   167  // a function to stop the timer (which we don't care about),
   168  // and a function to advance to the next timer.
   169  func (tt *testTime) newTimer(dur time.Duration) (<-chan time.Time, func() bool, func()) {
   170  	tt.mu.Lock()
   171  	defer tt.mu.Unlock()
   172  	ch := make(chan time.Time, 1)
   173  	timer := testTimer{
   174  		when: tt.cur.Add(dur),
   175  		ch:   ch,
   176  	}
   177  	tt.timers = append(tt.timers, timer)
   178  	return ch, func() bool { return true }, tt.advanceToTimer
   179  }
   180  
   181  // since returns the fake time since the given time.
   182  func (tt *testTime) since(t time.Time) time.Duration {
   183  	tt.mu.Lock()
   184  	defer tt.mu.Unlock()
   185  	return tt.cur.Sub(t)
   186  }
   187  
   188  // advance advances the fake time.
   189  func (tt *testTime) advance(dur time.Duration) {
   190  	tt.mu.Lock()
   191  	defer tt.mu.Unlock()
   192  	tt.advanceUnlocked(dur)
   193  }
   194  
   195  // advanceUnlock advances the fake time, assuming it is already locked.
   196  func (tt *testTime) advanceUnlocked(dur time.Duration) {
   197  	tt.cur = tt.cur.Add(dur)
   198  	i := 0
   199  	for i < len(tt.timers) {
   200  		if tt.timers[i].when.After(tt.cur) {
   201  			i++
   202  		} else {
   203  			tt.timers[i].ch <- tt.cur
   204  			copy(tt.timers[i:], tt.timers[i+1:])
   205  			tt.timers = tt.timers[:len(tt.timers)-1]
   206  		}
   207  	}
   208  }
   209  
   210  // advanceToTimer advances the time to the next timer.
   211  func (tt *testTime) advanceToTimer() {
   212  	tt.mu.Lock()
   213  	defer tt.mu.Unlock()
   214  	if len(tt.timers) == 0 {
   215  		panic("no timer")
   216  	}
   217  	when := tt.timers[0].when
   218  	for _, timer := range tt.timers[1:] {
   219  		if timer.when.Before(when) {
   220  			when = timer.when
   221  		}
   222  	}
   223  	tt.advanceUnlocked(when.Sub(tt.cur))
   224  }
   225  
   226  // makeTestTime hooks the testTimer into the package.
   227  func makeTestTime(t *testing.T) *testTime {
   228  	return &testTime{
   229  		cur: time.Now(),
   230  	}
   231  }
   232  
   233  func TestSimultaneousRequests(t *testing.T) {
   234  	const (
   235  		limit       = 1
   236  		burst       = 5
   237  		numRequests = 15
   238  	)
   239  	var (
   240  		wg    sync.WaitGroup
   241  		numOK = uint32(0)
   242  	)
   243  
   244  	// Very slow replenishing bucket.
   245  	lim := NewLimiter(limit, burst)
   246  
   247  	// Tries to take a token, atomically updates the counter and decreases the wait
   248  	// group counter.
   249  	f := func() {
   250  		defer wg.Done()
   251  		if ok := lim.Allow(); ok {
   252  			atomic.AddUint32(&numOK, 1)
   253  		}
   254  	}
   255  
   256  	wg.Add(numRequests)
   257  	for i := 0; i < numRequests; i++ {
   258  		go f()
   259  	}
   260  	wg.Wait()
   261  	if numOK != burst {
   262  		t.Errorf("numOK = %d, want %d", numOK, burst)
   263  	}
   264  }
   265  
func TestLongRunningQPS(t *testing.T) {
	// The test runs for a few (fake) seconds executing many requests
	// and then checks that overall number of requests is reasonable.
	const (
		limit = 100
		burst = 100
	)
	var (
		numOK = int32(0)
		tt    = makeTestTime(t)
	)

	lim := NewLimiter(limit, burst)

	start := tt.now()
	end := start.Add(5 * time.Second)
	for tt.now().Before(end) {
		if ok := lim.AllowN(tt.now(), 1); ok {
			numOK++
		}

		// This will still offer ~500 requests per second, but won't consume
		// outrageous amount of CPU.
		tt.advance(2 * time.Millisecond)
	}
	elapsed := tt.since(start)
	// ideal is the initial burst plus the tokens replenished over the
	// elapsed fake time.
	ideal := burst + (limit * float64(elapsed) / float64(time.Second))

	// We should never get more requests than allowed.
	if want := int32(ideal + 1); numOK > want {
		t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal)
	}
	// We should get very close to the number of requests allowed.
	if want := int32(0.999 * ideal); numOK < want {
		t.Errorf("numOK = %d, want %d (ideal %f)", numOK, want, ideal)
	}
}
   303  
// A request provides the arguments to lim.reserveN(t, n) and the expected results (act, ok).
type request struct {
	t   time.Time // time of the reservation request
	n   int       // tokens requested
	act time.Time // expected time the reservation may act
	ok  bool      // expected success of the reservation
}
   311  
   312  // dFromDuration converts a duration to the nearest multiple of the global constant d.
   313  func dFromDuration(dur time.Duration) int {
   314  	// Add d/2 to dur so that integer division will round to
   315  	// the nearest multiple instead of truncating.
   316  	// (We don't care about small inaccuracies.)
   317  	return int((dur + (d / 2)) / d)
   318  }
   319  
   320  // dSince returns multiples of d since t0
   321  func dSince(t time.Time) int {
   322  	return dFromDuration(t.Sub(t0))
   323  }
   324  
// runReserve reserves req.n tokens at req.t with no cap on the wait
// duration, checking the outcome against req (see runReserveMax).
func runReserve(t *testing.T, lim *Limiter, req request) *Reservation {
	t.Helper()
	return runReserveMax(t, lim, req, InfDuration)
}
   329  
// runReserveMax attempts to reserve req.n tokens at time req.t, limiting the delay until action to
// maxReserve. It checks whether the response matches req.act and req.ok. If not, it reports a test
// error including the difference from expected durations in multiples of d (global constant).
func runReserveMax(t *testing.T, lim *Limiter, req request, maxReserve time.Duration) *Reservation {
	t.Helper()
	r := lim.reserveN(req.t, req.n, maxReserve)
	// Precedence note: this reads as (r.ok && times-differ) || (ok
	// flags differ) — a mismatch is reported when the success flags
	// disagree, or when both succeeded but the action times land in
	// different d-sized buckets.
	if r.ok && (dSince(r.timeToAct) != dSince(req.act)) || r.ok != req.ok {
		t.Errorf("lim.reserveN(t%d, %v, %v) = (t%d, %v) want (t%d, %v)",
			dSince(req.t), req.n, maxReserve, dSince(r.timeToAct), r.ok, dSince(req.act), req.ok)
	}
	return &r
}
   342  
   343  func TestSimpleReserve(t *testing.T) {
   344  	lim := NewLimiter(10, 2)
   345  
   346  	runReserve(t, lim, request{t0, 2, t0, true})
   347  	runReserve(t, lim, request{t0, 2, t2, true})
   348  	runReserve(t, lim, request{t3, 2, t4, true})
   349  }
   350  
// TestMix interleaves reservations with AllowN calls on one limiter.
func TestMix(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 3, t1, false}) // should return false because n > Burst
	runReserve(t, lim, request{t0, 2, t0, true})
	run(t, lim, []allow{{t1, 1, 2, false}}) // not enough tokens - don't allow
	runReserve(t, lim, request{t1, 2, t2, true})
	run(t, lim, []allow{{t1, -1, 1, false}}) // negative tokens - don't allow
	run(t, lim, []allow{{t3, 1, 1, true}})
}
   361  
// TestCancelInvalid checks that canceling a failed (not-ok)
// reservation does not credit tokens back to the limiter.
func TestCancelInvalid(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 3, t3, false}) // n > burst, so not ok
	r.CancelAt(t0)                               // should have no effect
	runReserve(t, lim, request{t0, 2, t2, true}) // did not get extra tokens
}
   370  
// TestCancelLast checks that canceling the most recent reservation
// returns all of its tokens.
func TestCancelLast(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 2, t2, true})
	r.CancelAt(t1) // got 2 tokens back
	runReserve(t, lim, request{t1, 2, t2, true})
}
   379  
// TestCancelTooLate checks that canceling after the reservation's
// time to act has already passed changes nothing.
func TestCancelTooLate(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 2, t2, true})
	r.CancelAt(t3) // too late to cancel - should have no effect
	runReserve(t, lim, request{t3, 2, t4, true})
}
   388  
// TestCancel0Tokens checks that canceling returns nothing when a later
// reservation has already consumed the time the cancel would free.
func TestCancel0Tokens(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 1, t1, true})
	runReserve(t, lim, request{t0, 1, t2, true})
	r.CancelAt(t0) // got 0 tokens back
	runReserve(t, lim, request{t0, 1, t3, true})
}
   398  
// TestCancel1Token checks that only the tokens not claimed by later
// reservations are returned on cancel.
func TestCancel1Token(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 2, t2, true})
	runReserve(t, lim, request{t0, 1, t3, true})
	r.CancelAt(t2) // got 1 token back
	runReserve(t, lim, request{t2, 2, t4, true})
}
   408  
// TestCancelMulti checks that canceling an earlier reservation after a
// later one was already canceled restores tokens as if the later one
// had never been made.
func TestCancelMulti(t *testing.T) {
	lim := NewLimiter(10, 4)

	runReserve(t, lim, request{t0, 4, t0, true})
	rA := runReserve(t, lim, request{t0, 3, t3, true})
	runReserve(t, lim, request{t0, 1, t4, true})
	rC := runReserve(t, lim, request{t0, 1, t5, true})
	rC.CancelAt(t1) // get 1 token back
	rA.CancelAt(t1) // get 2 tokens back, as if C was never reserved
	runReserve(t, lim, request{t1, 3, t5, true})
}
   420  
// TestReserveJumpBack checks reservations whose request time jumps
// backwards, and that failed reservations (n > burst, or maxReserve
// exceeded) leave the limiter state unchanged.
func TestReserveJumpBack(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t1, 2, t1, true}) // start at t1
	runReserve(t, lim, request{t0, 1, t1, true}) // should violate Limit,Burst
	runReserve(t, lim, request{t2, 2, t3, true})
	// burst size is 2, so n=3 always fails, and the state of lim should not be changed
	runReserve(t, lim, request{t0, 3, time.Time{}, false})
	runReserve(t, lim, request{t2, 1, t4, true})
	// the maxReserve is not enough so it fails, and the state of lim should not be changed
	runReserveMax(t, lim, request{t0, 2, time.Time{}, false}, d)
	runReserve(t, lim, request{t2, 1, t5, true})
}
   434  
// TestReserveJumpBackCancel checks canceling a reservation at a time
// earlier than the limiter has already seen.
func TestReserveJumpBackCancel(t *testing.T) {
	lim := NewLimiter(10, 2)

	runReserve(t, lim, request{t1, 2, t1, true}) // start at t1
	r := runReserve(t, lim, request{t1, 2, t3, true})
	runReserve(t, lim, request{t1, 1, t4, true})
	r.CancelAt(t0)                               // cancel at t0, get 1 token back
	runReserve(t, lim, request{t1, 2, t4, true}) // should violate Limit,Burst
}
   444  
// TestReserveSetLimit checks that raising the limit mid-stream
// shortens the delay of subsequent reservations.
func TestReserveSetLimit(t *testing.T) {
	lim := NewLimiter(5, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	runReserve(t, lim, request{t0, 2, t4, true})
	lim.SetLimitAt(t2, 10)
	runReserve(t, lim, request{t2, 1, t4, true}) // violates Limit and Burst
}
   453  
// TestReserveSetBurst checks that raising the burst mid-stream permits
// a larger reservation than the original burst allowed.
func TestReserveSetBurst(t *testing.T) {
	lim := NewLimiter(5, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	runReserve(t, lim, request{t0, 2, t4, true})
	lim.SetBurstAt(t3, 4)
	runReserve(t, lim, request{t0, 4, t9, true}) // violates Limit and Burst
}
   462  
// TestReserveSetLimitCancel checks that a reservation canceled after a
// limit change returns its tokens under the new limit.
func TestReserveSetLimitCancel(t *testing.T) {
	lim := NewLimiter(5, 2)

	runReserve(t, lim, request{t0, 2, t0, true})
	r := runReserve(t, lim, request{t0, 2, t4, true})
	lim.SetLimitAt(t2, 10)
	r.CancelAt(t2) // 2 tokens back
	runReserve(t, lim, request{t2, 2, t3, true})
}
   472  
// TestReserveMax checks reserveN with a cap on the acceptable wait:
// a reservation whose time to act would exceed the cap must fail.
func TestReserveMax(t *testing.T) {
	lim := NewLimiter(10, 2)
	maxT := d

	runReserveMax(t, lim, request{t0, 2, t0, true}, maxT)
	runReserveMax(t, lim, request{t0, 1, t1, true}, maxT)  // reserve for close future
	runReserveMax(t, lim, request{t0, 1, t2, false}, maxT) // time to act too far in the future
}
   481  
// wait describes one lim.wait call and its expected outcome.
type wait struct {
	name   string          // label used in failure messages
	ctx    context.Context // context passed to wait
	n      int             // tokens requested
	delay  int             // expected wait, in multiples of d
	nilErr bool            // whether the returned error should be nil
}
   489  
// runWait executes lim.wait on the fake clock tt as described by w and
// reports a test error if the returned error's nil-ness or the elapsed
// fake time doesn't match the expectation (within waitDelayOk's slop).
func runWait(t *testing.T, tt *testTime, lim *Limiter, w wait) {
	t.Helper()
	start := tt.now()
	err := lim.wait(w.ctx, w.n, start, tt.newTimer)
	delay := tt.since(start)

	if (w.nilErr && err != nil) || (!w.nilErr && err == nil) || !waitDelayOk(w.delay, delay) {
		errString := "<nil>"
		if !w.nilErr {
			errString = "<non-nil error>"
		}
		t.Errorf("lim.WaitN(%v, lim, %v) = %v with delay %v; want %v with delay %v (±%v)",
			w.name, w.n, err, delay, errString, d*time.Duration(w.delay), d/2)
	}
}
   505  
// waitDelayOk reports whether a duration spent in WaitN is “close enough” to
// wantD multiples of d, given scheduling slop.
func waitDelayOk(wantD int, got time.Duration) bool {
	gotD := dFromDuration(got)

	// The actual time spent waiting will be REDUCED by the amount of time spent
	// since the last call to the limiter. We expect the time in between calls to
	// be executing simple, straight-line, non-blocking code, so it should reduce
	// the wait time by no more than half a d, which would round to exactly wantD.
	if gotD < wantD {
		return false
	}

	// The actual time spend waiting will be INCREASED by the amount of scheduling
	// slop in the platform's sleep syscall, plus the amount of time spent executing
	// straight-line code before measuring the elapsed duration.
	//
	// The latter is surely less than half a d, but the former is empirically
	// sometimes larger on a number of platforms for a number of reasons.
	// NetBSD and OpenBSD tend to overshoot sleeps by a wide margin due to a
	// suspected platform bug; see https://go.dev/issue/44067 and
	// https://go.dev/issue/50189.
	// Longer delays were also observed on slower builders with Linux kernels
	// (linux-ppc64le-buildlet, android-amd64-emu), and on Solaris and Plan 9.
	//
	// Since d is already fairly generous, we take 150% of wantD rounded up —
	// that's at least enough to account for the overruns we've seen so far in
	// practice.
	maxD := (wantD*3 + 1) / 2
	return gotD <= maxD
}
   537  
// TestWaitSimple covers the basic wait outcomes: canceled context,
// n exceeding the burst, immediate success, and a delayed success.
func TestWaitSimple(t *testing.T) {
	tt := makeTestTime(t)

	lim := NewLimiter(10, 3)

	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	runWait(t, tt, lim, wait{"already-cancelled", ctx, 1, 0, false})

	runWait(t, tt, lim, wait{"exceed-burst-error", context.Background(), 4, 0, false})

	runWait(t, tt, lim, wait{"act-now", context.Background(), 2, 0, true})
	runWait(t, tt, lim, wait{"act-later", context.Background(), 3, 2, true})
}
   552  
// TestWaitCancel checks that canceling the context mid-wait returns an
// error and gives the reserved tokens back to the limiter.
func TestWaitCancel(t *testing.T) {
	tt := makeTestTime(t)

	lim := NewLimiter(10, 3)

	ctx, cancel := context.WithCancel(context.Background())
	runWait(t, tt, lim, wait{"act-now", ctx, 2, 0, true}) // after this lim.tokens = 1
	// Cancel the context as soon as the fake clock advances by d.
	ch, _, _ := tt.newTimer(d)
	go func() {
		<-ch
		cancel()
	}()
	runWait(t, tt, lim, wait{"will-cancel", ctx, 3, 1, false})
	// should get 3 tokens back, and have lim.tokens = 2
	t.Logf("tokens:%v last:%v lastEvent:%v", lim.tokens, lim.last, lim.lastEvent)
	runWait(t, tt, lim, wait{"act-now-after-cancel", context.Background(), 2, 0, true})
}
   570  
// TestWaitTimeout checks that a wait which cannot complete before the
// context deadline fails rather than waiting.
func TestWaitTimeout(t *testing.T) {
	tt := makeTestTime(t)

	lim := NewLimiter(10, 3)

	ctx, cancel := context.WithTimeout(context.Background(), d)
	defer cancel()
	runWait(t, tt, lim, wait{"act-now", ctx, 2, 0, true})
	runWait(t, tt, lim, wait{"w-timeout-err", ctx, 3, 0, false})
}
   581  
// TestWaitInf checks that an infinite-rate limiter admits a request
// larger than its burst without error or delay.
func TestWaitInf(t *testing.T) {
	tt := makeTestTime(t)

	lim := NewLimiter(Inf, 0)

	runWait(t, tt, lim, wait{"exceed-burst-no-error", context.Background(), 3, 0, true})
}
   589  
// BenchmarkAllowN measures AllowN under parallel load at a fixed
// timestamp (so tokens never refill after the initial burst is spent).
func BenchmarkAllowN(b *testing.B) {
	lim := NewLimiter(Every(1*time.Second), 1)
	now := time.Now()
	b.ReportAllocs()
	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			lim.AllowN(now, 1)
		}
	})
}
   601  
// BenchmarkWaitNNoDelay measures WaitN when no waiting is required:
// limit and burst are both b.N, so all b.N single-token calls can be
// served from the initial burst without sleeping.
func BenchmarkWaitNNoDelay(b *testing.B) {
	lim := NewLimiter(Limit(b.N), b.N)
	ctx := context.Background()
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		lim.WaitN(ctx, 1)
	}
}
   611  
   612  func TestZeroLimit(t *testing.T) {
   613  	r := NewLimiter(0, 1)
   614  	if !r.Allow() {
   615  		t.Errorf("Limit(0, 1) want true when first used")
   616  	}
   617  	if r.Allow() {
   618  		t.Errorf("Limit(0, 1) want false when already used")
   619  	}
   620  }
   621  

View as plain text