...

Source file src/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go

Documentation: k8s.io/kubernetes/pkg/controller/replicaset

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package replicaset
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"math/rand"
    24  	"net/http/httptest"
    25  	"net/url"
    26  	"reflect"
    27  	"sort"
    28  	"strings"
    29  	"sync"
    30  	"testing"
    31  	"time"
    32  
    33  	apps "k8s.io/api/apps/v1"
    34  	v1 "k8s.io/api/core/v1"
    35  	apiequality "k8s.io/apimachinery/pkg/api/equality"
    36  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    37  	"k8s.io/apimachinery/pkg/runtime"
    38  	"k8s.io/apimachinery/pkg/runtime/schema"
    39  	"k8s.io/apimachinery/pkg/types"
    40  	"k8s.io/apimachinery/pkg/util/sets"
    41  	"k8s.io/apimachinery/pkg/util/uuid"
    42  	"k8s.io/apimachinery/pkg/util/wait"
    43  	"k8s.io/apimachinery/pkg/watch"
    44  	"k8s.io/client-go/informers"
    45  	clientset "k8s.io/client-go/kubernetes"
    46  	"k8s.io/client-go/kubernetes/fake"
    47  	restclient "k8s.io/client-go/rest"
    48  	core "k8s.io/client-go/testing"
    49  	"k8s.io/client-go/tools/cache"
    50  	utiltesting "k8s.io/client-go/util/testing"
    51  	"k8s.io/client-go/util/workqueue"
    52  	"k8s.io/klog/v2"
    53  	"k8s.io/kubernetes/pkg/controller"
    54  	. "k8s.io/kubernetes/pkg/controller/testutil"
    55  	"k8s.io/kubernetes/pkg/securitycontext"
    56  	"k8s.io/kubernetes/test/utils/ktesting"
    57  	"k8s.io/utils/pointer"
    58  )
    59  
    60  var (
    61  	informerSyncTimeout = 30 * time.Second
    62  )
    63  
    64  func testNewReplicaSetControllerFromClient(tb testing.TB, client clientset.Interface, stopCh chan struct{}, burstReplicas int) (*ReplicaSetController, informers.SharedInformerFactory) {
    65  	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
    66  
    67  	tCtx := ktesting.Init(tb)
    68  	ret := NewReplicaSetController(
    69  		tCtx,
    70  		informers.Apps().V1().ReplicaSets(),
    71  		informers.Core().V1().Pods(),
    72  		client,
    73  		burstReplicas,
    74  	)
    75  
    76  	ret.podListerSynced = alwaysReady
    77  	ret.rsListerSynced = alwaysReady
    78  
    79  	return ret, informers
    80  }
    81  
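        // skipListerFunc tells the fake test server to skip the controller's own
        // GET/LIST traffic for pods and replicasets, so request-count assertions
        // (e.g. fakeHandler.ValidateRequestCount in TestSyncReplicaSetDormancy)
        // only see the status writes.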
    82  func skipListerFunc(verb string, url url.URL) bool {
    83  	if verb != "GET" {
    84  		return false
    85  	}
    86  	if strings.HasSuffix(url.Path, "/pods") || strings.Contains(url.Path, "/replicasets") {
    87  		return true
    88  	}
    89  	return false
    90  }
    91  
    92  var alwaysReady = func() bool { return true }
    93  
    94  func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet {
    95  	isController := true
    96  	rs := &apps.ReplicaSet{
    97  		TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ReplicaSet"},
    98  		ObjectMeta: metav1.ObjectMeta{
    99  			UID:       uuid.NewUUID(),
   100  			Name:      "foobar",
   101  			Namespace: metav1.NamespaceDefault,
   102  			OwnerReferences: []metav1.OwnerReference{
   103  				{UID: "123", Controller: &isController},
   104  			},
   105  			ResourceVersion: "18",
   106  		},
   107  		Spec: apps.ReplicaSetSpec{
   108  			Replicas: pointer.Int32(int32(replicas)),
   109  			Selector: &metav1.LabelSelector{MatchLabels: selectorMap},
   110  			Template: v1.PodTemplateSpec{
   111  				ObjectMeta: metav1.ObjectMeta{
   112  					Labels: map[string]string{
   113  						"name": "foo",
   114  						"type": "production",
   115  					},
   116  				},
   117  				Spec: v1.PodSpec{
   118  					Containers: []v1.Container{
   119  						{
   120  							Image:                  "foo/bar",
   121  							TerminationMessagePath: v1.TerminationMessagePathDefault,
   122  							ImagePullPolicy:        v1.PullIfNotPresent,
   123  							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
   124  						},
   125  					},
   126  					RestartPolicy: v1.RestartPolicyAlways,
   127  					DNSPolicy:     v1.DNSDefault,
   128  					NodeSelector: map[string]string{
   129  						"baz": "blah",
   130  					},
   131  				},
   132  			},
   133  		},
   134  	}
   135  	return rs
   136  }
   137  
   138  // create a pod with the given phase for the given rs (same selectors and namespace)
   139  func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransitionTime *metav1.Time, properlyOwned bool) *v1.Pod {
   140  	var conditions []v1.PodCondition
   141  	if status == v1.PodRunning {
   142  		condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
   143  		if lastTransitionTime != nil {
   144  			condition.LastTransitionTime = *lastTransitionTime
   145  		}
   146  		conditions = append(conditions, condition)
   147  	}
   148  	var controllerReference metav1.OwnerReference
   149  	if properlyOwned {
   150  		var trueVar = true
   151  		controllerReference = metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
   152  	}
   153  	return &v1.Pod{
   154  		ObjectMeta: metav1.ObjectMeta{
   155  			UID:             uuid.NewUUID(),
   156  			Name:            name,
   157  			Namespace:       rs.Namespace,
   158  			Labels:          rs.Spec.Selector.MatchLabels,
   159  			OwnerReferences: []metav1.OwnerReference{controllerReference},
   160  		},
   161  		Status: v1.PodStatus{Phase: status, Conditions: conditions},
   162  	}
   163  }
   164  
   165  // create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
   166  func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *apps.ReplicaSet, name string) *v1.PodList {
   167  	pods := []v1.Pod{}
   168  	var trueVar = true
   169  	controllerReference := metav1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
   170  	for i := 0; i < count; i++ {
   171  		pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil, false)
   172  		pod.ObjectMeta.Labels = labelMap
   173  		pod.OwnerReferences = []metav1.OwnerReference{controllerReference}
   174  		if store != nil {
   175  			store.Add(pod)
   176  		}
   177  		pods = append(pods, *pod)
   178  	}
   179  	return &v1.PodList{
   180  		Items: pods,
   181  	}
   182  }
   183  
   184  // processSync initiates a sync via processNextWorkItem() to test behavior that
   185  // depends on both functions (such as re-queueing on sync error).
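        // A minimal call-site sketch (names as used in the tests below):
        //
        //	err := processSync(ctx, manager, GetKey(rs, t))
        //	// assert on err and, if needed, on manager.queue.Len() afterwards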
   186  func processSync(ctx context.Context, rsc *ReplicaSetController, key string) error {
   187  	// Save old syncHandler and replace with one that captures the error.
   188  	oldSyncHandler := rsc.syncHandler
   189  	defer func() {
   190  		rsc.syncHandler = oldSyncHandler
   191  	}()
   192  	var syncErr error
   193  	rsc.syncHandler = func(ctx context.Context, key string) error {
   194  		syncErr = oldSyncHandler(ctx, key)
   195  		return syncErr
   196  	}
   197  	rsc.queue.Add(key)
   198  	rsc.processNextWorkItem(ctx)
   199  	return syncErr
   200  }
   201  
   202  func validateSyncReplicaSet(fakePodControl *controller.FakePodControl, expectedCreates, expectedDeletes, expectedPatches int) error {
   203  	if e, a := expectedCreates, len(fakePodControl.Templates); e != a {
   204  		return fmt.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", e, a)
   205  	}
   206  
   207  	if e, a := expectedDeletes, len(fakePodControl.DeletePodName); e != a {
   208  		return fmt.Errorf("Unexpected number of deletes.  Expected %d, saw %d\n", e, a)
   209  	}
   210  
   211  	if e, a := expectedPatches, len(fakePodControl.Patches); e != a {
   212  		return fmt.Errorf("Unexpected number of patches.  Expected %d, saw %d\n", e, a)
   213  	}
   214  
   215  	return nil
   216  }
   217  
   218  func TestSyncReplicaSetDoesNothing(t *testing.T) {
   219  	_, ctx := ktesting.NewTestContext(t)
   220  	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
   221  	fakePodControl := controller.FakePodControl{}
   222  	stopCh := make(chan struct{})
   223  	defer close(stopCh)
   224  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   225  
   226  	// 2 running pods, a controller with 2 replicas, sync is a no-op
   227  	labelMap := map[string]string{"foo": "bar"}
   228  	rsSpec := newReplicaSet(2, labelMap)
   229  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
   230  	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 2, v1.PodRunning, labelMap, rsSpec, "pod")
   231  
   232  	manager.podControl = &fakePodControl
   233  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   234  	err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
   235  	if err != nil {
   236  		t.Fatal(err)
   237  	}
   238  }
   239  
   240  func TestDeleteFinalStateUnknown(t *testing.T) {
   241  	logger, ctx := ktesting.NewTestContext(t)
   242  	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
   243  	fakePodControl := controller.FakePodControl{}
   244  	stopCh := make(chan struct{})
   245  	defer close(stopCh)
   246  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   247  	manager.podControl = &fakePodControl
   248  
   249  	received := make(chan string)
   250  	manager.syncHandler = func(ctx context.Context, key string) error {
   251  		received <- key
   252  		return nil
   253  	}
   254  
   255  	// The DeletedFinalStateUnknown object should cause the ReplicaSet manager to insert
   256  	// the controller matching the selectors of the deleted pod into the work queue.
   257  	labelMap := map[string]string{"foo": "bar"}
   258  	rsSpec := newReplicaSet(1, labelMap)
   259  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
   260  	pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
   261  	manager.deletePod(logger, cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
   262  
   263  	go manager.worker(ctx)
   264  
   265  	expected := GetKey(rsSpec, t)
   266  	select {
   267  	case key := <-received:
   268  		if key != expected {
   269  			t.Errorf("Unexpected sync all for ReplicaSet %v, expected %v", key, expected)
   270  		}
   271  	case <-time.After(wait.ForeverTestTimeout):
   272  		t.Errorf("Processing DeleteFinalStateUnknown took longer than expected")
   273  	}
   274  }
   275  
   276  // Tell the rs to create 100 replicas, but simulate a limit (like a quota limit)
   277  // of 10, and verify that the rs doesn't make 100 create calls per sync pass
   278  func TestSyncReplicaSetCreateFailures(t *testing.T) {
   279  	_, ctx := ktesting.NewTestContext(t)
   280  	fakePodControl := controller.FakePodControl{}
   281  	fakePodControl.CreateLimit = 10
   282  
   283  	labelMap := map[string]string{"foo": "bar"}
   284  	rs := newReplicaSet(fakePodControl.CreateLimit*10, labelMap)
   285  	client := fake.NewSimpleClientset(rs)
   286  	stopCh := make(chan struct{})
   287  	defer close(stopCh)
   288  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   289  
   290  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
   291  
   292  	manager.podControl = &fakePodControl
   293  	manager.syncReplicaSet(ctx, GetKey(rs, t))
   294  	err := validateSyncReplicaSet(&fakePodControl, fakePodControl.CreateLimit, 0, 0)
   295  	if err != nil {
   296  		t.Fatal(err)
   297  	}
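        	// The cap below mirrors slow start: each pass may create
        	// SlowStartInitialBatchSize<<pass pods, and passes stop once the running
        	// total exceeds CreateLimit (for example, with an initial batch size of 1
        	// and CreateLimit 10 this sums 1+2+4+8 = 15).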
   298  	expectedLimit := 0
   299  	for pass := uint8(0); expectedLimit <= fakePodControl.CreateLimit; pass++ {
   300  		expectedLimit += controller.SlowStartInitialBatchSize << pass
   301  	}
   302  	if fakePodControl.CreateCallCount > expectedLimit {
   303  		t.Errorf("Unexpected number of create calls.  Expected <= %d, saw %d\n", expectedLimit, fakePodControl.CreateCallCount)
   304  	}
   305  }
   306  
   307  func TestSyncReplicaSetDormancy(t *testing.T) {
   308  	// Set up a test server so we can lie about the current state of pods
   309  	logger, ctx := ktesting.NewTestContext(t)
   310  	fakeHandler := utiltesting.FakeHandler{
   311  		StatusCode:    200,
   312  		ResponseBody:  "{}",
   313  		SkipRequestFn: skipListerFunc,
   314  		T:             t,
   315  	}
   316  	testServer := httptest.NewServer(&fakeHandler)
   317  	defer testServer.Close()
   318  	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
   319  
   320  	fakePodControl := controller.FakePodControl{}
   321  	stopCh := make(chan struct{})
   322  	defer close(stopCh)
   323  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   324  
   325  	manager.podControl = &fakePodControl
   326  
   327  	labelMap := map[string]string{"foo": "bar"}
   328  	rsSpec := newReplicaSet(2, labelMap)
   329  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
   330  	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rsSpec, "pod")
   331  
   332  	// Creates a replica and sets expectations
   333  	rsSpec.Status.Replicas = 1
   334  	rsSpec.Status.ReadyReplicas = 1
   335  	rsSpec.Status.AvailableReplicas = 1
   336  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   337  	err := validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
   338  	if err != nil {
   339  		t.Fatal(err)
   340  	}
   341  
   342  	// Expectations prevent replica creation, but not an update on status
   343  	rsSpec.Status.Replicas = 0
   344  	rsSpec.Status.ReadyReplicas = 0
   345  	rsSpec.Status.AvailableReplicas = 0
   346  	fakePodControl.Clear()
   347  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   348  	err = validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
   349  	if err != nil {
   350  		t.Fatal(err)
   351  	}
   352  
   353  	// Get the key for the controller
   354  	rsKey, err := controller.KeyFunc(rsSpec)
   355  	if err != nil {
   356  		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
   357  	}
   358  
   359  	// Lowering expectations should lead to a sync that creates a replica; however, the
   360  	// fakePodControl error will prevent this, leaving expectations at 0, 0
   361  	manager.expectations.CreationObserved(logger, rsKey)
   362  	rsSpec.Status.Replicas = 1
   363  	rsSpec.Status.ReadyReplicas = 1
   364  	rsSpec.Status.AvailableReplicas = 1
   365  	fakePodControl.Clear()
   366  	fakePodControl.Err = fmt.Errorf("fake Error")
   367  
   368  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   369  	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
   370  	if err != nil {
   371  		t.Fatal(err)
   372  	}
   373  
   374  	// This replica should not need a lowering of expectations, since the previous create failed
   375  	fakePodControl.Clear()
   376  	fakePodControl.Err = nil
   377  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   378  	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
   379  	if err != nil {
   380  		t.Fatal(err)
   381  	}
   382  
   383  	// 2 PUTs for the ReplicaSet status during the dormancy window.
   384  	// Note that the pod creates go through pod control so they're not recorded.
   385  	fakeHandler.ValidateRequestCount(t, 2)
   386  }
   387  
   388  func TestGetReplicaSetsWithSameController(t *testing.T) {
   389  	someRS := newReplicaSet(1, map[string]string{"foo": "bar"})
   390  	someRS.Name = "rs1"
   391  	relatedRS := newReplicaSet(1, map[string]string{"foo": "baz"})
   392  	relatedRS.Name = "rs2"
   393  	unrelatedRS := newReplicaSet(1, map[string]string{"foo": "quux"})
   394  	unrelatedRS.Name = "rs3"
   395  	unrelatedRS.ObjectMeta.OwnerReferences[0].UID = "456"
   396  	pendingDeletionRS := newReplicaSet(1, map[string]string{"foo": "xyzzy"})
   397  	pendingDeletionRS.Name = "rs4"
   398  	pendingDeletionRS.ObjectMeta.OwnerReferences[0].UID = "789"
   399  	now := metav1.Now()
   400  	pendingDeletionRS.DeletionTimestamp = &now
   401  	logger, _ := ktesting.NewTestContext(t)
   402  
   403  	stopCh := make(chan struct{})
   404  	defer close(stopCh)
   405  	manager, informers := testNewReplicaSetControllerFromClient(t, clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
   406  	testCases := []struct {
   407  		name        string
   408  		rss         []*apps.ReplicaSet
   409  		rs          *apps.ReplicaSet
   410  		expectedRSs []*apps.ReplicaSet
   411  	}{
   412  		{
   413  			name:        "expect to get back a ReplicaSet that is pending deletion",
   414  			rss:         []*apps.ReplicaSet{pendingDeletionRS, unrelatedRS},
   415  			rs:          pendingDeletionRS,
   416  			expectedRSs: []*apps.ReplicaSet{pendingDeletionRS},
   417  		},
   418  		{
   419  			name:        "expect to get back only the given ReplicaSet if there is no related ReplicaSet",
   420  			rss:         []*apps.ReplicaSet{someRS, unrelatedRS},
   421  			rs:          someRS,
   422  			expectedRSs: []*apps.ReplicaSet{someRS},
   423  		},
   424  		{
   425  			name:        "expect to get back the given ReplicaSet as well as any related ReplicaSet but not an unrelated ReplicaSet",
   426  			rss:         []*apps.ReplicaSet{someRS, relatedRS, unrelatedRS},
   427  			rs:          someRS,
   428  			expectedRSs: []*apps.ReplicaSet{someRS, relatedRS},
   429  		},
   430  	}
   431  	for _, c := range testCases {
   432  		for _, r := range c.rss {
   433  			informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
   434  		}
   435  		actualRSs := manager.getReplicaSetsWithSameController(logger, c.rs)
   436  		var actualRSNames, expectedRSNames []string
   437  		for _, r := range actualRSs {
   438  			actualRSNames = append(actualRSNames, r.Name)
   439  		}
   440  		for _, r := range c.expectedRSs {
   441  			expectedRSNames = append(expectedRSNames, r.Name)
   442  		}
   443  		sort.Strings(actualRSNames)
   444  		sort.Strings(expectedRSNames)
   445  		if !reflect.DeepEqual(actualRSNames, expectedRSNames) {
   446  			t.Errorf("Got [%s]; expected [%s]", strings.Join(actualRSNames, ", "), strings.Join(expectedRSNames, ", "))
   447  		}
   448  	}
   449  }
   450  
   451  func BenchmarkGetReplicaSetsWithSameController(b *testing.B) {
   452  	stopCh := make(chan struct{})
   453  	defer close(stopCh)
   454  	controller, informers := testNewReplicaSetControllerFromClient(b, clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
   455  	logger, _ := ktesting.NewTestContext(b)
   456  
   457  	targetRS := newReplicaSet(1, map[string]string{"foo": "bar"})
   458  	targetRS.Name = "rs1"
   459  	targetRS.ObjectMeta.OwnerReferences[0].UID = "123456"
   460  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(targetRS)
   461  	relatedRS := newReplicaSet(1, map[string]string{"foo": "bar"})
   462  	relatedRS.Name = "rs2"
   463  	relatedRS.ObjectMeta.OwnerReferences[0].UID = "123456"
   464  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(relatedRS)
   465  	for i := 0; i < 100; i++ {
   466  		unrelatedRS := newReplicaSet(1, map[string]string{"foo": fmt.Sprintf("baz-%d", i)})
   467  		unrelatedRS.Name = fmt.Sprintf("rs-%d", i)
   468  		unrelatedRS.ObjectMeta.OwnerReferences[0].UID = types.UID(fmt.Sprintf("%d", i))
   469  		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(unrelatedRS)
   470  	}
   471  
   472  	b.ReportAllocs()
   473  	b.ResetTimer()
   474  	for n := 0; n < b.N; n++ {
   475  		gotRSs := controller.getReplicaSetsWithSameController(logger, targetRS)
   476  		if len(gotRSs) != 2 {
   477  			b.Errorf("Incorrect number of ReplicaSets, expected 2, got: %d", len(gotRSs))
   478  		}
   479  	}
   480  }
   481  
   482  func TestPodControllerLookup(t *testing.T) {
   483  	stopCh := make(chan struct{})
   484  	defer close(stopCh)
   485  	manager, informers := testNewReplicaSetControllerFromClient(t, clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
   486  	testCases := []struct {
   487  		inRSs     []*apps.ReplicaSet
   488  		pod       *v1.Pod
   489  		outRSName string
   490  	}{
   491  		// pods without labels don't match any ReplicaSets
   492  		{
   493  			inRSs: []*apps.ReplicaSet{
   494  				{ObjectMeta: metav1.ObjectMeta{Name: "basic"}}},
   495  			pod:       &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo1", Namespace: metav1.NamespaceAll}},
   496  			outRSName: "",
   497  		},
   498  		// Matching labels, not namespace
   499  		{
   500  			inRSs: []*apps.ReplicaSet{
   501  				{
   502  					ObjectMeta: metav1.ObjectMeta{Name: "foo"},
   503  					Spec: apps.ReplicaSetSpec{
   504  						Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
   505  					},
   506  				},
   507  			},
   508  			pod: &v1.Pod{
   509  				ObjectMeta: metav1.ObjectMeta{
   510  					Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
   511  			outRSName: "",
   512  		},
   513  		// Matching ns and labels returns the key to the ReplicaSet, not the ReplicaSet name
   514  		{
   515  			inRSs: []*apps.ReplicaSet{
   516  				{
   517  					ObjectMeta: metav1.ObjectMeta{Name: "bar", Namespace: "ns"},
   518  					Spec: apps.ReplicaSetSpec{
   519  						Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
   520  					},
   521  				},
   522  			},
   523  			pod: &v1.Pod{
   524  				ObjectMeta: metav1.ObjectMeta{
   525  					Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
   526  			outRSName: "bar",
   527  		},
   528  	}
   529  	for _, c := range testCases {
   530  		for _, r := range c.inRSs {
   531  			informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
   532  		}
   533  		if rss := manager.getPodReplicaSets(c.pod); rss != nil {
   534  			if len(rss) != 1 {
   535  				t.Errorf("len(rss) = %v, want %v", len(rss), 1)
   536  				continue
   537  			}
   538  			rs := rss[0]
   539  			if c.outRSName != rs.Name {
   540  				t.Errorf("Got replica set %+v expected %+v", rs.Name, c.outRSName)
   541  			}
   542  		} else if c.outRSName != "" {
   543  			t.Errorf("Expected a replica set %v for pod %v, found none", c.outRSName, c.pod.Name)
   544  		}
   545  	}
   546  }
   547  
   548  func TestRelatedPodsLookup(t *testing.T) {
   549  	someRS := newReplicaSet(1, map[string]string{"foo": "bar"})
   550  	someRS.Name = "foo1"
   551  	relatedRS := newReplicaSet(1, map[string]string{"foo": "baz"})
   552  	relatedRS.Name = "foo2"
   553  	unrelatedRS := newReplicaSet(1, map[string]string{"foo": "quux"})
   554  	unrelatedRS.Name = "bar1"
   555  	unrelatedRS.ObjectMeta.OwnerReferences[0].UID = "456"
   556  	pendingDeletionRS := newReplicaSet(1, map[string]string{"foo": "xyzzy"})
   557  	pendingDeletionRS.Name = "foo3"
   558  	pendingDeletionRS.ObjectMeta.OwnerReferences[0].UID = "789"
   559  	now := metav1.Now()
   560  	pendingDeletionRS.DeletionTimestamp = &now
   561  	pod1 := newPod("pod1", someRS, v1.PodRunning, nil, true)
   562  	pod2 := newPod("pod2", someRS, v1.PodRunning, nil, true)
   563  	pod3 := newPod("pod3", relatedRS, v1.PodRunning, nil, true)
   564  	pod4 := newPod("pod4", unrelatedRS, v1.PodRunning, nil, true)
   565  	logger, _ := ktesting.NewTestContext(t)
   566  
   567  	stopCh := make(chan struct{})
   568  	defer close(stopCh)
   569  	manager, informers := testNewReplicaSetControllerFromClient(t, clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas)
   570  	testCases := []struct {
   571  		name             string
   572  		rss              []*apps.ReplicaSet
   573  		pods             []*v1.Pod
   574  		rs               *apps.ReplicaSet
   575  		expectedPodNames []string
   576  	}{
   577  		{
   578  			name:             "expect to get a pod even if its owning ReplicaSet is pending deletion",
   579  			rss:              []*apps.ReplicaSet{pendingDeletionRS, unrelatedRS},
   580  			rs:               pendingDeletionRS,
   581  			pods:             []*v1.Pod{newPod("pod", pendingDeletionRS, v1.PodRunning, nil, true)},
   582  			expectedPodNames: []string{"pod"},
   583  		},
   584  		{
   585  			name:             "expect to get only the ReplicaSet's own pods if there is no related ReplicaSet",
   586  			rss:              []*apps.ReplicaSet{someRS, unrelatedRS},
   587  			rs:               someRS,
   588  			pods:             []*v1.Pod{pod1, pod2, pod4},
   589  			expectedPodNames: []string{"pod1", "pod2"},
   590  		},
   591  		{
   592  			name:             "expect to get own pods as well as any related ReplicaSet's but not an unrelated ReplicaSet's",
   593  			rss:              []*apps.ReplicaSet{someRS, relatedRS, unrelatedRS},
   594  			rs:               someRS,
   595  			pods:             []*v1.Pod{pod1, pod2, pod3, pod4},
   596  			expectedPodNames: []string{"pod1", "pod2", "pod3"},
   597  		},
   598  	}
   599  	for _, c := range testCases {
   600  		for _, r := range c.rss {
   601  			informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r)
   602  		}
   603  		for _, pod := range c.pods {
   604  			informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
   605  			manager.addPod(logger, pod)
   606  		}
   607  		actualPods, err := manager.getIndirectlyRelatedPods(logger, c.rs)
   608  		if err != nil {
   609  			t.Errorf("Unexpected error from getIndirectlyRelatedPods: %v", err)
   610  		}
   611  		var actualPodNames []string
   612  		for _, pod := range actualPods {
   613  			actualPodNames = append(actualPodNames, pod.Name)
   614  		}
   615  		sort.Strings(actualPodNames)
   616  		sort.Strings(c.expectedPodNames)
   617  		if !reflect.DeepEqual(actualPodNames, c.expectedPodNames) {
   618  			t.Errorf("Got [%s]; expected [%s]", strings.Join(actualPodNames, ", "), strings.Join(c.expectedPodNames, ", "))
   619  		}
   620  	}
   621  }
   622  
   623  func TestWatchControllers(t *testing.T) {
   624  
   625  	fakeWatch := watch.NewFake()
   626  	client := fake.NewSimpleClientset()
   627  	client.PrependWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
   628  	stopCh := make(chan struct{})
   629  	defer close(stopCh)
   630  	informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
   631  	tCtx := ktesting.Init(t)
   632  	manager := NewReplicaSetController(
   633  		tCtx,
   634  		informers.Apps().V1().ReplicaSets(),
   635  		informers.Core().V1().Pods(),
   636  		client,
   637  		BurstReplicas,
   638  	)
   639  	informers.Start(stopCh)
   640  	informers.WaitForCacheSync(stopCh)
   641  
   642  	var testRSSpec apps.ReplicaSet
   643  	received := make(chan string)
   644  
   645  	// The update sent through the fakeWatcher should make its way into the workqueue,
   646  	// and eventually into the syncHandler. The handler validates the received controller
   647  	// and closes the received channel to indicate that the test can finish.
   648  	manager.syncHandler = func(ctx context.Context, key string) error {
   649  		obj, exists, err := informers.Apps().V1().ReplicaSets().Informer().GetIndexer().GetByKey(key)
   650  		if !exists || err != nil {
   651  			t.Errorf("Expected to find replica set under key %v", key)
   652  		}
   653  		rsSpec := *obj.(*apps.ReplicaSet)
   654  		if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
   655  			t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
   656  		}
   657  		close(received)
   658  		return nil
   659  	}
   660  	// Start only the ReplicaSet watcher and the workqueue, send a watch event,
   661  	// and make sure it hits the sync method.
   662  	go wait.UntilWithContext(tCtx, manager.worker, 10*time.Millisecond)
   663  
   664  	testRSSpec.Name = "foo"
   665  	fakeWatch.Add(&testRSSpec)
   666  
   667  	select {
   668  	case <-received:
   669  	case <-time.After(wait.ForeverTestTimeout):
   670  		t.Errorf("unexpected timeout from result channel")
   671  	}
   672  }
   673  
   674  func TestWatchPods(t *testing.T) {
   675  	_, ctx := ktesting.NewTestContext(t)
   676  	client := fake.NewSimpleClientset()
   677  
   678  	fakeWatch := watch.NewFake()
   679  	client.PrependWatchReactor("pods", core.DefaultWatchReactor(fakeWatch, nil))
   680  
   681  	stopCh := make(chan struct{})
   682  	defer close(stopCh)
   683  
   684  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   685  
   686  	// Put one ReplicaSet into the shared informer
   687  	labelMap := map[string]string{"foo": "bar"}
   688  	testRSSpec := newReplicaSet(1, labelMap)
   689  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec)
   690  
   691  	received := make(chan string)
   692  	// The pod update sent through the fakeWatcher should figure out the managing ReplicaSet and
   693  	// send it into the syncHandler.
   694  	manager.syncHandler = func(ctx context.Context, key string) error {
   695  		namespace, name, err := cache.SplitMetaNamespaceKey(key)
   696  		if err != nil {
   697  			t.Errorf("Error splitting key: %v", err)
   698  		}
   699  		rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name)
   700  		if err != nil {
   701  			t.Errorf("Expected to find replica set under key %v: %v", key, err)
   702  		}
   703  		if !apiequality.Semantic.DeepDerivative(rsSpec, testRSSpec) {
   704  			t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
   705  		}
   706  		close(received)
   707  		return nil
   708  	}
   709  
   710  	// Start only the pod watcher and the workqueue, send a watch event,
   711  	// and make sure it hits the sync method for the right ReplicaSet.
   712  	go informers.Core().V1().Pods().Informer().Run(stopCh)
   713  	go manager.Run(ctx, 1)
   714  
   715  	pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
   716  	testPod := pods.Items[0]
   717  	testPod.Status.Phase = v1.PodFailed
   718  	fakeWatch.Add(&testPod)
   719  
   720  	select {
   721  	case <-received:
   722  	case <-time.After(wait.ForeverTestTimeout):
   723  		t.Errorf("unexpected timeout from result channel")
   724  	}
   725  }
   726  
   727  func TestUpdatePods(t *testing.T) {
   728  	logger, ctx := ktesting.NewTestContext(t)
   729  	stopCh := make(chan struct{})
   730  	defer close(stopCh)
   731  	manager, informers := testNewReplicaSetControllerFromClient(t, fake.NewSimpleClientset(), stopCh, BurstReplicas)
   732  
   733  	received := make(chan string)
   734  
   735  	manager.syncHandler = func(ctx context.Context, key string) error {
   736  		namespace, name, err := cache.SplitMetaNamespaceKey(key)
   737  		if err != nil {
   738  			t.Errorf("Error splitting key: %v", err)
   739  		}
   740  		rsSpec, err := manager.rsLister.ReplicaSets(namespace).Get(name)
   741  		if err != nil {
   742  			t.Errorf("Expected to find replica set under key %v: %v", key, err)
   743  		}
   744  		received <- rsSpec.Name
   745  		return nil
   746  	}
   747  
   748  	go wait.UntilWithContext(ctx, manager.worker, 10*time.Millisecond)
   749  
   750  	// Put 2 ReplicaSets and one pod into the informers
   751  	labelMap1 := map[string]string{"foo": "bar"}
   752  	testRSSpec1 := newReplicaSet(1, labelMap1)
   753  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(testRSSpec1)
   754  	testRSSpec2 := *testRSSpec1
   755  	labelMap2 := map[string]string{"bar": "foo"}
   756  	testRSSpec2.Spec.Selector = &metav1.LabelSelector{MatchLabels: labelMap2}
   757  	testRSSpec2.Name = "barfoo"
   758  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&testRSSpec2)
   759  
   760  	isController := true
   761  	controllerRef1 := metav1.OwnerReference{UID: testRSSpec1.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: testRSSpec1.Name, Controller: &isController}
   762  	controllerRef2 := metav1.OwnerReference{UID: testRSSpec2.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: testRSSpec2.Name, Controller: &isController}
   763  
   764  	// case 1: Pod with a ControllerRef
   765  	pod1 := newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
   766  	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef1}
   767  	pod1.ResourceVersion = "1"
   768  	pod2 := pod1
   769  	pod2.Labels = labelMap2
   770  	pod2.ResourceVersion = "2"
   771  	manager.updatePod(logger, &pod1, &pod2)
   772  	expected := sets.NewString(testRSSpec1.Name)
   773  	for _, name := range expected.List() {
   774  		t.Logf("Expecting update for %+v", name)
   775  		select {
   776  		case got := <-received:
   777  			if !expected.Has(got) {
   778  				t.Errorf("Expected keys %#v got %v", expected, got)
   779  			}
   780  		case <-time.After(wait.ForeverTestTimeout):
   781  			t.Errorf("Expected update notifications for replica sets")
   782  		}
   783  	}
   784  
   785  	// case 2: Remove ControllerRef (orphan). Expect to sync label-matching RS.
   786  	pod1 = newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
   787  	pod1.ResourceVersion = "1"
   788  	pod1.Labels = labelMap2
   789  	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef2}
   790  	pod2 = pod1
   791  	pod2.OwnerReferences = nil
   792  	pod2.ResourceVersion = "2"
   793  	manager.updatePod(logger, &pod1, &pod2)
   794  	expected = sets.NewString(testRSSpec2.Name)
   795  	for _, name := range expected.List() {
   796  		t.Logf("Expecting update for %+v", name)
   797  		select {
   798  		case got := <-received:
   799  			if !expected.Has(got) {
   800  				t.Errorf("Expected keys %#v got %v", expected, got)
   801  			}
   802  		case <-time.After(wait.ForeverTestTimeout):
   803  			t.Errorf("Expected update notifications for replica sets")
   804  		}
   805  	}
   806  
   807  	// case 3: Remove ControllerRef (orphan). Expect to sync both former owner and
   808  	// any label-matching RS.
   809  	pod1 = newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
   810  	pod1.ResourceVersion = "1"
   811  	pod1.Labels = labelMap2
   812  	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef1}
   813  	pod2 = pod1
   814  	pod2.OwnerReferences = nil
   815  	pod2.ResourceVersion = "2"
   816  	manager.updatePod(logger, &pod1, &pod2)
   817  	expected = sets.NewString(testRSSpec1.Name, testRSSpec2.Name)
   818  	for _, name := range expected.List() {
   819  		t.Logf("Expecting update for %+v", name)
   820  		select {
   821  		case got := <-received:
   822  			if !expected.Has(got) {
   823  				t.Errorf("Expected keys %#v got %v", expected, got)
   824  			}
   825  		case <-time.After(wait.ForeverTestTimeout):
   826  			t.Errorf("Expected update notifications for replica sets")
   827  		}
   828  	}
   829  
   830  	// case 4: Keep ControllerRef, change labels. Expect to sync owning RS.
   831  	pod1 = newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
   832  	pod1.ResourceVersion = "1"
   833  	pod1.Labels = labelMap1
   834  	pod1.OwnerReferences = []metav1.OwnerReference{controllerRef2}
   835  	pod2 = pod1
   836  	pod2.Labels = labelMap2
   837  	pod2.ResourceVersion = "2"
   838  	manager.updatePod(logger, &pod1, &pod2)
   839  	expected = sets.NewString(testRSSpec2.Name)
   840  	for _, name := range expected.List() {
   841  		t.Logf("Expecting update for %+v", name)
   842  		select {
   843  		case got := <-received:
   844  			if !expected.Has(got) {
   845  				t.Errorf("Expected keys %#v got %v", expected, got)
   846  			}
   847  		case <-time.After(wait.ForeverTestTimeout):
   848  			t.Errorf("Expected update notifications for replica sets")
   849  		}
   850  	}
   851  }
   852  
   853  func TestControllerUpdateRequeue(t *testing.T) {
   854  	_, ctx := ktesting.NewTestContext(t)
   855  	// The client's update reactor should force a requeue of the controller because it fails to update status.Replicas.
   856  	labelMap := map[string]string{"foo": "bar"}
   857  	rs := newReplicaSet(1, labelMap)
   858  	client := fake.NewSimpleClientset(rs)
   859  	client.PrependReactor("update", "replicasets",
   860  		func(action core.Action) (bool, runtime.Object, error) {
   861  			if action.GetSubresource() != "status" {
   862  				return false, nil, nil
   863  			}
   864  			return true, nil, errors.New("failed to update status")
   865  		})
   866  	stopCh := make(chan struct{})
   867  	defer close(stopCh)
   868  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, BurstReplicas)
   869  
   870  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
   871  	rs.Status = apps.ReplicaSetStatus{Replicas: 2}
   872  	newPodList(informers.Core().V1().Pods().Informer().GetIndexer(), 1, v1.PodRunning, labelMap, rs, "pod")
   873  
   874  	fakePodControl := controller.FakePodControl{}
   875  	manager.podControl = &fakePodControl
   876  
   877  	// Enqueue once. Then process it. Disable rate-limiting for this.
   878  	manager.queue = workqueue.NewRateLimitingQueue(workqueue.NewMaxOfRateLimiter())
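        	// A MaxOfRateLimiter built with no child limiters always reports a zero
        	// delay, so a failed sync re-adds the key immediately and queue.Len()
        	// below can observe the requeue without waiting.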
   879  	manager.enqueueRS(rs)
   880  	manager.processNextWorkItem(ctx)
   881  	// It should have been requeued.
   882  	if got, want := manager.queue.Len(), 1; got != want {
   883  		t.Errorf("queue.Len() = %v, want %v", got, want)
   884  	}
   885  }
   886  
   887  func TestControllerUpdateStatusWithFailure(t *testing.T) {
   888  	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
   889  	fakeClient := &fake.Clientset{}
   890  	fakeClient.AddReactor("get", "replicasets", func(action core.Action) (bool, runtime.Object, error) { return true, rs, nil })
   891  	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
   892  		return true, &apps.ReplicaSet{}, fmt.Errorf("fake error")
   893  	})
   894  	fakeRSClient := fakeClient.AppsV1().ReplicaSets("default")
   895  	numReplicas := int32(10)
   896  	newStatus := apps.ReplicaSetStatus{Replicas: numReplicas}
   897  	logger, _ := ktesting.NewTestContext(t)
   898  	updateReplicaSetStatus(logger, fakeRSClient, rs, newStatus)
   899  	updates, gets := 0, 0
   900  	for _, a := range fakeClient.Actions() {
   901  		if a.GetResource().Resource != "replicasets" {
   902  			t.Errorf("Unexpected action %+v", a)
   903  			continue
   904  		}
   905  
   906  		switch action := a.(type) {
   907  		case core.GetAction:
   908  			gets++
   909  			// Make sure the get is for the right ReplicaSet even though the update failed.
   910  			if action.GetName() != rs.Name {
   911  				t.Errorf("Expected get for ReplicaSet %v, got %+v instead", rs.Name, action.GetName())
   912  			}
   913  		case core.UpdateAction:
   914  			updates++
   915  			// Confirm that the update has the right status.Replicas even though the Get
   916  			// returned a ReplicaSet with replicas=1.
   917  			if c, ok := action.GetObject().(*apps.ReplicaSet); !ok {
   918  				t.Errorf("Expected a ReplicaSet as the argument to update, got %T", c)
   919  			} else if c.Status.Replicas != numReplicas {
   920  				t.Errorf("Expected update for ReplicaSet to contain replicas %v, got %v instead",
   921  					numReplicas, c.Status.Replicas)
   922  			}
   923  		default:
   924  			t.Errorf("Unexpected action %+v", a)
   925  			break
   926  		}
   927  	}
   928  	if gets != 1 || updates != 2 {
   929  		t.Errorf("Expected 1 get and 2 updates, got %d gets %d updates", gets, updates)
   930  	}
   931  }
   932  
   933  // TODO: This test is too hairy for a unit test. It should be moved to an E2E suite.
   934  func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
   935  	logger, ctx := ktesting.NewTestContext(t)
   936  	labelMap := map[string]string{"foo": "bar"}
   937  	rsSpec := newReplicaSet(numReplicas, labelMap)
   938  	client := fake.NewSimpleClientset(rsSpec)
   939  	fakePodControl := controller.FakePodControl{}
   940  	stopCh := make(chan struct{})
   941  	defer close(stopCh)
   942  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, burstReplicas)
   943  	manager.podControl = &fakePodControl
   944  
   945  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
   946  
   947  	expectedPods := int32(0)
   948  	pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")
   949  
   950  	rsKey, err := controller.KeyFunc(rsSpec)
   951  	if err != nil {
   952  		t.Errorf("Couldn't get key for object %#v: %v", rsSpec, err)
   953  	}
   954  
   955  	// Size up the controller, then size it down, and confirm the expected create/delete pattern
   956  	for _, replicas := range []int32{int32(numReplicas), 0} {
   957  
   958  		*(rsSpec.Spec.Replicas) = replicas
   959  		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
   960  
   961  		for i := 0; i < numReplicas; i += burstReplicas {
   962  			manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
   963  
   964  			// The store accrues active pods. It's also used by the ReplicaSet to determine how many
   965  			// replicas to create.
   966  			activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List()))
   967  			if replicas != 0 {
   968  				// This is the number of pods currently "in flight". They were created by the
   969  				// ReplicaSet controller above, which then puts the ReplicaSet to sleep till
   970  				// all of them have been observed.
   971  				expectedPods = replicas - activePods
   972  				if expectedPods > int32(burstReplicas) {
   973  					expectedPods = int32(burstReplicas)
   974  				}
   975  				// This validates the ReplicaSet manager sync actually created pods
   976  				err := validateSyncReplicaSet(&fakePodControl, int(expectedPods), 0, 0)
   977  				if err != nil {
   978  					t.Fatal(err)
   979  				}
   980  
   981  				// This simulates the watch events for all but 1 of the expected pods.
   982  				// None of these should wake the controller because it has expectations==BurstReplicas.
   983  				for i := int32(0); i < expectedPods-1; i++ {
   984  					informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[i])
   985  					manager.addPod(logger, &pods.Items[i])
   986  				}
   987  
   988  				podExp, exists, err := manager.expectations.GetExpectations(rsKey)
   989  				if !exists || err != nil {
   990  					t.Fatalf("Did not find expectations for rs.")
   991  				}
   992  				if add, _ := podExp.GetExpectations(); add != 1 {
   993  					t.Fatalf("Expectations are wrong %v", podExp)
   994  				}
   995  			} else {
   996  				expectedPods = (replicas - activePods) * -1
   997  				if expectedPods > int32(burstReplicas) {
   998  					expectedPods = int32(burstReplicas)
   999  				}
  1000  				err := validateSyncReplicaSet(&fakePodControl, 0, int(expectedPods), 0)
  1001  				if err != nil {
  1002  					t.Fatal(err)
  1003  				}
  1004  
  1005  				// To accurately simulate a watch we must delete the exact pods
  1006  				// the rs is waiting for.
  1007  				expectedDels := manager.expectations.GetUIDs(GetKey(rsSpec, t))
  1008  				podsToDelete := []*v1.Pod{}
  1009  				isController := true
  1010  				for _, key := range expectedDels.List() {
  1011  					nsName := strings.Split(key, "/")
  1012  					podsToDelete = append(podsToDelete, &v1.Pod{
  1013  						ObjectMeta: metav1.ObjectMeta{
  1014  							Name:      nsName[1],
  1015  							Namespace: nsName[0],
  1016  							Labels:    rsSpec.Spec.Selector.MatchLabels,
  1017  							OwnerReferences: []metav1.OwnerReference{
  1018  								{UID: rsSpec.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rsSpec.Name, Controller: &isController},
  1019  							},
  1020  						},
  1021  					})
  1022  				}
  1023  				// Don't delete all pods because we confirm that the last pod
  1024  				// has exactly one expectation at the end, to verify that we
  1025  				// don't double delete.
  1026  				for i := range podsToDelete[1:] {
  1027  					informers.Core().V1().Pods().Informer().GetIndexer().Delete(podsToDelete[i])
  1028  					manager.deletePod(logger, podsToDelete[i])
  1029  				}
  1030  				podExp, exists, err := manager.expectations.GetExpectations(rsKey)
  1031  				if !exists || err != nil {
  1032  					t.Fatalf("Did not find expectations for ReplicaSet.")
  1033  				}
  1034  				if _, del := podExp.GetExpectations(); del != 1 {
  1035  					t.Fatalf("Expectations are wrong %v", podExp)
  1036  				}
  1037  			}
  1038  
  1039  			// Check that the ReplicaSet didn't take any action for all the above pods
  1040  			fakePodControl.Clear()
  1041  			manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
  1042  			err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
  1043  			if err != nil {
  1044  				t.Fatal(err)
  1045  			}
  1046  
  1047  			// Create/Delete the last pod
  1048  			// The last add pod will decrease the expectation of the ReplicaSet to 0,
  1049  			// which will cause it to create/delete the remaining replicas up to burstReplicas.
  1050  			if replicas != 0 {
  1051  				informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[expectedPods-1])
  1052  				manager.addPod(logger, &pods.Items[expectedPods-1])
  1053  			} else {
  1054  				expectedDel := manager.expectations.GetUIDs(GetKey(rsSpec, t))
  1055  				if expectedDel.Len() != 1 {
  1056  					t.Fatalf("Waiting on unexpected number of deletes.")
  1057  				}
  1058  				nsName := strings.Split(expectedDel.List()[0], "/")
  1059  				isController := true
  1060  				lastPod := &v1.Pod{
  1061  					ObjectMeta: metav1.ObjectMeta{
  1062  						Name:      nsName[1],
  1063  						Namespace: nsName[0],
  1064  						Labels:    rsSpec.Spec.Selector.MatchLabels,
  1065  						OwnerReferences: []metav1.OwnerReference{
  1066  							{UID: rsSpec.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rsSpec.Name, Controller: &isController},
  1067  						},
  1068  					},
  1069  				}
  1070  				informers.Core().V1().Pods().Informer().GetIndexer().Delete(lastPod)
  1071  				manager.deletePod(logger, lastPod)
  1072  			}
  1073  			pods.Items = pods.Items[expectedPods:]
  1074  		}
  1075  
  1076  		// Confirm that we've created the right number of replicas
  1077  		activePods := int32(len(informers.Core().V1().Pods().Informer().GetIndexer().List()))
  1078  		if activePods != *(rsSpec.Spec.Replicas) {
  1079  			t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(rsSpec.Spec.Replicas), activePods)
  1080  		}
  1081  		// Replenish the pod list, since we cut it down while sizing up
  1082  		pods = newPodList(nil, int(replicas), v1.PodRunning, labelMap, rsSpec, "pod")
  1083  	}
  1084  }
  1085  
  1086  func TestControllerBurstReplicas(t *testing.T) {
  1087  	doTestControllerBurstReplicas(t, 5, 30)
  1088  	doTestControllerBurstReplicas(t, 5, 12)
  1089  	doTestControllerBurstReplicas(t, 3, 2)
  1090  }
  1091  
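        // FakeRSExpectations wraps ControllerExpectations so a test can run a hook
        // (expSatisfied) at the exact moment SatisfiedExpectations is consulted,
        // while forcing its answer through the satisfied field.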
  1092  type FakeRSExpectations struct {
  1093  	*controller.ControllerExpectations
  1094  	satisfied    bool
  1095  	expSatisfied func()
  1096  }
  1097  
  1098  func (fe FakeRSExpectations) SatisfiedExpectations(logger klog.Logger, controllerKey string) bool {
  1099  	fe.expSatisfied()
  1100  	return fe.satisfied
  1101  }
  1102  
  1103  // TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
  1104  // and checking expectations.
  1105  func TestRSSyncExpectations(t *testing.T) {
  1106  	_, ctx := ktesting.NewTestContext(t)
  1107  	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
  1108  	fakePodControl := controller.FakePodControl{}
  1109  	stopCh := make(chan struct{})
  1110  	defer close(stopCh)
  1111  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, 2)
  1112  	manager.podControl = &fakePodControl
  1113  
  1114  	labelMap := map[string]string{"foo": "bar"}
  1115  	rsSpec := newReplicaSet(2, labelMap)
  1116  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rsSpec)
  1117  	pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
  1118  	informers.Core().V1().Pods().Informer().GetIndexer().Add(&pods.Items[0])
  1119  	postExpectationsPod := pods.Items[1]
  1120  
  1121  	manager.expectations = controller.NewUIDTrackingControllerExpectations(FakeRSExpectations{
  1122  		controller.NewControllerExpectations(), true, func() {
  1123  			// If we check active pods before checking expectations, the
  1124  			// ReplicaSet will create a new replica because it doesn't see
  1125  			// this pod, but has fulfilled its expectations.
  1126  			informers.Core().V1().Pods().Informer().GetIndexer().Add(&postExpectationsPod)
  1127  		},
  1128  	})
  1129  	manager.syncReplicaSet(ctx, GetKey(rsSpec, t))
  1130  	err := validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
  1131  	if err != nil {
  1132  		t.Fatal(err)
  1133  	}
  1134  }
  1135  
  1136  func TestDeleteControllerAndExpectations(t *testing.T) {
  1137  	logger, ctx := ktesting.NewTestContext(t)
  1138  	rs := newReplicaSet(1, map[string]string{"foo": "bar"})
  1139  	client := fake.NewSimpleClientset(rs)
  1140  	stopCh := make(chan struct{})
  1141  	defer close(stopCh)
  1142  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, 10)
  1143  
  1144  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
  1145  
  1146  	fakePodControl := controller.FakePodControl{}
  1147  	manager.podControl = &fakePodControl
  1148  
  1149  	// This should set expectations for the ReplicaSet
  1150  	manager.syncReplicaSet(ctx, GetKey(rs, t))
  1151  	err := validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
  1152  	if err != nil {
  1153  		t.Fatal(err)
  1154  	}
  1155  	fakePodControl.Clear()
  1156  
  1157  	// Get the ReplicaSet key
  1158  	rsKey, err := controller.KeyFunc(rs)
  1159  	if err != nil {
  1160  		t.Errorf("Couldn't get key for object %#v: %v", rs, err)
  1161  	}
  1162  
  1163  	// This is to simulate a concurrent addPod that has a handle on the expectations
  1164  	// as the controller deletes the ReplicaSet.
  1165  	podExp, exists, err := manager.expectations.GetExpectations(rsKey)
  1166  	if !exists || err != nil {
  1167  		t.Errorf("No expectations found for ReplicaSet")
  1168  	}
  1169  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Delete(rs)
  1170  	manager.deleteRS(logger, rs)
  1171  	manager.syncReplicaSet(ctx, GetKey(rs, t))
  1172  
  1173  	_, exists, err = manager.expectations.GetExpectations(rsKey)
  1174  	if err != nil {
  1175  		t.Errorf("Failed to get controllee expectations: %v", err)
  1176  	}
  1177  	if exists {
  1178  		t.Errorf("Found expectations, expected none since the ReplicaSet has been deleted.")
  1179  	}
  1180  
  1181  	// This should have no effect, since we've deleted the ReplicaSet.
  1182  	podExp.Add(-1, 0)
  1183  	informers.Core().V1().Pods().Informer().GetIndexer().Replace(make([]interface{}, 0), "0")
  1184  	manager.syncReplicaSet(ctx, GetKey(rs, t))
  1185  	err = validateSyncReplicaSet(&fakePodControl, 0, 0, 0)
  1186  	if err != nil {
  1187  		t.Fatal(err)
  1188  	}
  1189  }
  1190  
  1191  func TestExpectationsOnRecreate(t *testing.T) {
  1192  	client := fake.NewSimpleClientset()
  1193  	stopCh := make(chan struct{})
  1194  	defer close(stopCh)
  1195  
  1196  	f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
  1197  	tCtx := ktesting.Init(t)
  1198  	logger := tCtx.Logger()
  1199  	manager := NewReplicaSetController(
  1200  		tCtx,
  1201  		f.Apps().V1().ReplicaSets(),
  1202  		f.Core().V1().Pods(),
  1203  		client,
  1204  		100,
  1205  	)
  1206  	f.Start(stopCh)
  1207  	f.WaitForCacheSync(stopCh)
  1208  	fakePodControl := controller.FakePodControl{}
  1209  	manager.podControl = &fakePodControl
  1210  
  1211  	if manager.queue.Len() != 0 {
  1212  		t.Fatal("Unexpected item in the queue")
  1213  	}
  1214  
  1215  	oldRS := newReplicaSet(1, map[string]string{"foo": "bar"})
  1216  	oldRS, err := client.AppsV1().ReplicaSets(oldRS.Namespace).Create(tCtx, oldRS, metav1.CreateOptions{})
  1217  	if err != nil {
  1218  		t.Fatal(err)
  1219  	}
  1220  
  1221  	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
  1222  		logger.V(8).Info("Waiting for queue to have 1 item", "length", manager.queue.Len())
  1223  		return manager.queue.Len() == 1, nil
  1224  	})
  1225  	if err != nil {
  1226  		t.Fatalf("initial RS didn't result in new item in the queue: %v", err)
  1227  	}
  1228  
  1229  	ok := manager.processNextWorkItem(tCtx)
  1230  	if !ok {
  1231  		t.Fatal("queue is shutting down")
  1232  	}
  1233  
  1234  	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
  1235  	if err != nil {
  1236  		t.Fatal(err)
  1237  	}
  1238  	fakePodControl.Clear()
  1239  
  1240  	oldRSKey, err := controller.KeyFunc(oldRS)
  1241  	if err != nil {
  1242  		t.Fatal(err)
  1243  	}
  1244  
  1245  	rsExp, exists, err := manager.expectations.GetExpectations(oldRSKey)
  1246  	if err != nil {
  1247  		t.Fatal(err)
  1248  	}
  1249  	if !exists {
  1250  		t.Errorf("No expectations found for ReplicaSet %q", oldRSKey)
  1251  	}
  1252  	if rsExp.Fulfilled() {
  1253  		t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", oldRSKey)
  1254  	}
  1255  
  1256  	if manager.queue.Len() != 0 {
  1257  		t.Fatal("Unexpected item in the queue")
  1258  	}
  1259  
  1260  	err = client.AppsV1().ReplicaSets(oldRS.Namespace).Delete(tCtx, oldRS.Name, metav1.DeleteOptions{})
  1261  	if err != nil {
  1262  		t.Fatal(err)
  1263  	}
  1264  
  1265  	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
  1266  		logger.V(8).Info("Waiting for queue to have 1 item", "length", manager.queue.Len())
  1267  		return manager.queue.Len() == 1, nil
  1268  	})
  1269  	if err != nil {
  1270  		t.Fatalf("Deleting RS didn't result in new item in the queue: %v", err)
  1271  	}
  1272  
  1273  	_, exists, err = manager.expectations.GetExpectations(oldRSKey)
  1274  	if err != nil {
  1275  		t.Fatal(err)
  1276  	}
  1277  	if exists {
  1278  		t.Errorf("There should be no expectations for ReplicaSet %q after it was deleted", oldRSKey)
  1279  	}
  1280  
  1281  	// skip sync for the delete event so we only see the new RS in sync
  1282  	key, quit := manager.queue.Get()
  1283  	if quit {
  1284  		t.Fatal("Queue is shutting down!")
  1285  	}
  1286  	manager.queue.Done(key)
  1287  	if key != oldRSKey {
  1288  		t.Fatal("Keys should be equal!")
  1289  	}
  1290  
  1291  	if manager.queue.Len() != 0 {
  1292  		t.Fatal("Unexpected item in the queue")
  1293  	}
  1294  
  1295  	newRS := oldRS.DeepCopy()
  1296  	newRS.UID = uuid.NewUUID()
  1297  	newRS, err = client.AppsV1().ReplicaSets(newRS.Namespace).Create(tCtx, newRS, metav1.CreateOptions{})
  1298  	if err != nil {
  1299  		t.Fatal(err)
  1300  	}
  1301  
  1302  	// Sanity check
  1303  	if newRS.UID == oldRS.UID {
  1304  		t.Fatal("New RS has the same UID as the old one!")
  1305  	}
  1306  
  1307  	err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
  1308  		logger.V(8).Info("Waiting for queue to have 1 item", "length", manager.queue.Len())
  1309  		return manager.queue.Len() == 1, nil
  1310  	})
  1311  	if err != nil {
  1312  		t.Fatalf("Re-creating RS didn't result in new item in the queue: %v", err)
  1313  	}
  1314  
  1315  	ok = manager.processNextWorkItem(tCtx)
  1316  	if !ok {
  1317  		t.Fatal("Queue is shutting down!")
  1318  	}
  1319  
  1320  	newRSKey, err := controller.KeyFunc(newRS)
  1321  	if err != nil {
  1322  		t.Fatal(err)
  1323  	}
  1324  	rsExp, exists, err = manager.expectations.GetExpectations(newRSKey)
  1325  	if err != nil {
  1326  		t.Fatal(err)
  1327  	}
  1328  	if !exists {
  1329  		t.Errorf("No expectations found for ReplicaSet %q", newRSKey)
  1330  	}
  1331  	if rsExp.Fulfilled() {
  1332  		t.Errorf("There should be unfulfilled expectations for creating new pods for ReplicaSet %q", newRSKey)
  1333  	}
  1334  
  1335  	err = validateSyncReplicaSet(&fakePodControl, 1, 0, 0)
  1336  	if err != nil {
  1337  		t.Fatal(err)
  1338  	}
  1339  	fakePodControl.Clear()
  1340  }
  1341  
  1342  // shuffle returns a new, randomly shuffled list of the given ReplicaSets.
  1343  func shuffle(controllers []*apps.ReplicaSet) []*apps.ReplicaSet {
  1344  	numControllers := len(controllers)
  1345  	randIndexes := rand.Perm(numControllers)
  1346  	shuffled := make([]*apps.ReplicaSet, numControllers)
  1347  	for i := 0; i < numControllers; i++ {
  1348  		shuffled[i] = controllers[randIndexes[i]]
  1349  	}
  1350  	return shuffled
  1351  }
  1352  
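        // TestOverlappingRSs verifies that when several ReplicaSets with identical
        // CreationTimestamps select the same labels, adding a pod that carries a
        // ControllerRef enqueues only the owning ReplicaSet.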
  1353  func TestOverlappingRSs(t *testing.T) {
  1354  	logger, _ := ktesting.NewTestContext(t)
  1355  	client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
  1356  	labelMap := map[string]string{"foo": "bar"}
  1357  
  1358  	stopCh := make(chan struct{})
  1359  	defer close(stopCh)
  1360  	manager, informers := testNewReplicaSetControllerFromClient(t, client, stopCh, 10)
  1361  
  1362  	// Create ReplicaSets rs1 through rs9, shuffle them randomly, and insert
  1363  	// them into the ReplicaSet controller's store.
  1364  	// All use the same CreationTimestamp since ControllerRef should be able
  1365  	// to handle that.
  1366  	timestamp := metav1.Date(2014, time.December, 0, 0, 0, 0, 0, time.Local)
  1367  	var controllers []*apps.ReplicaSet
  1368  	for j := 1; j < 10; j++ {
  1369  		rsSpec := newReplicaSet(1, labelMap)
  1370  		rsSpec.CreationTimestamp = timestamp
  1371  		rsSpec.Name = fmt.Sprintf("rs%d", j)
  1372  		controllers = append(controllers, rsSpec)
  1373  	}
  1374  	shuffledControllers := shuffle(controllers)
  1375  	for j := range shuffledControllers {
  1376  		informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(shuffledControllers[j])
  1377  	}
  1378  	// Add a pod with a ControllerRef and make sure only the corresponding
  1379  	// ReplicaSet is synced. Pick an RS in the middle since the old code used to
  1380  	// sort by name if all timestamps were equal.
  1381  	rs := controllers[3]
  1382  	pods := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod")
  1383  	pod := &pods.Items[0]
  1384  	isController := true
  1385  	pod.OwnerReferences = []metav1.OwnerReference{
  1386  		{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &isController},
  1387  	}
  1388  	rsKey := GetKey(rs, t)
  1389  
  1390  	manager.addPod(logger, pod)
  1391  	queueRS, _ := manager.queue.Get()
  1392  	if queueRS != rsKey {
  1393  		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
  1394  	}
  1395  }
  1396  
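        // TestDeletionTimestamp verifies that pods carrying a DeletionTimestamp are
        // counted against the ReplicaSet's deletion expectations rather than its
        // creation expectations, and that a given deletion is observed only once
        // across the add, update, and delete pod handlers.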
  1397  func TestDeletionTimestamp(t *testing.T) {
  1398  	logger, _ := ktesting.NewTestContext(t)
  1399  	c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
  1400  	labelMap := map[string]string{"foo": "bar"}
  1401  	stopCh := make(chan struct{})
  1402  	defer close(stopCh)
  1403  	manager, informers := testNewReplicaSetControllerFromClient(t, c, stopCh, 10)
  1404  
  1405  	rs := newReplicaSet(1, labelMap)
  1406  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
  1407  	rsKey, err := controller.KeyFunc(rs)
  1408  	if err != nil {
  1409  		t.Errorf("Couldn't get key for object %#v: %v", rs, err)
  1410  	}
  1411  	pod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
  1412  	pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
  1413  	pod.ResourceVersion = "1"
  1414  	manager.expectations.ExpectDeletions(logger, rsKey, []string{controller.PodKey(&pod)})
  1415  
  1416  	// A pod added with a deletion timestamp should decrement deletions, not creations.
  1417  	manager.addPod(logger, &pod)
  1418  
  1419  	queueRS, _ := manager.queue.Get()
  1420  	if queueRS != rsKey {
  1421  		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
  1422  	}
  1423  	manager.queue.Done(rsKey)
  1424  
  1425  	podExp, exists, err := manager.expectations.GetExpectations(rsKey)
  1426  	if !exists || err != nil || !podExp.Fulfilled() {
  1427  		t.Fatalf("Wrong expectations %#v", podExp)
  1428  	}
  1429  
  1430  	// An update from no deletion timestamp to having one should be treated
  1431  	// as a deletion.
  1432  	oldPod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
  1433  	oldPod.ResourceVersion = "2"
  1434  	manager.expectations.ExpectDeletions(logger, rsKey, []string{controller.PodKey(&pod)})
  1435  	manager.updatePod(logger, &oldPod, &pod)
  1436  
  1437  	queueRS, _ = manager.queue.Get()
  1438  	if queueRS != rsKey {
  1439  		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
  1440  	}
  1441  	manager.queue.Done(rsKey)
  1442  
  1443  	podExp, exists, err = manager.expectations.GetExpectations(rsKey)
  1444  	if !exists || err != nil || !podExp.Fulfilled() {
  1445  		t.Fatalf("Wrong expectations %#v", podExp)
  1446  	}
  1447  
  1448  	// An update to the pod (including an update to the deletion timestamp)
  1449  	// should not be counted as a second delete.
  1450  	isController := true
  1451  	secondPod := &v1.Pod{
  1452  		ObjectMeta: metav1.ObjectMeta{
  1453  			Namespace: pod.Namespace,
  1454  			Name:      "secondPod",
  1455  			Labels:    pod.Labels,
  1456  			OwnerReferences: []metav1.OwnerReference{
  1457  				{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name, Controller: &isController},
  1458  			},
  1459  		},
  1460  	}
  1461  	manager.expectations.ExpectDeletions(logger, rsKey, []string{controller.PodKey(secondPod)})
  1462  	oldPod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
  1463  	oldPod.ResourceVersion = "2"
  1464  	manager.updatePod(logger, &oldPod, &pod)
  1465  
  1466  	podExp, exists, err = manager.expectations.GetExpectations(rsKey)
  1467  	if !exists || err != nil || podExp.Fulfilled() {
  1468  		t.Fatalf("Wrong expectations %#v", podExp)
  1469  	}
  1470  
  1471  	// A pod with a non-nil deletion timestamp should also be ignored by the
  1472  	// delete handler, because it's already been counted in the update.
  1473  	manager.deletePod(logger, &pod)
  1474  	podExp, exists, err = manager.expectations.GetExpectations(rsKey)
  1475  	if !exists || err != nil || podExp.Fulfilled() {
  1476  		t.Fatalf("Wrong expectations %#v", podExp)
  1477  	}
  1478  
  1479  	// Deleting the second pod should clear expectations.
  1480  	manager.deletePod(logger, secondPod)
  1481  
  1482  	queueRS, _ = manager.queue.Get()
  1483  	if queueRS != rsKey {
  1484  		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
  1485  	}
  1486  	manager.queue.Done(rsKey)
  1487  
  1488  	podExp, exists, err = manager.expectations.GetExpectations(rsKey)
  1489  	if !exists || err != nil || !podExp.Fulfilled() {
  1490  		t.Fatalf("Wrong expectations %#v", podExp)
  1491  	}
  1492  }
  1493  
  1494  // setupManagerWithGCEnabled creates a ReplicaSet controller with a FakePodControl, backed by a fake clientset seeded with the given objects.
  1495  func setupManagerWithGCEnabled(t *testing.T, stopCh chan struct{}, objs ...runtime.Object) (manager *ReplicaSetController, fakePodControl *controller.FakePodControl, informers informers.SharedInformerFactory) {
  1496  	c := fake.NewSimpleClientset(objs...)
  1497  	fakePodControl = &controller.FakePodControl{}
  1498  	manager, informers = testNewReplicaSetControllerFromClient(t, c, stopCh, BurstReplicas)
  1499  
  1500  	manager.podControl = fakePodControl
  1501  	return manager, fakePodControl, informers
  1502  }
  1503  
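        // TestDoNotPatchPodWithOtherControlRef verifies that a matching pod that is
        // already owned by a different controller is not adopted (no patch); the
        // ReplicaSet instead creates its full replica count.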
  1504  func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
  1505  	_, ctx := ktesting.NewTestContext(t)
  1506  	labelMap := map[string]string{"foo": "bar"}
  1507  	rs := newReplicaSet(2, labelMap)
  1508  	stopCh := make(chan struct{})
  1509  	defer close(stopCh)
  1510  	manager, fakePodControl, informers := setupManagerWithGCEnabled(t, stopCh, rs)
  1511  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
  1512  	var trueVar = true
  1513  	otherControllerReference := metav1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
  1514  	// Add to the podLister a matching pod controlled by another controller. Expect no patch.
  1515  	pod := newPod("pod", rs, v1.PodRunning, nil, true)
  1516  	pod.OwnerReferences = []metav1.OwnerReference{otherControllerReference}
  1517  	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod)
  1518  	err := manager.syncReplicaSet(ctx, GetKey(rs, t))
  1519  	if err != nil {
  1520  		t.Fatal(err)
  1521  	}
  1522  	// Because the matching pod already has a different controller, 2 new pods should be created.
  1523  	err = validateSyncReplicaSet(fakePodControl, 2, 0, 0)
  1524  	if err != nil {
  1525  		t.Fatal(err)
  1526  	}
  1527  }
  1528  
  1529  func TestPatchPodFails(t *testing.T) {
  1530  	_, ctx := ktesting.NewTestContext(t)
  1531  	labelMap := map[string]string{"foo": "bar"}
  1532  	rs := newReplicaSet(2, labelMap)
  1533  	stopCh := make(chan struct{})
  1534  	defer close(stopCh)
  1535  	manager, fakePodControl, informers := setupManagerWithGCEnabled(t, stopCh, rs)
  1536  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
  1537  	// Add two matching pods to the podLister. Expect two patches to take
  1538  	// control of them.
  1539  	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod1", rs, v1.PodRunning, nil, false))
  1540  	informers.Core().V1().Pods().Informer().GetIndexer().Add(newPod("pod2", rs, v1.PodRunning, nil, false))
  1541  	// Let both patches fail. The RS controller will assume it failed to take
  1542  	// control of the pods and will requeue to try again.
  1543  	fakePodControl.Err = fmt.Errorf("fake Error")
  1544  	rsKey := GetKey(rs, t)
  1545  	err := processSync(ctx, manager, rsKey)
  1546  	if err == nil || !strings.Contains(err.Error(), "fake Error") {
  1547  		t.Errorf("expected fake Error, got %+v", err)
  1548  	}
  1549  	// 2 patches to take control of pod1 and pod2 (both fail).
  1550  	err = validateSyncReplicaSet(fakePodControl, 0, 0, 2)
  1551  	if err != nil {
  1552  		t.Fatal(err)
  1553  	}
  1554  	// RS should requeue itself.
  1555  	queueRS, _ := manager.queue.Get()
  1556  	if queueRS != rsKey {
  1557  		t.Fatalf("Expected to find key %v in queue, found %v", rsKey, queueRS)
  1558  	}
  1559  }
  1560  
  1561  // The RS controller shouldn't adopt or create more pods if the ReplicaSet is
  1562  // about to be deleted.
  1563  func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
  1564  	_, ctx := ktesting.NewTestContext(t)
  1565  	labelMap := map[string]string{"foo": "bar"}
  1566  	rs := newReplicaSet(2, labelMap)
  1567  	now := metav1.Now()
  1568  	rs.DeletionTimestamp = &now
  1569  	stopCh := make(chan struct{})
  1570  	defer close(stopCh)
  1571  	manager, fakePodControl, informers := setupManagerWithGCEnabled(t, stopCh, rs)
  1572  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(rs)
  1573  	pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
  1574  	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
  1575  
  1576  	// no patch, no create
  1577  	err := manager.syncReplicaSet(ctx, GetKey(rs, t))
  1578  	if err != nil {
  1579  		t.Fatal(err)
  1580  	}
  1581  	err = validateSyncReplicaSet(fakePodControl, 0, 0, 0)
  1582  	if err != nil {
  1583  		t.Fatal(err)
  1584  	}
  1585  }
  1586  
  1587  func TestDoNotAdoptOrCreateIfBeingDeletedRace(t *testing.T) {
  1588  	_, ctx := ktesting.NewTestContext(t)
  1589  	labelMap := map[string]string{"foo": "bar"}
  1590  	// Bare client says it IS deleted.
  1591  	rs := newReplicaSet(2, labelMap)
  1592  	now := metav1.Now()
  1593  	rs.DeletionTimestamp = &now
  1594  	stopCh := make(chan struct{})
  1595  	defer close(stopCh)
  1596  	manager, fakePodControl, informers := setupManagerWithGCEnabled(t, stopCh, rs)
  1597  	// Lister (cache) says it's NOT deleted.
  1598  	rs2 := *rs
  1599  	rs2.DeletionTimestamp = nil
  1600  	informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(&rs2)
  1601  
  1602  	// Recheck occurs if a matching orphan is present.
  1603  	pod1 := newPod("pod1", rs, v1.PodRunning, nil, false)
  1604  	informers.Core().V1().Pods().Informer().GetIndexer().Add(pod1)
  1605  
  1606  	// sync should abort.
  1607  	err := manager.syncReplicaSet(ctx, GetKey(rs, t))
  1608  	if err == nil {
  1609  		t.Error("syncReplicaSet() err = nil, expected non-nil")
  1610  	}
  1611  	// no patch, no create.
  1612  	err = validateSyncReplicaSet(fakePodControl, 0, 0, 0)
  1613  	if err != nil {
  1614  		t.Fatal(err)
  1615  	}
  1616  }
  1617  
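        // Condition fixtures shared by the GetCondition, SetCondition, and
        // RemoveCondition tests below.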
  1618  var (
  1619  	imagePullBackOff apps.ReplicaSetConditionType = "ImagePullBackOff"
  1620  
  1621  	condImagePullBackOff = func() apps.ReplicaSetCondition {
  1622  		return apps.ReplicaSetCondition{
  1623  			Type:   imagePullBackOff,
  1624  			Status: v1.ConditionTrue,
  1625  			Reason: "NonExistentImage",
  1626  		}
  1627  	}
  1628  
  1629  	condReplicaFailure = func() apps.ReplicaSetCondition {
  1630  		return apps.ReplicaSetCondition{
  1631  			Type:   apps.ReplicaSetReplicaFailure,
  1632  			Status: v1.ConditionTrue,
  1633  			Reason: "OtherFailure",
  1634  		}
  1635  	}
  1636  
  1637  	condReplicaFailure2 = func() apps.ReplicaSetCondition {
  1638  		return apps.ReplicaSetCondition{
  1639  			Type:   apps.ReplicaSetReplicaFailure,
  1640  			Status: v1.ConditionTrue,
  1641  			Reason: "AnotherFailure",
  1642  		}
  1643  	}
  1644  
  1645  	status = func() *apps.ReplicaSetStatus {
  1646  		return &apps.ReplicaSetStatus{
  1647  			Conditions: []apps.ReplicaSetCondition{condReplicaFailure()},
  1648  		}
  1649  	}
  1650  )
  1651  
  1652  func TestGetCondition(t *testing.T) {
  1653  	exampleStatus := status()
  1654  
  1655  	tests := []struct {
  1656  		name string
  1657  
  1658  		status   apps.ReplicaSetStatus
  1659  		condType apps.ReplicaSetConditionType
  1660  
  1661  		expected bool
  1662  	}{
  1663  		{
  1664  			name: "condition exists",
  1665  
  1666  			status:   *exampleStatus,
  1667  			condType: apps.ReplicaSetReplicaFailure,
  1668  
  1669  			expected: true,
  1670  		},
  1671  		{
  1672  			name: "condition does not exist",
  1673  
  1674  			status:   *exampleStatus,
  1675  			condType: imagePullBackOff,
  1676  
  1677  			expected: false,
  1678  		},
  1679  	}
  1680  
  1681  	for _, test := range tests {
  1682  		cond := GetCondition(test.status, test.condType)
  1683  		exists := cond != nil
  1684  		if exists != test.expected {
  1685  			t.Errorf("%s: expected condition to exist: %t, got: %t", test.name, test.expected, exists)
  1686  		}
  1687  	}
  1688  }
  1689  
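        // TestSetCondition verifies that SetCondition appends a condition of a new
        // type and replaces an existing condition of the same type.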
  1690  func TestSetCondition(t *testing.T) {
  1691  	tests := []struct {
  1692  		name string
  1693  
  1694  		status *apps.ReplicaSetStatus
  1695  		cond   apps.ReplicaSetCondition
  1696  
  1697  		expectedStatus *apps.ReplicaSetStatus
  1698  	}{
  1699  		{
  1700  			name: "set for the first time",
  1701  
  1702  			status: &apps.ReplicaSetStatus{},
  1703  			cond:   condReplicaFailure(),
  1704  
  1705  			expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
  1706  		},
  1707  		{
  1708  			name: "simple set",
  1709  
  1710  			status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff()}},
  1711  			cond:   condReplicaFailure(),
  1712  
  1713  			expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condImagePullBackOff(), condReplicaFailure()}},
  1714  		},
  1715  		{
  1716  			name: "overwrite",
  1717  
  1718  			status: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
  1719  			cond:   condReplicaFailure2(),
  1720  
  1721  			expectedStatus: &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure2()}},
  1722  		},
  1723  	}
  1724  
  1725  	for _, test := range tests {
  1726  		SetCondition(test.status, test.cond)
  1727  		if !reflect.DeepEqual(test.status, test.expectedStatus) {
  1728  			t.Errorf("%s: expected status: %v, got: %v", test.name, test.expectedStatus, test.status)
  1729  		}
  1730  	}
  1731  }
  1732  
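        // TestRemoveCondition verifies that RemoveCondition drops conditions of the
        // given type and leaves the status unchanged when no such condition exists.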
  1733  func TestRemoveCondition(t *testing.T) {
  1734  	tests := []struct {
  1735  		name string
  1736  
  1737  		status   *apps.ReplicaSetStatus
  1738  		condType apps.ReplicaSetConditionType
  1739  
  1740  		expectedStatus *apps.ReplicaSetStatus
  1741  	}{
  1742  		{
  1743  			name: "remove from empty status",
  1744  
  1745  			status:   &apps.ReplicaSetStatus{},
  1746  			condType: apps.ReplicaSetReplicaFailure,
  1747  
  1748  			expectedStatus: &apps.ReplicaSetStatus{},
  1749  		},
  1750  		{
  1751  			name: "simple remove",
  1752  
  1753  			status:   &apps.ReplicaSetStatus{Conditions: []apps.ReplicaSetCondition{condReplicaFailure()}},
  1754  			condType: apps.ReplicaSetReplicaFailure,
  1755  
  1756  			expectedStatus: &apps.ReplicaSetStatus{},
  1757  		},
  1758  		{
  1759  			name: "doesn't remove anything",
  1760  
  1761  			status:   status(),
  1762  			condType: imagePullBackOff,
  1763  
  1764  			expectedStatus: status(),
  1765  		},
  1766  	}
  1767  
  1768  	for _, test := range tests {
  1769  		RemoveCondition(test.status, test.condType)
  1770  		if !reflect.DeepEqual(test.status, test.expectedStatus) {
  1771  			t.Errorf("%s: expected status: %v, got: %v", test.name, test.expectedStatus, test.status)
  1772  		}
  1773  	}
  1774  }
  1775  
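        // TestSlowStartBatch exercises slowStartBatch with an initial batch size of 1.
        // As the expectedCallCnt comments below spell out, batch sizes grow 1, 2, 4, ...
        // (capped by the remaining count) and no further batches are scheduled once a
        // batch returns an error.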
  1776  func TestSlowStartBatch(t *testing.T) {
  1777  	fakeErr := fmt.Errorf("fake error")
  1778  	callCnt := 0
  1779  	callLimit := 0
  1780  	var lock sync.Mutex
  1781  	fn := func() error {
  1782  		lock.Lock()
  1783  		defer lock.Unlock()
  1784  		callCnt++
  1785  		if callCnt > callLimit {
  1786  			return fakeErr
  1787  		}
  1788  		return nil
  1789  	}
  1790  
  1791  	tests := []struct {
  1792  		name              string
  1793  		count             int
  1794  		callLimit         int
  1795  		fn                func() error
  1796  		expectedSuccesses int
  1797  		expectedErr       error
  1798  		expectedCallCnt   int
  1799  	}{
  1800  		{
  1801  			name:              "callLimit = 0 (all fail)",
  1802  			count:             10,
  1803  			callLimit:         0,
  1804  			fn:                fn,
  1805  			expectedSuccesses: 0,
  1806  			expectedErr:       fakeErr,
  1807  			expectedCallCnt:   1, // 1(first batch): function will be called at least once
  1808  		},
  1809  		{
  1810  			name:              "callLimit = count (all succeed)",
  1811  			count:             10,
  1812  			callLimit:         10,
  1813  			fn:                fn,
  1814  			expectedSuccesses: 10,
  1815  			expectedErr:       nil,
  1816  			expectedCallCnt:   10, // 1(first batch) + 2(2nd batch) + 4(3rd batch) + 3(4th batch) = 10
  1817  		},
  1818  		{
  1819  			name:              "callLimit < count (some succeed)",
  1820  			count:             10,
  1821  			callLimit:         5,
  1822  			fn:                fn,
  1823  			expectedSuccesses: 5,
  1824  			expectedErr:       fakeErr,
  1825  			expectedCallCnt:   7, // 1(first batch) + 2(2nd batch) + 4(3rd batch) = 7
  1826  		},
  1827  	}
  1828  
  1829  	for _, test := range tests {
  1830  		callCnt = 0
  1831  		callLimit = test.callLimit
  1832  		successes, err := slowStartBatch(test.count, 1, test.fn)
  1833  		if successes != test.expectedSuccesses {
  1834  			t.Errorf("%s: unexpected processed batch size, expected %d, got %d", test.name, test.expectedSuccesses, successes)
  1835  		}
  1836  		if err != test.expectedErr {
  1837  			t.Errorf("%s: unexpected error, expected %v, got %v", test.name, test.expectedErr, err)
  1838  		}
  1839  		// verify that slowStartBatch stops trying more calls after a batch fails
  1840  		if callCnt != test.expectedCallCnt {
  1841  			t.Errorf("%s: slowStartBatch() still tries calls after a batch fails, expected %d calls, got %d", test.name, test.expectedCallCnt, callCnt)
  1842  		}
  1843  	}
  1844  }
  1845  
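        // TestGetPodsToDelete verifies that getPodsToDelete picks victims according to
        // the preference order listed at the top of the test cases below.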
  1846  func TestGetPodsToDelete(t *testing.T) {
  1847  	labelMap := map[string]string{"name": "foo"}
  1848  	rs := newReplicaSet(1, labelMap)
  1849  	// an unscheduled, pending pod
  1850  	unscheduledPendingPod := newPod("unscheduled-pending-pod", rs, v1.PodPending, nil, true)
  1851  	// a scheduled, pending pod
  1852  	scheduledPendingPod := newPod("scheduled-pending-pod", rs, v1.PodPending, nil, true)
  1853  	scheduledPendingPod.Spec.NodeName = "fake-node"
  1854  	// a scheduled, running, not-ready pod
  1855  	scheduledRunningNotReadyPod := newPod("scheduled-running-not-ready-pod", rs, v1.PodRunning, nil, true)
  1856  	scheduledRunningNotReadyPod.Spec.NodeName = "fake-node"
  1857  	scheduledRunningNotReadyPod.Status.Conditions = []v1.PodCondition{
  1858  		{
  1859  			Type:   v1.PodReady,
  1860  			Status: v1.ConditionFalse,
  1861  		},
  1862  	}
  1863  	// a scheduled, running, ready pod on fake-node-1
  1864  	scheduledRunningReadyPodOnNode1 := newPod("scheduled-running-ready-pod-on-node-1", rs, v1.PodRunning, nil, true)
  1865  	scheduledRunningReadyPodOnNode1.Spec.NodeName = "fake-node-1"
  1866  	scheduledRunningReadyPodOnNode1.Status.Conditions = []v1.PodCondition{
  1867  		{
  1868  			Type:   v1.PodReady,
  1869  			Status: v1.ConditionTrue,
  1870  		},
  1871  	}
  1872  	// a scheduled, running, ready pod on fake-node-2
  1873  	scheduledRunningReadyPodOnNode2 := newPod("scheduled-running-ready-pod-on-node-2", rs, v1.PodRunning, nil, true)
  1874  	scheduledRunningReadyPodOnNode2.Spec.NodeName = "fake-node-2"
  1875  	scheduledRunningReadyPodOnNode2.Status.Conditions = []v1.PodCondition{
  1876  		{
  1877  			Type:   v1.PodReady,
  1878  			Status: v1.ConditionTrue,
  1879  		},
  1880  	}
  1881  
  1882  	tests := []struct {
  1883  		name string
  1884  		pods []*v1.Pod
  1885  		// related defaults to pods if nil.
  1886  		related              []*v1.Pod
  1887  		diff                 int
  1888  		expectedPodsToDelete []*v1.Pod
  1889  	}{
  1890  		// Order used when selecting pods for deletion:
  1891  		// an unscheduled, pending pod
  1892  		// a scheduled, pending pod
  1893  		// a scheduled, running, not-ready pod
  1894  		// a scheduled, running, ready pod on same node as a related pod
  1895  		// a scheduled, running, ready pod not on node with related pods
  1896  		// Note that a pending pod cannot be ready
  1897  		{
  1898  			name:                 "len(pods) = 0 (i.e., diff = 0 too)",
  1899  			pods:                 []*v1.Pod{},
  1900  			diff:                 0,
  1901  			expectedPodsToDelete: []*v1.Pod{},
  1902  		},
  1903  		{
  1904  			name: "diff = len(pods)",
  1905  			pods: []*v1.Pod{
  1906  				scheduledRunningNotReadyPod,
  1907  				scheduledRunningReadyPodOnNode1,
  1908  			},
  1909  			diff:                 2,
  1910  			expectedPodsToDelete: []*v1.Pod{scheduledRunningNotReadyPod, scheduledRunningReadyPodOnNode1},
  1911  		},
  1912  		{
  1913  			name: "diff < len(pods)",
  1914  			pods: []*v1.Pod{
  1915  				scheduledRunningReadyPodOnNode1,
  1916  				scheduledRunningNotReadyPod,
  1917  			},
  1918  			diff:                 1,
  1919  			expectedPodsToDelete: []*v1.Pod{scheduledRunningNotReadyPod},
  1920  		},
  1921  		{
  1922  			name: "various pod phases and conditions, diff = len(pods)",
  1923  			pods: []*v1.Pod{
  1924  				scheduledRunningReadyPodOnNode1,
  1925  				scheduledRunningReadyPodOnNode1,
  1926  				scheduledRunningReadyPodOnNode2,
  1927  				scheduledRunningNotReadyPod,
  1928  				scheduledPendingPod,
  1929  				unscheduledPendingPod,
  1930  			},
  1931  			diff: 6,
  1932  			expectedPodsToDelete: []*v1.Pod{
  1933  				scheduledRunningReadyPodOnNode1,
  1934  				scheduledRunningReadyPodOnNode1,
  1935  				scheduledRunningReadyPodOnNode2,
  1936  				scheduledRunningNotReadyPod,
  1937  				scheduledPendingPod,
  1938  				unscheduledPendingPod,
  1939  			},
  1940  		},
  1941  		{
  1942  			name: "various pod phases and conditions, diff = len(pods), relatedPods empty",
  1943  			pods: []*v1.Pod{
  1944  				scheduledRunningReadyPodOnNode1,
  1945  				scheduledRunningReadyPodOnNode1,
  1946  				scheduledRunningReadyPodOnNode2,
  1947  				scheduledRunningNotReadyPod,
  1948  				scheduledPendingPod,
  1949  				unscheduledPendingPod,
  1950  			},
  1951  			related: []*v1.Pod{},
  1952  			diff:    6,
  1953  			expectedPodsToDelete: []*v1.Pod{
  1954  				scheduledRunningReadyPodOnNode1,
  1955  				scheduledRunningReadyPodOnNode1,
  1956  				scheduledRunningReadyPodOnNode2,
  1957  				scheduledRunningNotReadyPod,
  1958  				scheduledPendingPod,
  1959  				unscheduledPendingPod,
  1960  			},
  1961  		},
  1962  		{
  1963  			name: "scheduled vs unscheduled, diff < len(pods)",
  1964  			pods: []*v1.Pod{
  1965  				scheduledPendingPod,
  1966  				unscheduledPendingPod,
  1967  			},
  1968  			diff: 1,
  1969  			expectedPodsToDelete: []*v1.Pod{
  1970  				unscheduledPendingPod,
  1971  			},
  1972  		},
  1973  		{
  1974  			name: "ready vs not-ready, diff < len(pods)",
  1975  			pods: []*v1.Pod{
  1976  				scheduledRunningReadyPodOnNode1,
  1977  				scheduledRunningNotReadyPod,
  1978  				scheduledRunningNotReadyPod,
  1979  			},
  1980  			diff: 2,
  1981  			expectedPodsToDelete: []*v1.Pod{
  1982  				scheduledRunningNotReadyPod,
  1983  				scheduledRunningNotReadyPod,
  1984  			},
  1985  		},
  1986  		{
  1987  			name: "ready and colocated with another ready pod vs not colocated, diff < len(pods)",
  1988  			pods: []*v1.Pod{
  1989  				scheduledRunningReadyPodOnNode1,
  1990  				scheduledRunningReadyPodOnNode2,
  1991  			},
  1992  			related: []*v1.Pod{
  1993  				scheduledRunningReadyPodOnNode1,
  1994  				scheduledRunningReadyPodOnNode2,
  1995  				scheduledRunningReadyPodOnNode2,
  1996  			},
  1997  			diff: 1,
  1998  			expectedPodsToDelete: []*v1.Pod{
  1999  				scheduledRunningReadyPodOnNode2,
  2000  			},
  2001  		},
  2002  		{
  2003  			name: "pending vs running, diff < len(pods)",
  2004  			pods: []*v1.Pod{
  2005  				scheduledPendingPod,
  2006  				scheduledRunningNotReadyPod,
  2007  			},
  2008  			diff: 1,
  2009  			expectedPodsToDelete: []*v1.Pod{
  2010  				scheduledPendingPod,
  2011  			},
  2012  		},
  2013  		{
  2014  			name: "various pod phases and conditions, diff < len(pods)",
  2015  			pods: []*v1.Pod{
  2016  				scheduledRunningReadyPodOnNode1,
  2017  				scheduledRunningReadyPodOnNode2,
  2018  				scheduledRunningNotReadyPod,
  2019  				scheduledPendingPod,
  2020  				unscheduledPendingPod,
  2021  			},
  2022  			diff: 3,
  2023  			expectedPodsToDelete: []*v1.Pod{
  2024  				unscheduledPendingPod,
  2025  				scheduledPendingPod,
  2026  				scheduledRunningNotReadyPod,
  2027  			},
  2028  		},
  2029  	}
  2030  
  2031  	for _, test := range tests {
  2032  		related := test.related
  2033  		if related == nil {
  2034  			related = test.pods
  2035  		}
  2036  		podsToDelete := getPodsToDelete(test.pods, related, test.diff)
  2037  		if len(podsToDelete) != len(test.expectedPodsToDelete) {
  2038  			t.Errorf("%s: unexpected pods to delete, expected %v, got %v", test.name, test.expectedPodsToDelete, podsToDelete)
  2039  		}
  2040  		if !reflect.DeepEqual(podsToDelete, test.expectedPodsToDelete) {
  2041  			t.Errorf("%s: unexpected pods to delete, expected %v, got %v", test.name, test.expectedPodsToDelete, podsToDelete)
  2042  		}
  2043  	}
  2044  }
  2045  
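        // TestGetPodKeys verifies that getPodKeys returns <namespace>/<name> keys for
        // the given pods.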
  2046  func TestGetPodKeys(t *testing.T) {
  2047  	labelMap := map[string]string{"name": "foo"}
  2048  	rs := newReplicaSet(1, labelMap)
  2049  	pod1 := newPod("pod1", rs, v1.PodRunning, nil, true)
  2050  	pod2 := newPod("pod2", rs, v1.PodRunning, nil, true)
  2051  
  2052  	tests := []struct {
  2053  		name            string
  2054  		pods            []*v1.Pod
  2055  		expectedPodKeys []string
  2056  	}{
  2057  		{
  2058  			"len(pods) = 0 (i.e., pods is empty)",
  2059  			[]*v1.Pod{},
  2060  			[]string{},
  2061  		},
  2062  		{
  2063  			"len(pods) > 0",
  2064  			[]*v1.Pod{
  2065  				pod1,
  2066  				pod2,
  2067  			},
  2068  			[]string{"default/pod1", "default/pod2"},
  2069  		},
  2070  	}
  2071  
  2072  	for _, test := range tests {
  2073  		podKeys := getPodKeys(test.pods)
  2074  		if len(podKeys) != len(test.expectedPodKeys) {
  2075  			t.Errorf("%s: unexpected pod keys, expected %v, got %v", test.name, test.expectedPodKeys, podKeys)
  2076  		}
  2077  		for i := 0; i < len(podKeys); i++ {
  2078  			if podKeys[i] != test.expectedPodKeys[i] {
  2079  				t.Errorf("%s: unexpected pod keys, expected %v, got %v", test.name, test.expectedPodKeys, podKeys)
  2080  			}
  2081  		}
  2082  	}
  2083  }
  2084  
