...

Source file src/k8s.io/kubernetes/pkg/controller/daemon/daemon_controller_test.go

Documentation: k8s.io/kubernetes/pkg/controller/daemon

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package daemon
    18  
    19  import (
    20  	"context"
    21  	"errors"
    22  	"fmt"
    23  	"reflect"
    24  	"sort"
    25  	"strconv"
    26  	"sync"
    27  	"testing"
    28  	"time"
    29  
    30  	apps "k8s.io/api/apps/v1"
    31  	v1 "k8s.io/api/core/v1"
    32  	"k8s.io/apimachinery/pkg/api/resource"
    33  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    34  	"k8s.io/apimachinery/pkg/runtime"
    35  	"k8s.io/apimachinery/pkg/util/intstr"
    36  	"k8s.io/apimachinery/pkg/util/sets"
    37  	"k8s.io/apimachinery/pkg/util/uuid"
    38  	"k8s.io/apimachinery/pkg/util/wait"
    39  	"k8s.io/apiserver/pkg/storage/names"
    40  	"k8s.io/client-go/informers"
    41  	"k8s.io/client-go/kubernetes/fake"
    42  	core "k8s.io/client-go/testing"
    43  	"k8s.io/client-go/tools/cache"
    44  	"k8s.io/client-go/tools/record"
    45  	"k8s.io/client-go/util/flowcontrol"
    46  	"k8s.io/client-go/util/workqueue"
    47  	"k8s.io/klog/v2"
    48  	"k8s.io/klog/v2/ktesting"
    49  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    50  	api "k8s.io/kubernetes/pkg/apis/core"
    51  	"k8s.io/kubernetes/pkg/apis/scheduling"
    52  	"k8s.io/kubernetes/pkg/controller"
    53  	"k8s.io/kubernetes/pkg/controller/daemon/util"
    54  	"k8s.io/kubernetes/pkg/securitycontext"
    55  	labelsutil "k8s.io/kubernetes/pkg/util/labels"
    56  	testingclock "k8s.io/utils/clock/testing"
    57  )
    58  
    59  var (
    60  	simpleDaemonSetLabel  = map[string]string{"name": "simple-daemon", "type": "production"}
    61  	simpleDaemonSetLabel2 = map[string]string{"name": "simple-daemon", "type": "test"}
    62  	simpleNodeLabel       = map[string]string{"color": "blue", "speed": "fast"}
    63  	simpleNodeLabel2      = map[string]string{"color": "red", "speed": "fast"}
    64  	alwaysReady           = func() bool { return true }
    65  	informerSyncTimeout   = 30 * time.Second
    66  )
    67  
    68  var (
    69  	noScheduleTolerations = []v1.Toleration{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}
    70  	noScheduleTaints      = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}
    71  	noExecuteTaints       = []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoExecute"}}
    72  )
    73  
    74  func nowPointer() *metav1.Time {
    75  	now := metav1.Now()
    76  	return &now
    77  }
    78  
    79  var (
    80  	nodeNotReady = []v1.Taint{{
    81  		Key:       v1.TaintNodeNotReady,
    82  		Effect:    v1.TaintEffectNoExecute,
    83  		TimeAdded: nowPointer(),
    84  	}}
    85  
    86  	nodeUnreachable = []v1.Taint{{
    87  		Key:       v1.TaintNodeUnreachable,
    88  		Effect:    v1.TaintEffectNoExecute,
    89  		TimeAdded: nowPointer(),
    90  	}}
    91  )
    92  
    93  func newDaemonSet(name string) *apps.DaemonSet {
    94  	two := int32(2)
    95  	return &apps.DaemonSet{
    96  		ObjectMeta: metav1.ObjectMeta{
    97  			UID:       uuid.NewUUID(),
    98  			Name:      name,
    99  			Namespace: metav1.NamespaceDefault,
   100  		},
   101  		Spec: apps.DaemonSetSpec{
   102  			RevisionHistoryLimit: &two,
   103  			UpdateStrategy: apps.DaemonSetUpdateStrategy{
   104  				Type: apps.OnDeleteDaemonSetStrategyType,
   105  			},
   106  			Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
   107  			Template: v1.PodTemplateSpec{
   108  				ObjectMeta: metav1.ObjectMeta{
   109  					Labels: simpleDaemonSetLabel,
   110  				},
   111  				Spec: v1.PodSpec{
   112  					Containers: []v1.Container{
   113  						{
   114  							Image:                  "foo/bar",
   115  							TerminationMessagePath: v1.TerminationMessagePathDefault,
   116  							ImagePullPolicy:        v1.PullIfNotPresent,
   117  							SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
   118  						},
   119  					},
   120  					DNSPolicy: v1.DNSDefault,
   121  				},
   122  			},
   123  		},
   124  	}
   125  }
   126  
   127  func newRollingUpdateStrategy() *apps.DaemonSetUpdateStrategy {
   128  	one := intstr.FromInt32(1)
   129  	return &apps.DaemonSetUpdateStrategy{
   130  		Type:          apps.RollingUpdateDaemonSetStrategyType,
   131  		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
   132  	}
   133  }
   134  
   135  func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
   136  	return &apps.DaemonSetUpdateStrategy{
   137  		Type: apps.OnDeleteDaemonSetStrategyType,
   138  	}
   139  }
   140  
   141  func updateStrategies() []*apps.DaemonSetUpdateStrategy {
   142  	return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollingUpdateStrategy()}
   143  }
   144  
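        // Most of the tests below run their scenario once per strategy returned by
        // updateStrategies, so basic placement behavior is exercised under both
        // OnDelete and RollingUpdate; the update strategy only affects how existing
        // pods are replaced when the template changes, not where new pods are scheduled.
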
   145  func newNode(name string, label map[string]string) *v1.Node {
   146  	return &v1.Node{
   147  		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
   148  		ObjectMeta: metav1.ObjectMeta{
   149  			Name:      name,
   150  			Labels:    label,
   151  			Namespace: metav1.NamespaceNone,
   152  		},
   153  		Status: v1.NodeStatus{
   154  			Conditions: []v1.NodeCondition{
   155  				{Type: v1.NodeReady, Status: v1.ConditionTrue},
   156  			},
   157  			Allocatable: v1.ResourceList{
   158  				v1.ResourcePods: resource.MustParse("100"),
   159  			},
   160  		},
   161  	}
   162  }
   163  
   164  func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]string) {
   165  	for i := startIndex; i < startIndex+numNodes; i++ {
   166  		nodeStore.Add(newNode(fmt.Sprintf("node-%d", i), label))
   167  	}
   168  }
   169  
   170  func newPod(podName string, nodeName string, label map[string]string, ds *apps.DaemonSet) *v1.Pod {
   171  	// Add a hash-based unique label to the pod
   172  	newLabels := label
   173  	var podSpec v1.PodSpec
   174  	// Copy pod spec from DaemonSet template, or use a default one if DaemonSet is nil
   175  	if ds != nil {
   176  		hash := controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount)
   177  		newLabels = labelsutil.CloneAndAddLabel(label, apps.DefaultDaemonSetUniqueLabelKey, hash)
   178  		podSpec = ds.Spec.Template.Spec
   179  	} else {
   180  		podSpec = v1.PodSpec{
   181  			Containers: []v1.Container{
   182  				{
   183  					Image:                  "foo/bar",
   184  					TerminationMessagePath: v1.TerminationMessagePathDefault,
   185  					ImagePullPolicy:        v1.PullIfNotPresent,
   186  					SecurityContext:        securitycontext.ValidSecurityContextWithContainerDefaults(),
   187  				},
   188  			},
   189  		}
   190  	}
   191  	// Add node name to the pod
   192  	if len(nodeName) > 0 {
   193  		podSpec.NodeName = nodeName
   194  	}
   195  
   196  	pod := &v1.Pod{
   197  		TypeMeta: metav1.TypeMeta{APIVersion: "v1"},
   198  		ObjectMeta: metav1.ObjectMeta{
   199  			GenerateName: podName,
   200  			Labels:       newLabels,
   201  			Namespace:    metav1.NamespaceDefault,
   202  		},
   203  		Spec: podSpec,
   204  	}
   205  	pod.Name = names.SimpleNameGenerator.GenerateName(podName)
   206  	if ds != nil {
   207  		pod.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds, controllerKind)}
   208  	}
   209  	return pod
   210  }
   211  
   212  func addPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
   213  	for i := 0; i < number; i++ {
   214  		pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
   215  		podStore.Add(pod)
   216  	}
   217  }
   218  
   219  func addFailedPods(podStore cache.Store, nodeName string, label map[string]string, ds *apps.DaemonSet, number int) {
   220  	for i := 0; i < number; i++ {
   221  		pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, label, ds)
   222  		pod.Status = v1.PodStatus{Phase: v1.PodFailed}
   223  		podStore.Add(pod)
   224  	}
   225  }
   226  
   227  func newControllerRevision(name string, namespace string, label map[string]string,
   228  	ownerReferences []metav1.OwnerReference) *apps.ControllerRevision {
   229  	return &apps.ControllerRevision{
   230  		TypeMeta: metav1.TypeMeta{APIVersion: "apps/v1"},
   231  		ObjectMeta: metav1.ObjectMeta{
   232  			Name:            name,
   233  			Labels:          label,
   234  			Namespace:       namespace,
   235  			OwnerReferences: ownerReferences,
   236  		},
   237  	}
   238  }
   239  
   240  type fakePodControl struct {
   241  	sync.Mutex
   242  	*controller.FakePodControl
   243  	podStore     cache.Store
   244  	podIDMap     map[string]*v1.Pod
   245  	expectations controller.ControllerExpectationsInterface
   246  }
   247  
   248  func newFakePodControl() *fakePodControl {
   249  	podIDMap := make(map[string]*v1.Pod)
   250  	return &fakePodControl{
   251  		FakePodControl: &controller.FakePodControl{},
   252  		podIDMap:       podIDMap,
   253  	}
   254  }
   255  
   256  func (f *fakePodControl) CreatePods(ctx context.Context, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *metav1.OwnerReference) error {
   257  	f.Lock()
   258  	defer f.Unlock()
   259  	if err := f.FakePodControl.CreatePods(ctx, namespace, template, object, controllerRef); err != nil {
   260  		return fmt.Errorf("failed to create pod for DaemonSet: %w", err)
   261  	}
   262  
   263  	pod := &v1.Pod{
   264  		ObjectMeta: metav1.ObjectMeta{
   265  			Labels:    template.Labels,
   266  			Namespace: namespace,
   267  		},
   268  	}
   269  
   270  	pod.Name = names.SimpleNameGenerator.GenerateName(fmt.Sprintf("%p-", pod))
   271  
   272  	template.Spec.DeepCopyInto(&pod.Spec)
   273  
   274  	f.podStore.Update(pod)
   275  	f.podIDMap[pod.Name] = pod
   276  
   277  	ds := object.(*apps.DaemonSet)
   278  	dsKey, _ := controller.KeyFunc(ds)
   279  	f.expectations.CreationObserved(klog.FromContext(ctx), dsKey)
   280  
   281  	return nil
   282  }
   283  
   284  func (f *fakePodControl) DeletePod(ctx context.Context, namespace string, podID string, object runtime.Object) error {
   285  	f.Lock()
   286  	defer f.Unlock()
   287  	if err := f.FakePodControl.DeletePod(ctx, namespace, podID, object); err != nil {
   288  		return fmt.Errorf("failed to delete pod %q: %w", podID, err)
   289  	}
   290  	pod, ok := f.podIDMap[podID]
   291  	if !ok {
   292  		return fmt.Errorf("pod %q does not exist", podID)
   293  	}
   294  	f.podStore.Delete(pod)
   295  	delete(f.podIDMap, podID)
   296  
   297  	ds := object.(*apps.DaemonSet)
   298  	dsKey, _ := controller.KeyFunc(ds)
   299  	f.expectations.DeletionObserved(klog.FromContext(ctx), dsKey)
   300  
   301  	return nil
   302  }
   303  
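        // The fake pod control above stands in for the real API during a sync: every
        // create is mirrored into the shared pod informer store and recorded in
        // podIDMap, every delete removes the pod again, and the owning DaemonSet's
        // expectations are marked observed in both cases, so a follow-up sync in the
        // same test sees the effect immediately without an API server round trip.
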
   304  type daemonSetsController struct {
   305  	*DaemonSetsController
   306  
   307  	dsStore      cache.Store
   308  	historyStore cache.Store
   309  	podStore     cache.Store
   310  	nodeStore    cache.Store
   311  	fakeRecorder *record.FakeRecorder
   312  }
   313  
   314  func newTestController(ctx context.Context, initialObjects ...runtime.Object) (*daemonSetsController, *fakePodControl, *fake.Clientset, error) {
   315  	clientset := fake.NewSimpleClientset(initialObjects...)
   316  	informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
   317  
   318  	dsc, err := NewDaemonSetsController(
   319  		ctx,
   320  		informerFactory.Apps().V1().DaemonSets(),
   321  		informerFactory.Apps().V1().ControllerRevisions(),
   322  		informerFactory.Core().V1().Pods(),
   323  		informerFactory.Core().V1().Nodes(),
   324  		clientset,
   325  		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
   326  	)
   327  	if err != nil {
   328  		return nil, nil, nil, err
   329  	}
   330  
   331  	fakeRecorder := record.NewFakeRecorder(100)
   332  	dsc.eventRecorder = fakeRecorder
   333  
   334  	dsc.podStoreSynced = alwaysReady
   335  	dsc.nodeStoreSynced = alwaysReady
   336  	dsc.dsStoreSynced = alwaysReady
   337  	dsc.historyStoreSynced = alwaysReady
   338  	podControl := newFakePodControl()
   339  	dsc.podControl = podControl
   340  	podControl.podStore = informerFactory.Core().V1().Pods().Informer().GetStore()
   341  
   342  	newDsc := &daemonSetsController{
   343  		dsc,
   344  		informerFactory.Apps().V1().DaemonSets().Informer().GetStore(),
   345  		informerFactory.Apps().V1().ControllerRevisions().Informer().GetStore(),
   346  		informerFactory.Core().V1().Pods().Informer().GetStore(),
   347  		informerFactory.Core().V1().Nodes().Informer().GetStore(),
   348  		fakeRecorder,
   349  	}
   350  
   351  	podControl.expectations = newDsc.expectations
   352  
   353  	return newDsc, podControl, clientset, nil
   354  }
   355  
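        // Illustrative sketch, not one of the original tests: the wiring most tests in
        // this file follow. Build the harness, seed the informer stores directly, then
        // sync once and validate the create/delete/event counts with expectSyncDaemonSets.
        // Every identifier used here is one of the helpers defined in this file.
        func exampleHarnessUsageSketch(t *testing.T) {
        	_, ctx := ktesting.NewTestContext(t)
        	ds := newDaemonSet("sketch")
        	manager, podControl, _, err := newTestController(ctx, ds)
        	if err != nil {
        		t.Fatalf("error creating DaemonSets controller: %v", err)
        	}
        	addNodes(manager.nodeStore, 0, 3, nil)
        	if err := manager.dsStore.Add(ds); err != nil {
        		t.Fatal(err)
        	}
        	// Three schedulable nodes and no existing daemon pods: expect three creates,
        	// no deletes, and no events.
        	expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
        }
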
   356  func resetCounters(manager *daemonSetsController) {
   357  	manager.podControl.(*fakePodControl).Clear()
   358  	fakeRecorder := record.NewFakeRecorder(100)
   359  	manager.eventRecorder = fakeRecorder
   360  	manager.fakeRecorder = fakeRecorder
   361  }
   362  
   363  func validateSyncDaemonSets(manager *daemonSetsController, fakePodControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) error {
   364  	if len(fakePodControl.Templates) != expectedCreates {
   365  		return fmt.Errorf("Unexpected number of creates.  Expected %d, saw %d\n", expectedCreates, len(fakePodControl.Templates))
   366  	}
   367  	if len(fakePodControl.DeletePodName) != expectedDeletes {
   368  		return fmt.Errorf("Unexpected number of deletes.  Expected %d, got %v\n", expectedDeletes, fakePodControl.DeletePodName)
   369  	}
   370  	if len(manager.fakeRecorder.Events) != expectedEvents {
   371  		return fmt.Errorf("Unexpected number of events.  Expected %d, saw %d\n", expectedEvents, len(manager.fakeRecorder.Events))
   372  	}
   373  	// Every Pod created should have a ControllerRef.
   374  	if got, want := len(fakePodControl.ControllerRefs), expectedCreates; got != want {
   375  		return fmt.Errorf("len(ControllerRefs) = %v, want %v", got, want)
   376  	}
   377  	// Make sure the ControllerRefs are correct.
   378  	for _, controllerRef := range fakePodControl.ControllerRefs {
   379  		if got, want := controllerRef.APIVersion, "apps/v1"; got != want {
   380  			return fmt.Errorf("controllerRef.APIVersion = %q, want %q", got, want)
   381  		}
   382  		if got, want := controllerRef.Kind, "DaemonSet"; got != want {
   383  			return fmt.Errorf("controllerRef.Kind = %q, want %q", got, want)
   384  		}
   385  		if controllerRef.Controller == nil || *controllerRef.Controller != true {
   386  			return fmt.Errorf("controllerRef.Controller is not set to true")
   387  		}
   388  	}
   389  	return nil
   390  }
   391  
   392  func expectSyncDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
   393  	t.Helper()
   394  	expectSyncDaemonSetsWithError(t, manager, ds, podControl, expectedCreates, expectedDeletes, expectedEvents, nil)
   395  }
   396  
   397  func expectSyncDaemonSetsWithError(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int, expectedError error) {
   398  	t.Helper()
   399  	key, err := controller.KeyFunc(ds)
   400  	if err != nil {
   401  		t.Fatal("could not get key for daemon")
   402  	}
   403  
   404  	err = manager.syncHandler(context.TODO(), key)
   405  	if expectedError != nil && !errors.Is(err, expectedError) {
   406  		t.Fatalf("Unexpected error returned from syncHandler: %v", err)
   407  	}
   408  
   409  	if expectedError == nil && err != nil {
   410  		t.Log(err)
   411  	}
   412  
   413  	err = validateSyncDaemonSets(manager, podControl, expectedCreates, expectedDeletes, expectedEvents)
   414  	if err != nil {
   415  		t.Fatal(err)
   416  	}
   417  }
   418  
   419  // clearExpectations clears the fake pod control and the DaemonSet's create/delete expectations, then logs the pods currently in the pod store for debugging.
   420  func clearExpectations(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, fakePodControl *fakePodControl) {
   421  	fakePodControl.Clear()
   422  	logger, _ := ktesting.NewTestContext(t)
   423  	key, err := controller.KeyFunc(ds)
   424  	if err != nil {
   425  		t.Errorf("Could not get key for daemon.")
   426  		return
   427  	}
   428  	manager.expectations.DeleteExpectations(logger, key)
   429  
   430  	now := manager.failedPodsBackoff.Clock.Now()
   431  	hash, _ := currentDSHash(manager, ds)
   432  	// log all the pods in the store
   433  	var lines []string
   434  	for _, obj := range manager.podStore.List() {
   435  		pod := obj.(*v1.Pod)
   436  		if pod.CreationTimestamp.IsZero() {
   437  			pod.CreationTimestamp.Time = now
   438  		}
   439  		var readyLast time.Time
   440  		ready := podutil.IsPodReady(pod)
   441  		if ready {
   442  			if c := podutil.GetPodReadyCondition(pod.Status); c != nil {
   443  				readyLast = c.LastTransitionTime.Time.Add(time.Duration(ds.Spec.MinReadySeconds) * time.Second)
   444  			}
   445  		}
   446  		nodeName, _ := util.GetTargetNodeName(pod)
   447  
   448  		lines = append(lines, fmt.Sprintf("node=%s current=%-5t ready=%-5t age=%-4d pod=%s now=%d available=%d",
   449  			nodeName,
   450  			hash == pod.Labels[apps.ControllerRevisionHashLabelKey],
   451  			ready,
   452  			now.Unix(),
   453  			pod.Name,
   454  			pod.CreationTimestamp.Unix(),
   455  			readyLast.Unix(),
   456  		))
   457  	}
   458  	sort.Strings(lines)
   459  	for _, line := range lines {
   460  		logger.Info(line)
   461  	}
   462  }
   463  
   464  func TestDeleteFinalStateUnknown(t *testing.T) {
   465  	for _, strategy := range updateStrategies() {
   466  		logger, ctx := ktesting.NewTestContext(t)
   467  		manager, _, _, err := newTestController(ctx)
   468  		if err != nil {
   469  			t.Fatalf("error creating DaemonSets controller: %v", err)
   470  		}
   471  		addNodes(manager.nodeStore, 0, 1, nil)
   472  		ds := newDaemonSet("foo")
   473  		ds.Spec.UpdateStrategy = *strategy
   474  		// DeletedFinalStateUnknown should queue the embedded DS if found.
   475  		manager.deleteDaemonset(logger, cache.DeletedFinalStateUnknown{Key: "foo", Obj: ds})
   476  		enqueuedKey, _ := manager.queue.Get()
   477  		if enqueuedKey.(string) != "default/foo" {
   478  			t.Errorf("expected delete of DeletedFinalStateUnknown to enqueue the daemonset but found: %#v", enqueuedKey)
   479  		}
   480  	}
   481  }
   482  
   483  func TestExpectationsOnRecreate(t *testing.T) {
   484  	_, ctx := ktesting.NewTestContext(t)
   485  	ctx, cancel := context.WithCancel(ctx)
   486  	defer cancel()
   487  
   488  	client := fake.NewSimpleClientset()
   489  
   490  	f := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
   491  	dsc, err := NewDaemonSetsController(
   492  		ctx,
   493  		f.Apps().V1().DaemonSets(),
   494  		f.Apps().V1().ControllerRevisions(),
   495  		f.Core().V1().Pods(),
   496  		f.Core().V1().Nodes(),
   497  		client,
   498  		flowcontrol.NewFakeBackOff(50*time.Millisecond, 500*time.Millisecond, testingclock.NewFakeClock(time.Now())),
   499  	)
   500  	if err != nil {
   501  		t.Fatal(err)
   502  	}
   503  
   504  	expectStableQueueLength := func(expected int) {
   505  		t.Helper()
   506  		for i := 0; i < 5; i++ {
   507  			if actual := dsc.queue.Len(); actual != expected {
   508  				t.Fatalf("expected queue len to remain at %d, got %d", expected, actual)
   509  			}
   510  			time.Sleep(10 * time.Millisecond)
   511  		}
   512  	}
   513  	waitForQueueLength := func(expected int, msg string) {
   514  		t.Helper()
   515  		i := 0
   516  		err = wait.PollImmediate(100*time.Millisecond, informerSyncTimeout, func() (bool, error) {
   517  			current := dsc.queue.Len()
   518  			switch {
   519  			case current == expected:
   520  				return true, nil
   521  			case current > expected:
   522  				return false, fmt.Errorf("queue length %d exceeded expected length %d", current, expected)
   523  			default:
   524  				i++
   525  				if i > 1 {
   526  					t.Logf("Waiting for queue to have %d item, currently has: %d", expected, current)
   527  				}
   528  				return false, nil
   529  			}
   530  		})
   531  		if err != nil {
   532  			t.Fatalf("%s: %v", msg, err)
   533  		}
   534  		expectStableQueueLength(expected)
   535  	}
   536  
   537  	fakeRecorder := record.NewFakeRecorder(100)
   538  	dsc.eventRecorder = fakeRecorder
   539  
   540  	fakePodControl := newFakePodControl()
   541  	fakePodControl.podStore = cache.NewStore(cache.DeletionHandlingMetaNamespaceKeyFunc) // fake store that we don't use
   542  	fakePodControl.expectations = controller.NewControllerExpectations()                 // fake expectations that we don't use
   543  	dsc.podControl = fakePodControl
   544  
   545  	manager := &daemonSetsController{
   546  		DaemonSetsController: dsc,
   547  		fakeRecorder:         fakeRecorder,
   548  	}
   549  
   550  	_, err = client.CoreV1().Nodes().Create(context.Background(), newNode("master-0", nil), metav1.CreateOptions{})
   551  	if err != nil {
   552  		t.Fatal(err)
   553  	}
   554  
   555  	f.Start(ctx.Done())
   556  	for ty, ok := range f.WaitForCacheSync(ctx.Done()) {
   557  		if !ok {
   558  			t.Fatalf("caches failed to sync: %v", ty)
   559  		}
   560  	}
   561  
   562  	expectStableQueueLength(0)
   563  
   564  	oldDS := newDaemonSet("test")
   565  	oldDS, err = client.AppsV1().DaemonSets(oldDS.Namespace).Create(context.Background(), oldDS, metav1.CreateOptions{})
   566  	if err != nil {
   567  		t.Fatal(err)
   568  	}
   569  
   570  	// create of DS adds to queue, processes
   571  	waitForQueueLength(1, "created DS")
   572  	ok := dsc.processNextWorkItem(context.TODO())
   573  	if !ok {
   574  		t.Fatal("queue is shutting down")
   575  	}
   576  
   577  	err = validateSyncDaemonSets(manager, fakePodControl, 1, 0, 0)
   578  	if err != nil {
   579  		t.Error(err)
   580  	}
   581  	fakePodControl.Clear()
   582  
   583  	oldDSKey, err := controller.KeyFunc(oldDS)
   584  	if err != nil {
   585  		t.Fatal(err)
   586  	}
   587  
   588  	dsExp, exists, err := dsc.expectations.GetExpectations(oldDSKey)
   589  	if err != nil {
   590  		t.Fatal(err)
   591  	}
   592  	if !exists {
   593  		t.Fatalf("No expectations found for DaemonSet %q", oldDSKey)
   594  	}
   595  	if dsExp.Fulfilled() {
   596  		t.Errorf("There should be unfulfilled expectation for creating new pods for DaemonSet %q", oldDSKey)
   597  	}
   598  
   599  	// process updates DS, update adds to queue
   600  	waitForQueueLength(1, "updated DS")
   601  	ok = dsc.processNextWorkItem(context.TODO())
   602  	if !ok {
   603  		t.Fatal("queue is shutting down")
   604  	}
   605  
   606  	// process does not re-update the DS
   607  	expectStableQueueLength(0)
   608  
   609  	err = client.AppsV1().DaemonSets(oldDS.Namespace).Delete(context.Background(), oldDS.Name, metav1.DeleteOptions{})
   610  	if err != nil {
   611  		t.Fatal(err)
   612  	}
   613  
   614  	waitForQueueLength(1, "deleted DS")
   615  
   616  	_, exists, err = dsc.expectations.GetExpectations(oldDSKey)
   617  	if err != nil {
   618  		t.Fatal(err)
   619  	}
   620  	if exists {
   621  		t.Errorf("There should be no expectations for DaemonSet %q after it was deleted", oldDSKey)
   622  	}
   623  
   624  	// skip sync for the delete event so we only see the new DS in sync
   625  	key, quit := dsc.queue.Get()
   626  	if quit {
   627  		t.Fatal("Queue is shutting down!")
   628  	}
   629  	dsc.queue.Done(key)
   630  	if key != oldDSKey {
   631  		t.Fatal("Keys should be equal!")
   632  	}
   633  
   634  	expectStableQueueLength(0)
   635  
   636  	newDS := oldDS.DeepCopy()
   637  	newDS.UID = uuid.NewUUID()
   638  	newDS, err = client.AppsV1().DaemonSets(newDS.Namespace).Create(context.Background(), newDS, metav1.CreateOptions{})
   639  	if err != nil {
   640  		t.Fatal(err)
   641  	}
   642  
   643  	// Sanity check
   644  	if newDS.UID == oldDS.UID {
   645  		t.Fatal("New DS has the same UID as the old one!")
   646  	}
   647  
   648  	waitForQueueLength(1, "recreated DS")
   649  	ok = dsc.processNextWorkItem(context.TODO())
   650  	if !ok {
   651  		t.Fatal("Queue is shutting down!")
   652  	}
   653  
   654  	newDSKey, err := controller.KeyFunc(newDS)
   655  	if err != nil {
   656  		t.Fatal(err)
   657  	}
   658  	dsExp, exists, err = dsc.expectations.GetExpectations(newDSKey)
   659  	if err != nil {
   660  		t.Fatal(err)
   661  	}
   662  	if !exists {
   663  		t.Fatalf("No expectations found for DaemonSet %q", newDSKey)
   664  	}
   665  	if dsExp.Fulfilled() {
   666  		t.Errorf("There should be unfulfilled expectation for creating new pods for DaemonSet %q", newDSKey)
   667  	}
   668  
   669  	err = validateSyncDaemonSets(manager, fakePodControl, 1, 0, 0)
   670  	if err != nil {
   671  		t.Error(err)
   672  	}
   673  	fakePodControl.Clear()
   674  }
   675  
   676  func markPodsReady(store cache.Store) {
   677  	// mark pods as ready
   678  	for _, obj := range store.List() {
   679  		pod := obj.(*v1.Pod)
   680  		markPodReady(pod)
   681  	}
   682  }
   683  
   684  func markPodReady(pod *v1.Pod) {
   685  	condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
   686  	podutil.UpdatePodCondition(&pod.Status, &condition)
   687  }
   688  
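        // Setting the Ready condition is how these tests simulate the kubelet reporting
        // a daemon pod as running and ready; with MinReadySeconds left at its zero
        // default, the controller then also counts such pods as available.
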
   689  // DaemonSets without node selectors should launch pods on every node.
   690  func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
   691  	for _, strategy := range updateStrategies() {
   692  		ds := newDaemonSet("foo")
   693  		ds.Spec.UpdateStrategy = *strategy
   694  		_, ctx := ktesting.NewTestContext(t)
   695  		manager, podControl, _, err := newTestController(ctx, ds)
   696  		if err != nil {
   697  			t.Fatalf("error creating DaemonSets controller: %v", err)
   698  		}
   699  		addNodes(manager.nodeStore, 0, 5, nil)
   700  		err = manager.dsStore.Add(ds)
   701  		if err != nil {
   702  			t.Error(err)
   703  		}
   704  
   705  		expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   706  	}
   707  }
   708  
   709  // DaemonSets without node selectors should launch pods on every node, targeting each node via NodeAffinity.
   710  func TestSimpleDaemonSetScheduleDaemonSetPodsLaunchesPods(t *testing.T) {
   711  	nodeNum := 5
   712  	for _, strategy := range updateStrategies() {
   713  		ds := newDaemonSet("foo")
   714  		ds.Spec.UpdateStrategy = *strategy
   715  		_, ctx := ktesting.NewTestContext(t)
   716  		manager, podControl, _, err := newTestController(ctx, ds)
   717  		if err != nil {
   718  			t.Fatalf("error creating DaemonSets controller: %v", err)
   719  		}
   720  		addNodes(manager.nodeStore, 0, nodeNum, nil)
   721  		err = manager.dsStore.Add(ds)
   722  		if err != nil {
   723  			t.Fatal(err)
   724  		}
   725  
   726  		expectSyncDaemonSets(t, manager, ds, podControl, nodeNum, 0, 0)
   727  
   728  		if len(podControl.podIDMap) != nodeNum {
   729  			t.Fatalf("failed to create pods for DaemonSet")
   730  		}
   731  
   732  		nodeMap := make(map[string]*v1.Node)
   733  		for _, node := range manager.nodeStore.List() {
   734  			n := node.(*v1.Node)
   735  			nodeMap[n.Name] = n
   736  		}
   737  		if len(nodeMap) != nodeNum {
   738  			t.Fatalf("not enough nodes in the store, expected: %v, got: %v",
   739  				nodeNum, len(nodeMap))
   740  		}
   741  
   742  		for _, pod := range podControl.podIDMap {
   743  			if len(pod.Spec.NodeName) != 0 {
   744  				t.Fatalf("the hostname of pod %v should be empty, but got %s",
   745  					pod.Name, pod.Spec.NodeName)
   746  			}
   747  			if pod.Spec.Affinity == nil {
   748  				t.Fatalf("the Affinity of pod %s is nil.", pod.Name)
   749  			}
   750  			if pod.Spec.Affinity.NodeAffinity == nil {
   751  				t.Fatalf("the NodeAffinity of pod %s is nil.", pod.Name)
   752  			}
   753  			nodeSelector := pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution
   754  			if nodeSelector == nil {
   755  				t.Fatalf("the node selector of pod %s is nil.", pod.Name)
   756  			}
   757  			if len(nodeSelector.NodeSelectorTerms) != 1 {
   758  				t.Fatalf("incorrect number of node selector terms in pod %s, expected: 1, got: %d.",
   759  					pod.Name, len(nodeSelector.NodeSelectorTerms))
   760  			}
   761  
   762  			if len(nodeSelector.NodeSelectorTerms[0].MatchFields) != 1 {
   763  				t.Fatalf("incorrect number of fields in node selector term for pod %s, expected: 1, got: %d.",
   764  					pod.Name, len(nodeSelector.NodeSelectorTerms[0].MatchFields))
   765  			}
   766  
   767  			field := nodeSelector.NodeSelectorTerms[0].MatchFields[0]
   768  			if field.Key == metav1.ObjectNameField {
   769  				if field.Operator != v1.NodeSelectorOpIn {
   770  					t.Fatalf("the operation of hostname NodeAffinity is not %v", v1.NodeSelectorOpIn)
   771  				}
   772  
   773  				if len(field.Values) != 1 {
   774  					t.Fatalf("incorrect hostname in node affinity: expected 1, got %v", len(field.Values))
   775  				}
   776  				delete(nodeMap, field.Values[0])
   777  			}
   778  		}
   779  
   780  		if len(nodeMap) != 0 {
   781  			t.Fatalf("did not find pods on nodes %+v", nodeMap)
   782  		}
   783  	}
   784  }
   785  
   786  // Simulate a cluster with 100 nodes and a pod creation limit (like a quota limit)
   787  // of 10 pods, and verify that the DaemonSet doesn't make 100 create calls per sync pass
   788  func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
   789  	for _, strategy := range updateStrategies() {
   790  		ds := newDaemonSet("foo")
   791  		ds.Spec.UpdateStrategy = *strategy
   792  		_, ctx := ktesting.NewTestContext(t)
   793  		manager, podControl, clientset, err := newTestController(ctx, ds)
   794  		if err != nil {
   795  			t.Fatalf("error creating DaemonSets controller: %v", err)
   796  		}
   797  		podControl.FakePodControl.CreateLimit = 10
   798  		addNodes(manager.nodeStore, 0, podControl.FakePodControl.CreateLimit*10, nil)
   799  		err = manager.dsStore.Add(ds)
   800  		if err != nil {
   801  			t.Fatal(err)
   802  		}
   803  
   804  		var updated *apps.DaemonSet
   805  		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   806  			if action.GetSubresource() != "status" {
   807  				return false, nil, nil
   808  			}
   809  			if u, ok := action.(core.UpdateAction); ok {
   810  				updated = u.GetObject().(*apps.DaemonSet)
   811  			}
   812  			return false, nil, nil
   813  		})
   814  
   815  		expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
   816  
   817  		expectedLimit := 0
   818  		for pass := uint8(0); expectedLimit <= podControl.FakePodControl.CreateLimit; pass++ {
   819  			expectedLimit += controller.SlowStartInitialBatchSize << pass
   820  		}
   821  		if podControl.FakePodControl.CreateCallCount > expectedLimit {
   822  			t.Errorf("Unexpected number of create calls.  Expected <= %d, saw %d\n", expectedLimit, podControl.FakePodControl.CreateCallCount)
   823  		}
   824  		if updated == nil {
   825  			t.Fatalf("Failed to get updated status")
   826  		}
   827  		if got, want := updated.Status.DesiredNumberScheduled, int32(podControl.FakePodControl.CreateLimit)*10; got != want {
   828  			t.Errorf("Status.DesiredNumberScheduled = %v, want %v", got, want)
   829  		}
   830  		if got, want := updated.Status.CurrentNumberScheduled, int32(podControl.FakePodControl.CreateLimit); got != want {
   831  			t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
   832  		}
   833  		if got, want := updated.Status.UpdatedNumberScheduled, int32(podControl.FakePodControl.CreateLimit); got != want {
   834  			t.Errorf("Status.UpdatedNumberScheduled = %v, want %v", got, want)
   835  		}
   836  	}
   837  }
   838  
   839  func TestDaemonSetPodCreateExpectationsError(t *testing.T) {
   840  	logger, _ := ktesting.NewTestContext(t)
   841  	strategies := updateStrategies()
   842  	for _, strategy := range strategies {
   843  		ds := newDaemonSet("foo")
   844  		ds.Spec.UpdateStrategy = *strategy
   845  		_, ctx := ktesting.NewTestContext(t)
   846  		manager, podControl, _, err := newTestController(ctx, ds)
   847  		if err != nil {
   848  			t.Fatalf("error creating DaemonSets controller: %v", err)
   849  		}
   850  		podControl.FakePodControl.CreateLimit = 10
   851  		creationExpectations := 100
   852  		addNodes(manager.nodeStore, 0, 100, nil)
   853  		err = manager.dsStore.Add(ds)
   854  		if err != nil {
   855  			t.Fatal(err)
   856  		}
   857  
   858  		expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)
   859  
   860  		dsKey, err := controller.KeyFunc(ds)
   861  		if err != nil {
   862  			t.Fatalf("error get DaemonSets controller key: %v", err)
   863  		}
   864  
   865  		if !manager.expectations.SatisfiedExpectations(logger, dsKey) {
   866  			t.Errorf("Unsatisfied pod creation expectations. Expected %d", creationExpectations)
   867  		}
   868  	}
   869  }
   870  
   871  func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
   872  	for _, strategy := range updateStrategies() {
   873  		ds := newDaemonSet("foo")
   874  		ds.Spec.UpdateStrategy = *strategy
   875  		_, ctx := ktesting.NewTestContext(t)
   876  		manager, podControl, clientset, err := newTestController(ctx, ds)
   877  		if err != nil {
   878  			t.Fatalf("error creating DaemonSets controller: %v", err)
   879  		}
   880  
   881  		var updated *apps.DaemonSet
   882  		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   883  			if action.GetSubresource() != "status" {
   884  				return false, nil, nil
   885  			}
   886  			if u, ok := action.(core.UpdateAction); ok {
   887  				updated = u.GetObject().(*apps.DaemonSet)
   888  			}
   889  			return false, nil, nil
   890  		})
   891  
   892  		manager.dsStore.Add(ds)
   893  		addNodes(manager.nodeStore, 0, 5, nil)
   894  		expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
   895  
   896  		// Make sure the single sync() updated Status already for the change made
   897  		// during the manage() phase.
   898  		if got, want := updated.Status.CurrentNumberScheduled, int32(5); got != want {
   899  			t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
   900  		}
   901  	}
   902  }
   903  
   904  func TestSimpleDaemonSetUpdatesStatusError(t *testing.T) {
   905  	var (
   906  		syncErr   = fmt.Errorf("sync error")
   907  		statusErr = fmt.Errorf("status error")
   908  	)
   909  
   910  	testCases := []struct {
   911  		desc string
   912  
   913  		hasSyncErr   bool
   914  		hasStatusErr bool
   915  
   916  		expectedErr error
   917  	}{
   918  		{
   919  			desc:         "sync error",
   920  			hasSyncErr:   true,
   921  			hasStatusErr: false,
   922  			expectedErr:  syncErr,
   923  		},
   924  		{
   925  			desc:         "status error",
   926  			hasSyncErr:   false,
   927  			hasStatusErr: true,
   928  			expectedErr:  statusErr,
   929  		},
   930  		{
   931  			desc:         "sync and status error",
   932  			hasSyncErr:   true,
   933  			hasStatusErr: true,
   934  			expectedErr:  syncErr,
   935  		},
   936  	}
   937  
   938  	for _, tc := range testCases {
   939  		t.Run(tc.desc, func(t *testing.T) {
   940  			for _, strategy := range updateStrategies() {
   941  				ds := newDaemonSet("foo")
   942  				ds.Spec.UpdateStrategy = *strategy
   943  				_, ctx := ktesting.NewTestContext(t)
   944  				manager, podControl, clientset, err := newTestController(ctx, ds)
   945  				if err != nil {
   946  					t.Fatalf("error creating DaemonSets controller: %v", err)
   947  				}
   948  
   949  				if tc.hasSyncErr {
   950  					podControl.FakePodControl.Err = syncErr
   951  				}
   952  
   953  				clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   954  					if action.GetSubresource() != "status" {
   955  						return false, nil, nil
   956  					}
   957  
   958  					if tc.hasStatusErr {
   959  						return true, nil, statusErr
   960  					} else {
   961  						return false, nil, nil
   962  					}
   963  				})
   964  
   965  				manager.dsStore.Add(ds)
   966  				addNodes(manager.nodeStore, 0, 1, nil)
   967  				expectSyncDaemonSetsWithError(t, manager, ds, podControl, 1, 0, 0, tc.expectedErr)
   968  			}
   969  		})
   970  	}
   971  }
   972  
   973  // DaemonSets should do nothing if there aren't any nodes
   974  func TestNoNodesDoesNothing(t *testing.T) {
   975  	for _, strategy := range updateStrategies() {
   976  		_, ctx := ktesting.NewTestContext(t)
   977  		manager, podControl, _, err := newTestController(ctx)
   978  		if err != nil {
   979  			t.Fatalf("error creating DaemonSets controller: %v", err)
   980  		}
   981  		ds := newDaemonSet("foo")
   982  		ds.Spec.UpdateStrategy = *strategy
   983  		err = manager.dsStore.Add(ds)
   984  		if err != nil {
   985  			t.Fatal(err)
   986  		}
   987  
   988  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
   989  	}
   990  }
   991  
   992  // DaemonSets without node selectors should launch on a single node in a
   993  // single node cluster.
   994  func TestOneNodeDaemonLaunchesPod(t *testing.T) {
   995  	for _, strategy := range updateStrategies() {
   996  		ds := newDaemonSet("foo")
   997  		ds.Spec.UpdateStrategy = *strategy
   998  		_, ctx := ktesting.NewTestContext(t)
   999  		manager, podControl, _, err := newTestController(ctx, ds)
  1000  		if err != nil {
  1001  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1002  		}
  1003  		err = manager.nodeStore.Add(newNode("only-node", nil))
  1004  		if err != nil {
  1005  			t.Fatal(err)
  1006  		}
  1007  		err = manager.dsStore.Add(ds)
  1008  		if err != nil {
  1009  			t.Fatal(err)
  1010  		}
  1011  
  1012  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1013  	}
  1014  }
  1015  
  1016  // DaemonSets should place pods onto NotReady nodes
  1017  func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
  1018  	for _, strategy := range updateStrategies() {
  1019  		ds := newDaemonSet("foo")
  1020  		ds.Spec.UpdateStrategy = *strategy
  1021  		_, ctx := ktesting.NewTestContext(t)
  1022  		manager, podControl, _, err := newTestController(ctx, ds)
  1023  		if err != nil {
  1024  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1025  		}
  1026  		node := newNode("not-ready", nil)
  1027  		node.Status.Conditions = []v1.NodeCondition{
  1028  			{Type: v1.NodeReady, Status: v1.ConditionFalse},
  1029  		}
  1030  		err = manager.nodeStore.Add(node)
  1031  		if err != nil {
  1032  			t.Fatal(err)
  1033  		}
  1034  
  1035  		err = manager.dsStore.Add(ds)
  1036  		if err != nil {
  1037  			t.Fatal(err)
  1038  		}
  1039  
  1040  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1041  	}
  1042  }
  1043  
  1044  func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
  1045  	return v1.PodSpec{
  1046  		NodeName: nodeName,
  1047  		Containers: []v1.Container{{
  1048  			Resources: v1.ResourceRequirements{
  1049  				Requests: allocatableResources(memory, cpu),
  1050  			},
  1051  		}},
  1052  	}
  1053  }
  1054  
  1055  func resourceContainerSpec(memory, cpu string) v1.ResourceRequirements {
  1056  	return v1.ResourceRequirements{
  1057  		Requests: allocatableResources(memory, cpu),
  1058  	}
  1059  }
  1060  
  1061  func resourcePodSpecWithoutNodeName(memory, cpu string) v1.PodSpec {
  1062  	return v1.PodSpec{
  1063  		Containers: []v1.Container{{
  1064  			Resources: v1.ResourceRequirements{
  1065  				Requests: allocatableResources(memory, cpu),
  1066  			},
  1067  		}},
  1068  	}
  1069  }
  1070  
  1071  func allocatableResources(memory, cpu string) v1.ResourceList {
  1072  	return v1.ResourceList{
  1073  		v1.ResourceMemory: resource.MustParse(memory),
  1074  		v1.ResourceCPU:    resource.MustParse(cpu),
  1075  		v1.ResourcePods:   resource.MustParse("100"),
  1076  	}
  1077  }
  1078  
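        // Illustrative sketch, not one of the original tests: how the resource helpers
        // above combine to model a node that cannot fit a daemon pod. The
        // insufficient-capacity tests below follow this shape.
        func exampleInsufficientCapacitySketch() (*v1.Node, v1.PodSpec) {
        	node := newNode("small-node", nil)
        	// The node only has 100M of memory allocatable...
        	node.Status.Allocatable = allocatableResources("100M", "200m")
        	// ...while the daemon pod requests 200M on that same node.
        	podSpec := resourcePodSpec("small-node", "200M", "75m")
        	return node, podSpec
        }
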
  1079  // DaemonSets should not unschedule a daemon pod from a node with insufficient free resources
  1080  func TestInsufficientCapacityNodeDaemonDoesNotUnscheduleRunningPod(t *testing.T) {
  1081  	for _, strategy := range updateStrategies() {
  1082  		podSpec := resourcePodSpec("too-much-mem", "75M", "75m")
  1083  		podSpec.NodeName = "too-much-mem"
  1084  		ds := newDaemonSet("foo")
  1085  		ds.Spec.UpdateStrategy = *strategy
  1086  		ds.Spec.Template.Spec = podSpec
  1087  		_, ctx := ktesting.NewTestContext(t)
  1088  		manager, podControl, _, err := newTestController(ctx, ds)
  1089  		if err != nil {
  1090  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1091  		}
  1092  		node := newNode("too-much-mem", nil)
  1093  		node.Status.Allocatable = allocatableResources("100M", "200m")
  1094  		err = manager.nodeStore.Add(node)
  1095  		if err != nil {
  1096  			t.Fatal(err)
  1097  		}
  1098  		err = manager.podStore.Add(&v1.Pod{
  1099  			Spec: podSpec,
  1100  		})
  1101  		if err != nil {
  1102  			t.Fatal(err)
  1103  		}
  1104  		err = manager.dsStore.Add(ds)
  1105  		if err != nil {
  1106  			t.Fatal(err)
  1107  		}
  1108  		switch strategy.Type {
  1109  		case apps.OnDeleteDaemonSetStrategyType:
  1110  			expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1111  		case apps.RollingUpdateDaemonSetStrategyType:
  1112  			expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1113  		default:
  1114  			t.Fatalf("unexpected UpdateStrategy %+v", strategy)
  1115  		}
  1116  	}
  1117  }
  1118  
  1119  // DaemonSets should only place pods onto nodes with sufficient free resources and a matching node selector
  1120  func TestInsufficientCapacityNodeSufficientCapacityWithNodeLabelDaemonLaunchPod(t *testing.T) {
  1121  	_, ctx := ktesting.NewTestContext(t)
  1122  	podSpec := resourcePodSpecWithoutNodeName("50M", "75m")
  1123  	ds := newDaemonSet("foo")
  1124  	ds.Spec.Template.Spec = podSpec
  1125  	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1126  	manager, podControl, _, err := newTestController(ctx, ds)
  1127  	if err != nil {
  1128  		t.Fatalf("error creating DaemonSets controller: %v", err)
  1129  	}
  1130  	node1 := newNode("not-enough-resource", nil)
  1131  	node1.Status.Allocatable = allocatableResources("10M", "20m")
  1132  	node2 := newNode("enough-resource", simpleNodeLabel)
  1133  	node2.Status.Allocatable = allocatableResources("100M", "200m")
  1134  	err = manager.nodeStore.Add(node1)
  1135  	if err != nil {
  1136  		t.Fatal(err)
  1137  	}
  1138  	err = manager.nodeStore.Add(node2)
  1139  	if err != nil {
  1140  		t.Fatal(err)
  1141  	}
  1142  	err = manager.dsStore.Add(ds)
  1143  	if err != nil {
  1144  		t.Fatal(err)
  1145  	}
  1146  	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1147  	// we do not expect any event for insufficient free resource
  1148  	if len(manager.fakeRecorder.Events) != 0 {
  1149  		t.Fatalf("unexpected events, got %v, expected %v: %+v", len(manager.fakeRecorder.Events), 0, manager.fakeRecorder.Events)
  1150  	}
  1151  }
  1152  
  1153  // DaemonSet should launch a pod on a node whose NetworkUnavailable condition is true.
  1154  func TestNetworkUnavailableNodeDaemonLaunchesPod(t *testing.T) {
  1155  	for _, strategy := range updateStrategies() {
  1156  		ds := newDaemonSet("simple")
  1157  		ds.Spec.UpdateStrategy = *strategy
  1158  		_, ctx := ktesting.NewTestContext(t)
  1159  		manager, podControl, _, err := newTestController(ctx, ds)
  1160  		if err != nil {
  1161  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1162  		}
  1163  
  1164  		node := newNode("network-unavailable", nil)
  1165  		node.Status.Conditions = []v1.NodeCondition{
  1166  			{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
  1167  		}
  1168  		err = manager.nodeStore.Add(node)
  1169  		if err != nil {
  1170  			t.Fatal(err)
  1171  		}
  1172  		err = manager.dsStore.Add(ds)
  1173  		if err != nil {
  1174  			t.Fatal(err)
  1175  		}
  1176  
  1177  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1178  	}
  1179  }
  1180  
  1181  // DaemonSets should not take any actions when being deleted
  1182  func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
  1183  	for _, strategy := range updateStrategies() {
  1184  		podSpec := resourcePodSpec("not-too-much-mem", "75M", "75m")
  1185  		ds := newDaemonSet("foo")
  1186  		ds.Spec.UpdateStrategy = *strategy
  1187  		ds.Spec.Template.Spec = podSpec
  1188  		now := metav1.Now()
  1189  		ds.DeletionTimestamp = &now
  1190  		_, ctx := ktesting.NewTestContext(t)
  1191  		manager, podControl, _, err := newTestController(ctx, ds)
  1192  		if err != nil {
  1193  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1194  		}
  1195  		node := newNode("not-too-much-mem", nil)
  1196  		node.Status.Allocatable = allocatableResources("200M", "200m")
  1197  		err = manager.nodeStore.Add(node)
  1198  		if err != nil {
  1199  			t.Fatal(err)
  1200  		}
  1201  		err = manager.podStore.Add(&v1.Pod{
  1202  			Spec: podSpec,
  1203  		})
  1204  		if err != nil {
  1205  			t.Fatal(err)
  1206  		}
  1207  		err = manager.dsStore.Add(ds)
  1208  		if err != nil {
  1209  			t.Fatal(err)
  1210  		}
  1211  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1212  	}
  1213  }
  1214  
  1215  func TestDontDoAnythingIfBeingDeletedRace(t *testing.T) {
  1216  	for _, strategy := range updateStrategies() {
  1217  		// Bare client says it IS deleted.
  1218  		ds := newDaemonSet("foo")
  1219  		ds.Spec.UpdateStrategy = *strategy
  1220  		now := metav1.Now()
  1221  		ds.DeletionTimestamp = &now
  1222  		_, ctx := ktesting.NewTestContext(t)
  1223  		manager, podControl, _, err := newTestController(ctx, ds)
  1224  		if err != nil {
  1225  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1226  		}
  1227  		addNodes(manager.nodeStore, 0, 5, nil)
  1228  
  1229  		// Lister (cache) says it's NOT deleted.
  1230  		ds2 := *ds
  1231  		ds2.DeletionTimestamp = nil
  1232  		err = manager.dsStore.Add(&ds2)
  1233  		if err != nil {
  1234  			t.Fatal(err)
  1235  		}
  1236  
  1237  		// The existence of a matching orphan should block all actions in this state.
  1238  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
  1239  		err = manager.podStore.Add(pod)
  1240  		if err != nil {
  1241  			t.Fatal(err)
  1242  		}
  1243  
  1244  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1245  	}
  1246  }
  1247  
  1248  // Test that if a pod using a host port is already scheduled on a node but
  1249  // belongs to the same DaemonSet, we don't delete that pod
  1250  //
  1251  // Issue: https://github.com/kubernetes/kubernetes/issues/22309
  1252  func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
  1253  	for _, strategy := range updateStrategies() {
  1254  		podSpec := v1.PodSpec{
  1255  			NodeName: "port-conflict",
  1256  			Containers: []v1.Container{{
  1257  				Ports: []v1.ContainerPort{{
  1258  					HostPort: 666,
  1259  				}},
  1260  			}},
  1261  		}
  1262  		_, ctx := ktesting.NewTestContext(t)
  1263  		manager, podControl, _, err := newTestController(ctx)
  1264  		if err != nil {
  1265  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1266  		}
  1267  		node := newNode("port-conflict", nil)
  1268  		err = manager.nodeStore.Add(node)
  1269  		if err != nil {
  1270  			t.Fatal(err)
  1271  		}
  1272  		ds := newDaemonSet("foo")
  1273  		ds.Spec.UpdateStrategy = *strategy
  1274  		ds.Spec.Template.Spec = podSpec
  1275  		err = manager.dsStore.Add(ds)
  1276  		if err != nil {
  1277  			t.Fatal(err)
  1278  		}
  1279  		pod := newPod(ds.Name+"-", node.Name, simpleDaemonSetLabel, ds)
  1280  		err = manager.podStore.Add(pod)
  1281  		if err != nil {
  1282  			t.Fatal(err)
  1283  		}
  1284  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1285  	}
  1286  }
  1287  
  1288  // DaemonSets should place pods onto nodes that would not cause port conflicts
  1289  func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
  1290  	for _, strategy := range updateStrategies() {
  1291  		podSpec1 := v1.PodSpec{
  1292  			NodeName: "no-port-conflict",
  1293  			Containers: []v1.Container{{
  1294  				Ports: []v1.ContainerPort{{
  1295  					HostPort: 6661,
  1296  				}},
  1297  			}},
  1298  		}
  1299  		podSpec2 := v1.PodSpec{
  1300  			NodeName: "no-port-conflict",
  1301  			Containers: []v1.Container{{
  1302  				Ports: []v1.ContainerPort{{
  1303  					HostPort: 6662,
  1304  				}},
  1305  			}},
  1306  		}
  1307  		ds := newDaemonSet("foo")
  1308  		ds.Spec.UpdateStrategy = *strategy
  1309  		ds.Spec.Template.Spec = podSpec2
  1310  		_, ctx := ktesting.NewTestContext(t)
  1311  		manager, podControl, _, err := newTestController(ctx, ds)
  1312  		if err != nil {
  1313  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1314  		}
  1315  		node := newNode("no-port-conflict", nil)
  1316  		err = manager.nodeStore.Add(node)
  1317  		if err != nil {
  1318  			t.Fatal(err)
  1319  		}
  1320  		err = manager.podStore.Add(&v1.Pod{
  1321  			Spec: podSpec1,
  1322  		})
  1323  		if err != nil {
  1324  			t.Fatal(err)
  1325  		}
  1326  		err = manager.dsStore.Add(ds)
  1327  		if err != nil {
  1328  			t.Fatal(err)
  1329  		}
  1330  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1331  	}
  1332  }
  1333  
  1334  // DaemonSetController should not sync DaemonSets with empty pod selectors.
  1335  //
  1336  // issue https://github.com/kubernetes/kubernetes/pull/23223
  1337  func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
  1338  	// Create a misconfigured DaemonSet. An empty pod selector is invalid but could happen
  1339  	// if we upgrade and make a backwards incompatible change.
  1340  	//
  1341  // The node selector matches no nodes, which mimics the behavior of kubectl delete.
  1342  	//
  1343  	// The DaemonSet should not schedule pods and should not delete scheduled pods in
  1344  // this case even though its empty pod selector matches all pods. The DaemonSetController
  1345  	// should detect this misconfiguration and choose not to sync the DaemonSet. We should
  1346  	// not observe a deletion of the pod on node1.
  1347  	for _, strategy := range updateStrategies() {
  1348  		ds := newDaemonSet("foo")
  1349  		ds.Spec.UpdateStrategy = *strategy
  1350  		ls := metav1.LabelSelector{}
  1351  		ds.Spec.Selector = &ls
  1352  		ds.Spec.Template.Spec.NodeSelector = map[string]string{"foo": "bar"}
  1353  
  1354  		_, ctx := ktesting.NewTestContext(t)
  1355  		manager, podControl, _, err := newTestController(ctx, ds)
  1356  		if err != nil {
  1357  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1358  		}
  1359  		err = manager.nodeStore.Add(newNode("node1", nil))
  1360  		if err != nil {
  1361  			t.Fatal(err)
  1362  		}
  1363  		// Create pod not controlled by a daemonset.
  1364  		err = manager.podStore.Add(&v1.Pod{
  1365  			ObjectMeta: metav1.ObjectMeta{
  1366  				Labels:    map[string]string{"bang": "boom"},
  1367  				Namespace: metav1.NamespaceDefault,
  1368  			},
  1369  			Spec: v1.PodSpec{
  1370  				NodeName: "node1",
  1371  			},
  1372  		})
  1373  		if err != nil {
  1374  			t.Fatal(err)
  1375  		}
  1376  		err = manager.dsStore.Add(ds)
  1377  		if err != nil {
  1378  			t.Fatal(err)
  1379  		}
  1380  
  1381  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 1)
  1382  	}
  1383  }
  1384  
  1385  // Controller should not create pods on nodes that already have daemon pods, and should remove excess pods from nodes that have too many.
  1386  func TestDealsWithExistingPods(t *testing.T) {
  1387  	for _, strategy := range updateStrategies() {
  1388  		ds := newDaemonSet("foo")
  1389  		ds.Spec.UpdateStrategy = *strategy
  1390  		_, ctx := ktesting.NewTestContext(t)
  1391  		manager, podControl, _, err := newTestController(ctx, ds)
  1392  		if err != nil {
  1393  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1394  		}
  1395  		err = manager.dsStore.Add(ds)
  1396  		if err != nil {
  1397  			t.Fatal(err)
  1398  		}
  1399  		addNodes(manager.nodeStore, 0, 5, nil)
  1400  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
  1401  		addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2)
  1402  		addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
  1403  		addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
  1404  		expectSyncDaemonSets(t, manager, ds, podControl, 2, 5, 0)
  1405  	}
  1406  }
  1407  
  1408  // Daemon with node selector should launch pods on nodes matching selector.
  1409  func TestSelectorDaemonLaunchesPods(t *testing.T) {
  1410  	for _, strategy := range updateStrategies() {
  1411  		daemon := newDaemonSet("foo")
  1412  		daemon.Spec.UpdateStrategy = *strategy
  1413  		daemon.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1414  		_, ctx := ktesting.NewTestContext(t)
  1415  		manager, podControl, _, err := newTestController(ctx, daemon)
  1416  		if err != nil {
  1417  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1418  		}
  1419  		addNodes(manager.nodeStore, 0, 4, nil)
  1420  		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1421  		err = manager.dsStore.Add(daemon)
  1422  		if err != nil {
  1423  			t.Fatal(err)
  1424  		}
  1425  		expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
  1426  	}
  1427  }
  1428  
  1429  // Daemon with node selector should delete pods from nodes that do not satisfy selector.
  1430  func TestSelectorDaemonDeletesUnselectedPods(t *testing.T) {
  1431  	for _, strategy := range updateStrategies() {
  1432  		ds := newDaemonSet("foo")
  1433  		ds.Spec.UpdateStrategy = *strategy
  1434  		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1435  		_, ctx := ktesting.NewTestContext(t)
  1436  		manager, podControl, _, err := newTestController(ctx, ds)
  1437  		if err != nil {
  1438  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1439  		}
  1440  		err = manager.dsStore.Add(ds)
  1441  		if err != nil {
  1442  			t.Fatal(err)
  1443  		}
  1444  		addNodes(manager.nodeStore, 0, 5, nil)
  1445  		addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
  1446  		addPods(manager.podStore, "node-0", simpleDaemonSetLabel2, ds, 2)
  1447  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
  1448  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 1)
  1449  		addPods(manager.podStore, "node-4", simpleDaemonSetLabel, ds, 1)
  1450  		expectSyncDaemonSets(t, manager, ds, podControl, 5, 4, 0)
  1451  	}
  1452  }
  1453  
  1454  // DaemonSet with node selector should launch pods on nodes matching selector, but also deal with existing pods on nodes.
  1455  func TestSelectorDaemonDealsWithExistingPods(t *testing.T) {
  1456  	for _, strategy := range updateStrategies() {
  1457  		ds := newDaemonSet("foo")
  1458  		ds.Spec.UpdateStrategy = *strategy
  1459  		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1460  		_, ctx := ktesting.NewTestContext(t)
  1461  		manager, podControl, _, err := newTestController(ctx, ds)
  1462  		if err != nil {
  1463  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1464  		}
  1465  		err = manager.dsStore.Add(ds)
  1466  		if err != nil {
  1467  			t.Fatal(err)
  1468  		}
  1469  		addNodes(manager.nodeStore, 0, 5, nil)
  1470  		addNodes(manager.nodeStore, 5, 5, simpleNodeLabel)
  1471  		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
  1472  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 3)
  1473  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel2, ds, 2)
  1474  		addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 4)
  1475  		addPods(manager.podStore, "node-6", simpleDaemonSetLabel, ds, 13)
  1476  		addPods(manager.podStore, "node-7", simpleDaemonSetLabel2, ds, 4)
  1477  		addPods(manager.podStore, "node-9", simpleDaemonSetLabel, ds, 1)
  1478  		addPods(manager.podStore, "node-9", simpleDaemonSetLabel2, ds, 1)
  1479  		expectSyncDaemonSets(t, manager, ds, podControl, 3, 20, 0)
  1480  	}
  1481  }
  1482  
  1483  // DaemonSet with node selector which does not match any node labels should not launch pods.
  1484  func TestBadSelectorDaemonDoesNothing(t *testing.T) {
  1485  	for _, strategy := range updateStrategies() {
  1486  		_, ctx := ktesting.NewTestContext(t)
  1487  		manager, podControl, _, err := newTestController(ctx)
  1488  		if err != nil {
  1489  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1490  		}
  1491  		addNodes(manager.nodeStore, 0, 4, nil)
  1492  		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1493  		ds := newDaemonSet("foo")
  1494  		ds.Spec.UpdateStrategy = *strategy
  1495  		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel2
  1496  		err = manager.dsStore.Add(ds)
  1497  		if err != nil {
  1498  			t.Fatal(err)
  1499  		}
  1500  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1501  	}
  1502  }
  1503  
  1504  // DaemonSet with node name should launch pod on node with corresponding name.
  1505  func TestNameDaemonSetLaunchesPods(t *testing.T) {
  1506  	for _, strategy := range updateStrategies() {
  1507  		ds := newDaemonSet("foo")
  1508  		ds.Spec.UpdateStrategy = *strategy
  1509  		ds.Spec.Template.Spec.NodeName = "node-0"
  1510  		_, ctx := ktesting.NewTestContext(t)
  1511  		manager, podControl, _, err := newTestController(ctx, ds)
  1512  		if err != nil {
  1513  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1514  		}
  1515  		addNodes(manager.nodeStore, 0, 5, nil)
  1516  		err = manager.dsStore.Add(ds)
  1517  		if err != nil {
  1518  			t.Fatal(err)
  1519  		}
  1520  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1521  	}
  1522  }
  1523  
  1524  // DaemonSet with node name that does not exist should not launch pods.
  1525  func TestBadNameDaemonSetDoesNothing(t *testing.T) {
  1526  	for _, strategy := range updateStrategies() {
  1527  		ds := newDaemonSet("foo")
  1528  		ds.Spec.UpdateStrategy = *strategy
  1529  		ds.Spec.Template.Spec.NodeName = "node-10"
  1530  		_, ctx := ktesting.NewTestContext(t)
  1531  		manager, podControl, _, err := newTestController(ctx, ds)
  1532  		if err != nil {
  1533  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1534  		}
  1535  		addNodes(manager.nodeStore, 0, 5, nil)
  1536  		err = manager.dsStore.Add(ds)
  1537  		if err != nil {
  1538  			t.Fatal(err)
  1539  		}
  1540  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1541  	}
  1542  }
  1543  
  1544  // DaemonSet with a node selector and a node name that both match a node should launch a pod on that node.
  1545  func TestNameAndSelectorDaemonSetLaunchesPods(t *testing.T) {
  1546  	for _, strategy := range updateStrategies() {
  1547  		ds := newDaemonSet("foo")
  1548  		ds.Spec.UpdateStrategy = *strategy
  1549  		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1550  		ds.Spec.Template.Spec.NodeName = "node-6"
  1551  		_, ctx := ktesting.NewTestContext(t)
  1552  		manager, podControl, _, err := newTestController(ctx, ds)
  1553  		if err != nil {
  1554  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1555  		}
  1556  		addNodes(manager.nodeStore, 0, 4, nil)
  1557  		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1558  		err = manager.dsStore.Add(ds)
  1559  		if err != nil {
  1560  			t.Fatal(err)
  1561  		}
  1562  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1563  	}
  1564  }
  1565  
  1566  // DaemonSet with node selector that matches some nodes, and node name that matches a different node, should do nothing.
  1567  func TestInconsistentNameSelectorDaemonSetDoesNothing(t *testing.T) {
  1568  	for _, strategy := range updateStrategies() {
  1569  		ds := newDaemonSet("foo")
  1570  		ds.Spec.UpdateStrategy = *strategy
  1571  		ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1572  		ds.Spec.Template.Spec.NodeName = "node-0"
  1573  		_, ctx := ktesting.NewTestContext(t)
  1574  		manager, podControl, _, err := newTestController(ctx, ds)
  1575  		if err != nil {
  1576  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1577  		}
  1578  		addNodes(manager.nodeStore, 0, 4, nil)
  1579  		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1580  		err = manager.dsStore.Add(ds)
  1581  		if err != nil {
  1582  			t.Fatal(err)
  1583  		}
  1584  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1585  	}
  1586  }
  1587  
  1588  // DaemonSet with a node selector matching some nodes should launch pods on all matching nodes.
  1589  func TestSelectorDaemonSetLaunchesPods(t *testing.T) {
  1590  	_, ctx := ktesting.NewTestContext(t)
  1591  	ds := newDaemonSet("foo")
  1592  	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  1593  	manager, podControl, _, err := newTestController(ctx, ds)
  1594  	if err != nil {
  1595  		t.Fatalf("error creating DaemonSets controller: %v", err)
  1596  	}
  1597  	addNodes(manager.nodeStore, 0, 4, nil)
  1598  	addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1599  	err = manager.dsStore.Add(ds)
  1600  	if err != nil {
  1601  		t.Fatal(err)
  1602  	}
  1603  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 0, 0)
  1604  }
  1605  
  1606  // Daemon with node affinity should launch pods on nodes matching affinity.
  1607  func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
  1608  	for _, strategy := range updateStrategies() {
  1609  		daemon := newDaemonSet("foo")
  1610  		daemon.Spec.UpdateStrategy = *strategy
  1611  		daemon.Spec.Template.Spec.Affinity = &v1.Affinity{
  1612  			NodeAffinity: &v1.NodeAffinity{
  1613  				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
  1614  					NodeSelectorTerms: []v1.NodeSelectorTerm{
  1615  						{
  1616  							MatchExpressions: []v1.NodeSelectorRequirement{
  1617  								{
  1618  									Key:      "color",
  1619  									Operator: v1.NodeSelectorOpIn,
  1620  									Values:   []string{simpleNodeLabel["color"]},
  1621  								},
  1622  							},
  1623  						},
  1624  					},
  1625  				},
  1626  			},
  1627  		}
  1628  		_, ctx := ktesting.NewTestContext(t)
  1629  		manager, podControl, _, err := newTestController(ctx, daemon)
  1630  		if err != nil {
  1631  			t.Fatalf("error creating DaemonSetsController: %v", err)
  1632  		}
  1633  		addNodes(manager.nodeStore, 0, 4, nil)
  1634  		addNodes(manager.nodeStore, 4, 3, simpleNodeLabel)
  1635  		err = manager.dsStore.Add(daemon)
  1636  		if err != nil {
  1637  			t.Fatal(err)
  1638  		}
  1639  		expectSyncDaemonSets(t, manager, daemon, podControl, 3, 0, 0)
  1640  	}
  1641  }
  1642  
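         // DaemonSet status NumberReady should count only the daemon pods that have a Ready condition.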
  1643  func TestNumberReadyStatus(t *testing.T) {
  1644  	for _, strategy := range updateStrategies() {
  1645  		ds := newDaemonSet("foo")
  1646  		ds.Spec.UpdateStrategy = *strategy
  1647  		_, ctx := ktesting.NewTestContext(t)
  1648  		manager, podControl, clientset, err := newTestController(ctx, ds)
  1649  		if err != nil {
  1650  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1651  		}
  1652  		var updated *apps.DaemonSet
  1653  		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
  1654  			if action.GetSubresource() != "status" {
  1655  				return false, nil, nil
  1656  			}
  1657  			if u, ok := action.(core.UpdateAction); ok {
  1658  				updated = u.GetObject().(*apps.DaemonSet)
  1659  			}
  1660  			return false, nil, nil
  1661  		})
  1662  		addNodes(manager.nodeStore, 0, 2, simpleNodeLabel)
  1663  		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
  1664  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
  1665  		err = manager.dsStore.Add(ds)
  1666  		if err != nil {
  1667  			t.Fatal(err)
  1668  		}
  1669  
  1670  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1671  		if updated.Status.NumberReady != 0 {
  1672  			t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
  1673  		}
  1674  
  1675  		selector, _ := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
  1676  		daemonPods, _ := manager.podLister.Pods(ds.Namespace).List(selector)
  1677  		for _, pod := range daemonPods {
  1678  			condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
  1679  			pod.Status.Conditions = append(pod.Status.Conditions, condition)
  1680  		}
  1681  
  1682  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1683  		if updated.Status.NumberReady != 2 {
  1684  			t.Errorf("Wrong daemon %s status: %v", updated.Name, updated.Status)
  1685  		}
  1686  	}
  1687  }
  1688  
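         // DaemonSet status ObservedGeneration should be updated to the DaemonSet's Generation after a sync.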
  1689  func TestObservedGeneration(t *testing.T) {
  1690  	for _, strategy := range updateStrategies() {
  1691  		ds := newDaemonSet("foo")
  1692  		ds.Spec.UpdateStrategy = *strategy
  1693  		ds.Generation = 1
  1694  		_, ctx := ktesting.NewTestContext(t)
  1695  		manager, podControl, clientset, err := newTestController(ctx, ds)
  1696  		if err != nil {
  1697  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1698  		}
  1699  		var updated *apps.DaemonSet
  1700  		clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
  1701  			if action.GetSubresource() != "status" {
  1702  				return false, nil, nil
  1703  			}
  1704  			if u, ok := action.(core.UpdateAction); ok {
  1705  				updated = u.GetObject().(*apps.DaemonSet)
  1706  			}
  1707  			return false, nil, nil
  1708  		})
  1709  
  1710  		addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
  1711  		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
  1712  		err = manager.dsStore.Add(ds)
  1713  		if err != nil {
  1714  			t.Fatal(err)
  1715  		}
  1716  
  1717  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1718  		if updated.Status.ObservedGeneration != ds.Generation {
  1719  			t.Errorf("Wrong ObservedGeneration for daemon %s in status. Expected %d, got %d", updated.Name, ds.Generation, updated.Status.ObservedGeneration)
  1720  		}
  1721  	}
  1722  }
  1723  
  1724  // DaemonSet controller should kill all failed pods and create at most 1 pod on every node.
  1725  func TestDaemonKillFailedPods(t *testing.T) {
  1726  	tests := []struct {
  1727  		numFailedPods, numNormalPods, expectedCreates, expectedDeletes, expectedEvents int
  1728  		test                                                                           string
  1729  	}{
  1730  		{numFailedPods: 0, numNormalPods: 1, expectedCreates: 0, expectedDeletes: 0, expectedEvents: 0, test: "normal (do nothing)"},
  1731  		{numFailedPods: 0, numNormalPods: 0, expectedCreates: 1, expectedDeletes: 0, expectedEvents: 0, test: "no pods (create 1)"},
  1732  		{numFailedPods: 1, numNormalPods: 0, expectedCreates: 0, expectedDeletes: 1, expectedEvents: 1, test: "1 failed pod (kill 1), 0 normal pod (create 0; will create in the next sync)"},
  1733  		{numFailedPods: 1, numNormalPods: 3, expectedCreates: 0, expectedDeletes: 3, expectedEvents: 1, test: "1 failed pod (kill 1), 3 normal pods (kill 2)"},
  1734  	}
  1735  
  1736  	for _, test := range tests {
  1737  		t.Run(test.test, func(t *testing.T) {
  1738  			for _, strategy := range updateStrategies() {
  1739  				ds := newDaemonSet("foo")
  1740  				ds.Spec.UpdateStrategy = *strategy
  1741  				_, ctx := ktesting.NewTestContext(t)
  1742  				manager, podControl, _, err := newTestController(ctx, ds)
  1743  				if err != nil {
  1744  					t.Fatalf("error creating DaemonSets controller: %v", err)
  1745  				}
  1746  				err = manager.dsStore.Add(ds)
  1747  				if err != nil {
  1748  					t.Fatal(err)
  1749  				}
  1750  				addNodes(manager.nodeStore, 0, 1, nil)
  1751  				addFailedPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numFailedPods)
  1752  				addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, test.numNormalPods)
  1753  				expectSyncDaemonSets(t, manager, ds, podControl, test.expectedCreates, test.expectedDeletes, test.expectedEvents)
  1754  			}
  1755  		})
  1756  	}
  1757  }
  1758  
  1759  // DaemonSet controller needs to back off when killing failed pods to avoid hot looping and fighting with the kubelet.
  1760  func TestDaemonKillFailedPodsBackoff(t *testing.T) {
  1761  	for _, strategy := range updateStrategies() {
  1762  		t.Run(string(strategy.Type), func(t *testing.T) {
  1763  			_, ctx := ktesting.NewTestContext(t)
  1764  			ds := newDaemonSet("foo")
  1765  			ds.Spec.UpdateStrategy = *strategy
  1766  
  1767  			manager, podControl, _, err := newTestController(ctx, ds)
  1768  			if err != nil {
  1769  				t.Fatalf("error creating DaemonSets controller: %v", err)
  1770  			}
  1771  
  1772  			err = manager.dsStore.Add(ds)
  1773  			if err != nil {
  1774  				t.Fatal(err)
  1775  			}
  1776  			addNodes(manager.nodeStore, 0, 1, nil)
  1777  
  1778  			nodeName := "node-0"
  1779  			pod := newPod(fmt.Sprintf("%s-", nodeName), nodeName, simpleDaemonSetLabel, ds)
  1780  
  1781  			// Add a failed Pod
  1782  			pod.Status.Phase = v1.PodFailed
  1783  			err = manager.podStore.Add(pod)
  1784  			if err != nil {
  1785  				t.Fatal(err)
  1786  			}
  1787  
  1788  			backoffKey := failedPodsBackoffKey(ds, nodeName)
  1789  
  1790  			// First sync will delete the pod, initializing backoff
  1791  			expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1)
  1792  			initialDelay := manager.failedPodsBackoff.Get(backoffKey)
  1793  			if initialDelay <= 0 {
  1794  				t.Fatal("Initial delay is expected to be set.")
  1795  			}
  1796  
  1797  			resetCounters(manager)
  1798  
  1799  			// Immediate (second) sync gets limited by the backoff
  1800  			expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1801  			delay := manager.failedPodsBackoff.Get(backoffKey)
  1802  			if delay != initialDelay {
  1803  				t.Fatal("Backoff delay shouldn't be raised while waiting.")
  1804  			}
  1805  
  1806  			resetCounters(manager)
  1807  
  1808  			// Sleep to wait out backoff
  1809  			fakeClock := manager.failedPodsBackoff.Clock
  1810  
  1811  			// Move just before the backoff end time
  1812  			fakeClock.Sleep(delay - 1*time.Nanosecond)
  1813  			if !manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) {
  1814  				t.Errorf("Backoff delay didn't last the whole waitout period.")
  1815  			}
  1816  
  1817  			// Move to the backoff end time
  1818  			fakeClock.Sleep(1 * time.Nanosecond)
  1819  			if manager.failedPodsBackoff.IsInBackOffSinceUpdate(backoffKey, fakeClock.Now()) {
  1820  				t.Fatal("Backoff delay hasn't been reset after the period has passed.")
  1821  			}
  1822  
  1823  			// After backoff time, it will delete the failed pod
  1824  			expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 1)
  1825  		})
  1826  	}
  1827  }
  1828  
  1829  // DaemonSet should not remove a running pod from a node if the pod doesn't
  1830  // tolerate the node's NoSchedule taint.
  1831  func TestNoScheduleTaintedDoesntEvicitRunningIntolerantPod(t *testing.T) {
  1832  	for _, strategy := range updateStrategies() {
  1833  		ds := newDaemonSet("intolerant")
  1834  		ds.Spec.UpdateStrategy = *strategy
  1835  		_, ctx := ktesting.NewTestContext(t)
  1836  		manager, podControl, _, err := newTestController(ctx, ds)
  1837  		if err != nil {
  1838  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1839  		}
  1840  
  1841  		node := newNode("tainted", nil)
  1842  		err = manager.nodeStore.Add(node)
  1843  		if err != nil {
  1844  			t.Fatal(err)
  1845  		}
  1846  		setNodeTaint(node, noScheduleTaints)
  1847  		err = manager.podStore.Add(newPod("keep-running-me", "tainted", simpleDaemonSetLabel, ds))
  1848  		if err != nil {
  1849  			t.Fatal(err)
  1850  		}
  1851  		err = manager.dsStore.Add(ds)
  1852  		if err != nil {
  1853  			t.Fatal(err)
  1854  		}
  1855  
  1856  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1857  	}
  1858  }
  1859  
  1860  // DaemonSet should remove a running pod from a node if the pod doesn't
  1861  // tolerate the node's NoExecute taint.
  1862  func TestNoExecuteTaintedDoesEvicitRunningIntolerantPod(t *testing.T) {
  1863  	for _, strategy := range updateStrategies() {
  1864  		ds := newDaemonSet("intolerant")
  1865  		ds.Spec.UpdateStrategy = *strategy
  1866  		_, ctx := ktesting.NewTestContext(t)
  1867  		manager, podControl, _, err := newTestController(ctx, ds)
  1868  		if err != nil {
  1869  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1870  		}
  1871  
  1872  		node := newNode("tainted", nil)
  1873  		err = manager.nodeStore.Add(node)
  1874  		if err != nil {
  1875  			t.Fatal(err)
  1876  		}
  1877  		setNodeTaint(node, noExecuteTaints)
  1878  		err = manager.podStore.Add(newPod("stop-running-me", "tainted", simpleDaemonSetLabel, ds))
  1879  		if err != nil {
  1880  			t.Fatal(err)
  1881  		}
  1882  		err = manager.dsStore.Add(ds)
  1883  		if err != nil {
  1884  			t.Fatal(err)
  1885  		}
  1886  
  1887  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
  1888  	}
  1889  }
  1890  
  1891  // DaemonSet should not launch a pod on a tainted node when the pod doesn't tolerate that taint.
  1892  func TestTaintedNodeDaemonDoesNotLaunchIntolerantPod(t *testing.T) {
  1893  	for _, strategy := range updateStrategies() {
  1894  		ds := newDaemonSet("intolerant")
  1895  		ds.Spec.UpdateStrategy = *strategy
  1896  		_, ctx := ktesting.NewTestContext(t)
  1897  		manager, podControl, _, err := newTestController(ctx, ds)
  1898  		if err != nil {
  1899  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1900  		}
  1901  
  1902  		node := newNode("tainted", nil)
  1903  		setNodeTaint(node, noScheduleTaints)
  1904  		err = manager.nodeStore.Add(node)
  1905  		if err != nil {
  1906  			t.Fatal(err)
  1907  		}
  1908  		err = manager.dsStore.Add(ds)
  1909  		if err != nil {
  1910  			t.Fatal(err)
  1911  		}
  1912  
  1913  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  1914  	}
  1915  }
  1916  
  1917  // DaemonSet should launch a pod on a tainted node when the pod can tolerate that taint.
  1918  func TestTaintedNodeDaemonLaunchesToleratePod(t *testing.T) {
  1919  	for _, strategy := range updateStrategies() {
  1920  		ds := newDaemonSet("tolerate")
  1921  		ds.Spec.UpdateStrategy = *strategy
  1922  		setDaemonSetToleration(ds, noScheduleTolerations)
  1923  		_, ctx := ktesting.NewTestContext(t)
  1924  		manager, podControl, _, err := newTestController(ctx, ds)
  1925  		if err != nil {
  1926  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1927  		}
  1928  
  1929  		node := newNode("tainted", nil)
  1930  		setNodeTaint(node, noScheduleTaints)
  1931  		err = manager.nodeStore.Add(node)
  1932  		if err != nil {
  1933  			t.Fatal(err)
  1934  		}
  1935  		err = manager.dsStore.Add(ds)
  1936  		if err != nil {
  1937  			t.Fatal(err)
  1938  		}
  1939  
  1940  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1941  	}
  1942  }
  1943  
  1944  // DaemonSet should launch a pod on a not ready node with taint notReady:NoExecute.
  1945  func TestNotReadyNodeDaemonLaunchesPod(t *testing.T) {
  1946  	for _, strategy := range updateStrategies() {
  1947  		ds := newDaemonSet("simple")
  1948  		ds.Spec.UpdateStrategy = *strategy
  1949  		_, ctx := ktesting.NewTestContext(t)
  1950  		manager, podControl, _, err := newTestController(ctx, ds)
  1951  		if err != nil {
  1952  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1953  		}
  1954  
  1955  		node := newNode("tainted", nil)
  1956  		setNodeTaint(node, nodeNotReady)
  1957  		node.Status.Conditions = []v1.NodeCondition{
  1958  			{Type: v1.NodeReady, Status: v1.ConditionFalse},
  1959  		}
  1960  		err = manager.nodeStore.Add(node)
  1961  		if err != nil {
  1962  			t.Fatal(err)
  1963  		}
  1964  		err = manager.dsStore.Add(ds)
  1965  		if err != nil {
  1966  			t.Fatal(err)
  1967  		}
  1968  
  1969  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1970  	}
  1971  }
  1972  
  1973  // DaemonSet should launch a pod on an unreachable node with taint unreachable:NoExecute.
  1974  func TestUnreachableNodeDaemonLaunchesPod(t *testing.T) {
  1975  	for _, strategy := range updateStrategies() {
  1976  		ds := newDaemonSet("simple")
  1977  		ds.Spec.UpdateStrategy = *strategy
  1978  		_, ctx := ktesting.NewTestContext(t)
  1979  		manager, podControl, _, err := newTestController(ctx, ds)
  1980  		if err != nil {
  1981  			t.Fatalf("error creating DaemonSets controller: %v", err)
  1982  		}
  1983  
  1984  		node := newNode("tainted", nil)
  1985  		setNodeTaint(node, nodeUnreachable)
  1986  		node.Status.Conditions = []v1.NodeCondition{
  1987  			{Type: v1.NodeReady, Status: v1.ConditionUnknown},
  1988  		}
  1989  		err = manager.nodeStore.Add(node)
  1990  		if err != nil {
  1991  			t.Fatal(err)
  1992  		}
  1993  		err = manager.dsStore.Add(ds)
  1994  		if err != nil {
  1995  			t.Fatal(err)
  1996  		}
  1997  
  1998  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  1999  	}
  2000  }
  2001  
  2002  // DaemonSet should launch a pod on an untainted node when the pod has tolerations.
  2003  func TestNodeDaemonLaunchesToleratePod(t *testing.T) {
  2004  	for _, strategy := range updateStrategies() {
  2005  		ds := newDaemonSet("tolerate")
  2006  		ds.Spec.UpdateStrategy = *strategy
  2007  		setDaemonSetToleration(ds, noScheduleTolerations)
  2008  		_, ctx := ktesting.NewTestContext(t)
  2009  		manager, podControl, _, err := newTestController(ctx, ds)
  2010  		if err != nil {
  2011  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2012  		}
  2013  		addNodes(manager.nodeStore, 0, 1, nil)
  2014  		err = manager.dsStore.Add(ds)
  2015  		if err != nil {
  2016  			t.Fatal(err)
  2017  		}
  2018  
  2019  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  2020  	}
  2021  }
  2022  
  2023  // DaemonSet should not launch a replacement pod on a node while the existing daemon pod on that node is terminating.
  2024  func TestDaemonSetRespectsTermination(t *testing.T) {
  2025  	for _, strategy := range updateStrategies() {
  2026  		ds := newDaemonSet("foo")
  2027  		ds.Spec.UpdateStrategy = *strategy
  2028  		_, ctx := ktesting.NewTestContext(t)
  2029  		manager, podControl, _, err := newTestController(ctx, ds)
  2030  		if err != nil {
  2031  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2032  		}
  2033  
  2034  		addNodes(manager.nodeStore, 0, 1, simpleNodeLabel)
  2035  		pod := newPod(fmt.Sprintf("%s-", "node-0"), "node-0", simpleDaemonSetLabel, ds)
  2036  		dt := metav1.Now()
  2037  		pod.DeletionTimestamp = &dt
  2038  		err = manager.podStore.Add(pod)
  2039  		if err != nil {
  2040  			t.Fatal(err)
  2041  		}
  2042  		err = manager.dsStore.Add(ds)
  2043  		if err != nil {
  2044  			t.Fatal(err)
  2045  		}
  2046  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 0, 0)
  2047  	}
  2048  }
  2049  
  2050  func setNodeTaint(node *v1.Node, taints []v1.Taint) {
  2051  	node.Spec.Taints = taints
  2052  }
  2053  
  2054  func setDaemonSetToleration(ds *apps.DaemonSet, tolerations []v1.Toleration) {
  2055  	ds.Spec.Template.Spec.Tolerations = tolerations
  2056  }
  2057  
  2058  // DaemonSet should launch a pod even when the node has MemoryPressure/DiskPressure/PIDPressure taints.
  2059  func TestTaintPressureNodeDaemonLaunchesPod(t *testing.T) {
  2060  	for _, strategy := range updateStrategies() {
  2061  		ds := newDaemonSet("critical")
  2062  		ds.Spec.UpdateStrategy = *strategy
  2063  		setDaemonSetCritical(ds)
  2064  		_, ctx := ktesting.NewTestContext(t)
  2065  		manager, podControl, _, err := newTestController(ctx, ds)
  2066  		if err != nil {
  2067  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2068  		}
  2069  
  2070  		node := newNode("resources-pressure", nil)
  2071  		node.Status.Conditions = []v1.NodeCondition{
  2072  			{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue},
  2073  			{Type: v1.NodeMemoryPressure, Status: v1.ConditionTrue},
  2074  			{Type: v1.NodePIDPressure, Status: v1.ConditionTrue},
  2075  		}
  2076  		node.Spec.Taints = []v1.Taint{
  2077  			{Key: v1.TaintNodeDiskPressure, Effect: v1.TaintEffectNoSchedule},
  2078  			{Key: v1.TaintNodeMemoryPressure, Effect: v1.TaintEffectNoSchedule},
  2079  			{Key: v1.TaintNodePIDPressure, Effect: v1.TaintEffectNoSchedule},
  2080  		}
  2081  		err = manager.nodeStore.Add(node)
  2082  		if err != nil {
  2083  			t.Fatal(err)
  2084  		}
  2085  		err = manager.dsStore.Add(ds)
  2086  		if err != nil {
  2087  			t.Fatal(err)
  2088  		}
  2089  		expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  2090  	}
  2091  }
  2092  
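         // setDaemonSetCritical moves the DaemonSet into the kube-system namespace and gives its pod template system-critical priority.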
  2093  func setDaemonSetCritical(ds *apps.DaemonSet) {
  2094  	ds.Namespace = api.NamespaceSystem
  2095  	if ds.Spec.Template.ObjectMeta.Annotations == nil {
  2096  		ds.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
  2097  	}
  2098  	podPriority := scheduling.SystemCriticalPriority
  2099  	ds.Spec.Template.Spec.Priority = &podPriority
  2100  }
  2101  
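         // NodeShouldRunDaemonPod should report whether a daemon pod may be scheduled to, and should keep running on, a node, given node conditions, schedulability, existing pods, selectors, affinity, and host ports.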
  2102  func TestNodeShouldRunDaemonPod(t *testing.T) {
  2103  	shouldRun := true
  2104  	shouldContinueRunning := true
  2105  	cases := []struct {
  2106  		predicateName                    string
  2107  		podsOnNode                       []*v1.Pod
  2108  		nodeCondition                    []v1.NodeCondition
  2109  		nodeUnschedulable                bool
  2110  		ds                               *apps.DaemonSet
  2111  		shouldRun, shouldContinueRunning bool
  2112  	}{
  2113  		{
  2114  			predicateName: "ShouldRunDaemonPod",
  2115  			ds: &apps.DaemonSet{
  2116  				Spec: apps.DaemonSetSpec{
  2117  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2118  					Template: v1.PodTemplateSpec{
  2119  						ObjectMeta: metav1.ObjectMeta{
  2120  							Labels: simpleDaemonSetLabel,
  2121  						},
  2122  						Spec: resourcePodSpec("", "50M", "0.5"),
  2123  					},
  2124  				},
  2125  			},
  2126  			shouldRun:             true,
  2127  			shouldContinueRunning: true,
  2128  		},
  2129  		{
  2130  			predicateName: "InsufficientResourceError",
  2131  			ds: &apps.DaemonSet{
  2132  				Spec: apps.DaemonSetSpec{
  2133  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2134  					Template: v1.PodTemplateSpec{
  2135  						ObjectMeta: metav1.ObjectMeta{
  2136  							Labels: simpleDaemonSetLabel,
  2137  						},
  2138  						Spec: resourcePodSpec("", "200M", "0.5"),
  2139  					},
  2140  				},
  2141  			},
  2142  			shouldRun:             shouldRun,
  2143  			shouldContinueRunning: true,
  2144  		},
  2145  		{
  2146  			predicateName: "ErrPodNotMatchHostName",
  2147  			ds: &apps.DaemonSet{
  2148  				Spec: apps.DaemonSetSpec{
  2149  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2150  					Template: v1.PodTemplateSpec{
  2151  						ObjectMeta: metav1.ObjectMeta{
  2152  							Labels: simpleDaemonSetLabel,
  2153  						},
  2154  						Spec: resourcePodSpec("other-node", "50M", "0.5"),
  2155  					},
  2156  				},
  2157  			},
  2158  			shouldRun:             false,
  2159  			shouldContinueRunning: false,
  2160  		},
  2161  		{
  2162  			predicateName: "ErrPodNotFitsHostPorts",
  2163  			podsOnNode: []*v1.Pod{
  2164  				{
  2165  					Spec: v1.PodSpec{
  2166  						Containers: []v1.Container{{
  2167  							Ports: []v1.ContainerPort{{
  2168  								HostPort: 666,
  2169  							}},
  2170  						}},
  2171  					},
  2172  				},
  2173  			},
  2174  			ds: &apps.DaemonSet{
  2175  				Spec: apps.DaemonSetSpec{
  2176  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2177  					Template: v1.PodTemplateSpec{
  2178  						ObjectMeta: metav1.ObjectMeta{
  2179  							Labels: simpleDaemonSetLabel,
  2180  						},
  2181  						Spec: v1.PodSpec{
  2182  							Containers: []v1.Container{{
  2183  								Ports: []v1.ContainerPort{{
  2184  									HostPort: 666,
  2185  								}},
  2186  							}},
  2187  						},
  2188  					},
  2189  				},
  2190  			},
  2191  			shouldRun:             shouldRun,
  2192  			shouldContinueRunning: shouldContinueRunning,
  2193  		},
  2194  		{
  2195  			predicateName: "InsufficientResourceError",
  2196  			podsOnNode: []*v1.Pod{
  2197  				{
  2198  					Spec: v1.PodSpec{
  2199  						Containers: []v1.Container{{
  2200  							Ports: []v1.ContainerPort{{
  2201  								HostPort: 666,
  2202  							}},
  2203  							Resources: resourceContainerSpec("50M", "0.5"),
  2204  						}},
  2205  					},
  2206  				},
  2207  			},
  2208  			ds: &apps.DaemonSet{
  2209  				Spec: apps.DaemonSetSpec{
  2210  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2211  					Template: v1.PodTemplateSpec{
  2212  						ObjectMeta: metav1.ObjectMeta{
  2213  							Labels: simpleDaemonSetLabel,
  2214  						},
  2215  						Spec: resourcePodSpec("", "100M", "0.5"),
  2216  					},
  2217  				},
  2218  			},
  2219  			shouldRun:             shouldRun, // This is because we don't care about the resource constraints any more and let default scheduler handle it.
  2220  			shouldContinueRunning: true,
  2221  		},
  2222  		{
  2223  			predicateName: "ShouldRunDaemonPod",
  2224  			podsOnNode: []*v1.Pod{
  2225  				{
  2226  					Spec: v1.PodSpec{
  2227  						Containers: []v1.Container{{
  2228  							Ports: []v1.ContainerPort{{
  2229  								HostPort: 666,
  2230  							}},
  2231  							Resources: resourceContainerSpec("50M", "0.5"),
  2232  						}},
  2233  					},
  2234  				},
  2235  			},
  2236  			ds: &apps.DaemonSet{
  2237  				Spec: apps.DaemonSetSpec{
  2238  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2239  					Template: v1.PodTemplateSpec{
  2240  						ObjectMeta: metav1.ObjectMeta{
  2241  							Labels: simpleDaemonSetLabel,
  2242  						},
  2243  						Spec: resourcePodSpec("", "50M", "0.5"),
  2244  					},
  2245  				},
  2246  			},
  2247  			shouldRun:             true,
  2248  			shouldContinueRunning: true,
  2249  		},
  2250  		{
  2251  			predicateName: "ErrNodeSelectorNotMatch",
  2252  			ds: &apps.DaemonSet{
  2253  				Spec: apps.DaemonSetSpec{
  2254  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2255  					Template: v1.PodTemplateSpec{
  2256  						ObjectMeta: metav1.ObjectMeta{
  2257  							Labels: simpleDaemonSetLabel,
  2258  						},
  2259  						Spec: v1.PodSpec{
  2260  							NodeSelector: simpleDaemonSetLabel2,
  2261  						},
  2262  					},
  2263  				},
  2264  			},
  2265  			shouldRun:             false,
  2266  			shouldContinueRunning: false,
  2267  		},
  2268  		{
  2269  			predicateName: "ShouldRunDaemonPod",
  2270  			ds: &apps.DaemonSet{
  2271  				Spec: apps.DaemonSetSpec{
  2272  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2273  					Template: v1.PodTemplateSpec{
  2274  						ObjectMeta: metav1.ObjectMeta{
  2275  							Labels: simpleDaemonSetLabel,
  2276  						},
  2277  						Spec: v1.PodSpec{
  2278  							NodeSelector: simpleDaemonSetLabel,
  2279  						},
  2280  					},
  2281  				},
  2282  			},
  2283  			shouldRun:             true,
  2284  			shouldContinueRunning: true,
  2285  		},
  2286  		{
  2287  			predicateName: "ErrPodAffinityNotMatch",
  2288  			ds: &apps.DaemonSet{
  2289  				Spec: apps.DaemonSetSpec{
  2290  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2291  					Template: v1.PodTemplateSpec{
  2292  						ObjectMeta: metav1.ObjectMeta{
  2293  							Labels: simpleDaemonSetLabel,
  2294  						},
  2295  						Spec: v1.PodSpec{
  2296  							Affinity: &v1.Affinity{
  2297  								NodeAffinity: &v1.NodeAffinity{
  2298  									RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
  2299  										NodeSelectorTerms: []v1.NodeSelectorTerm{
  2300  											{
  2301  												MatchExpressions: []v1.NodeSelectorRequirement{
  2302  													{
  2303  														Key:      "type",
  2304  														Operator: v1.NodeSelectorOpIn,
  2305  														Values:   []string{"test"},
  2306  													},
  2307  												},
  2308  											},
  2309  										},
  2310  									},
  2311  								},
  2312  							},
  2313  						},
  2314  					},
  2315  				},
  2316  			},
  2317  			shouldRun:             false,
  2318  			shouldContinueRunning: false,
  2319  		},
  2320  		{
  2321  			predicateName: "ShouldRunDaemonPod",
  2322  			ds: &apps.DaemonSet{
  2323  				Spec: apps.DaemonSetSpec{
  2324  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2325  					Template: v1.PodTemplateSpec{
  2326  						ObjectMeta: metav1.ObjectMeta{
  2327  							Labels: simpleDaemonSetLabel,
  2328  						},
  2329  						Spec: v1.PodSpec{
  2330  							Affinity: &v1.Affinity{
  2331  								NodeAffinity: &v1.NodeAffinity{
  2332  									RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
  2333  										NodeSelectorTerms: []v1.NodeSelectorTerm{
  2334  											{
  2335  												MatchExpressions: []v1.NodeSelectorRequirement{
  2336  													{
  2337  														Key:      "type",
  2338  														Operator: v1.NodeSelectorOpIn,
  2339  														Values:   []string{"production"},
  2340  													},
  2341  												},
  2342  											},
  2343  										},
  2344  									},
  2345  								},
  2346  							},
  2347  						},
  2348  					},
  2349  				},
  2350  			},
  2351  			shouldRun:             true,
  2352  			shouldContinueRunning: true,
  2353  		},
  2354  		{
  2355  			predicateName: "ShouldRunDaemonPodOnUnschedulableNode",
  2356  			ds: &apps.DaemonSet{
  2357  				Spec: apps.DaemonSetSpec{
  2358  					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
  2359  					Template: v1.PodTemplateSpec{
  2360  						ObjectMeta: metav1.ObjectMeta{
  2361  							Labels: simpleDaemonSetLabel,
  2362  						},
  2363  						Spec: resourcePodSpec("", "50M", "0.5"),
  2364  					},
  2365  				},
  2366  			},
  2367  			nodeUnschedulable:     true,
  2368  			shouldRun:             true,
  2369  			shouldContinueRunning: true,
  2370  		},
  2371  	}
  2372  
  2373  	for i, c := range cases {
  2374  		for _, strategy := range updateStrategies() {
  2375  			node := newNode("test-node", simpleDaemonSetLabel)
  2376  			node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
  2377  			node.Status.Allocatable = allocatableResources("100M", "1")
  2378  			node.Spec.Unschedulable = c.nodeUnschedulable
  2379  			_, ctx := ktesting.NewTestContext(t)
  2380  			manager, _, _, err := newTestController(ctx)
  2381  			if err != nil {
  2382  				t.Fatalf("error creating DaemonSets controller: %v", err)
  2383  			}
  2384  			manager.nodeStore.Add(node)
  2385  			for _, p := range c.podsOnNode {
  2386  				p.Spec.NodeName = "test-node"
  2387  				manager.podStore.Add(p)
  2388  			}
  2389  			c.ds.Spec.UpdateStrategy = *strategy
  2390  			shouldRun, shouldContinueRunning := NodeShouldRunDaemonPod(node, c.ds)
  2391  
  2392  			if shouldRun != c.shouldRun {
  2393  				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldRun: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldRun, shouldRun)
  2394  			}
  2395  			if shouldContinueRunning != c.shouldContinueRunning {
  2396  				t.Errorf("[%v] strategy: %v, predicateName: %v expected shouldContinueRunning: %v, got: %v", i, c.ds.Spec.UpdateStrategy.Type, c.predicateName, c.shouldContinueRunning, shouldContinueRunning)
  2397  			}
  2398  		}
  2399  	}
  2400  }
  2401  
  2402  // DaemonSets should be resynced when node labels or taints change.
  2403  func TestUpdateNode(t *testing.T) {
  2404  	var enqueued bool
  2405  	cases := []struct {
  2406  		test               string
  2407  		newNode            *v1.Node
  2408  		oldNode            *v1.Node
  2409  		ds                 *apps.DaemonSet
  2410  		expectedEventsFunc func(strategyType apps.DaemonSetUpdateStrategyType) int
  2411  		shouldEnqueue      bool
  2412  		expectedCreates    func() int
  2413  	}{
  2414  		{
  2415  			test:    "Nothing changed, should not enqueue",
  2416  			oldNode: newNode("node1", nil),
  2417  			newNode: newNode("node1", nil),
  2418  			ds: func() *apps.DaemonSet {
  2419  				ds := newDaemonSet("ds")
  2420  				ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  2421  				return ds
  2422  			}(),
  2423  			shouldEnqueue:   false,
  2424  			expectedCreates: func() int { return 0 },
  2425  		},
  2426  		{
  2427  			test:    "Node labels changed",
  2428  			oldNode: newNode("node1", nil),
  2429  			newNode: newNode("node1", simpleNodeLabel),
  2430  			ds: func() *apps.DaemonSet {
  2431  				ds := newDaemonSet("ds")
  2432  				ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  2433  				return ds
  2434  			}(),
  2435  			shouldEnqueue:   true,
  2436  			expectedCreates: func() int { return 0 },
  2437  		},
  2438  		{
  2439  			test: "Node taints changed",
  2440  			oldNode: func() *v1.Node {
  2441  				node := newNode("node1", nil)
  2442  				setNodeTaint(node, noScheduleTaints)
  2443  				return node
  2444  			}(),
  2445  			newNode:         newNode("node1", nil),
  2446  			ds:              newDaemonSet("ds"),
  2447  			shouldEnqueue:   true,
  2448  			expectedCreates: func() int { return 0 },
  2449  		},
  2450  		{
  2451  			test:    "Node Allocatable changed",
  2452  			oldNode: newNode("node1", nil),
  2453  			newNode: func() *v1.Node {
  2454  				node := newNode("node1", nil)
  2455  				node.Status.Allocatable = allocatableResources("200M", "200m")
  2456  				return node
  2457  			}(),
  2458  			ds: func() *apps.DaemonSet {
  2459  				ds := newDaemonSet("ds")
  2460  				ds.Spec.Template.Spec = resourcePodSpecWithoutNodeName("200M", "200m")
  2461  				return ds
  2462  			}(),
  2463  			expectedEventsFunc: func(strategyType apps.DaemonSetUpdateStrategyType) int {
  2464  				switch strategyType {
  2465  				case apps.OnDeleteDaemonSetStrategyType:
  2466  					return 0
  2467  				case apps.RollingUpdateDaemonSetStrategyType:
  2468  					return 0
  2469  				default:
  2470  					t.Fatalf("unexpected UpdateStrategy %+v", strategyType)
  2471  				}
  2472  				return 0
  2473  			},
  2474  			shouldEnqueue: false,
  2475  			expectedCreates: func() int {
  2476  				return 1
  2477  			},
  2478  		},
  2479  	}
  2480  	for _, c := range cases {
  2481  		for _, strategy := range updateStrategies() {
  2482  			logger, ctx := ktesting.NewTestContext(t)
  2483  			manager, podControl, _, err := newTestController(ctx)
  2484  			if err != nil {
  2485  				t.Fatalf("error creating DaemonSets controller: %v", err)
  2486  			}
  2487  			err = manager.nodeStore.Add(c.oldNode)
  2488  			if err != nil {
  2489  				t.Fatal(err)
  2490  			}
  2491  			c.ds.Spec.UpdateStrategy = *strategy
  2492  			err = manager.dsStore.Add(c.ds)
  2493  			if err != nil {
  2494  				t.Fatal(err)
  2495  			}
  2496  
  2497  			expectedEvents := 0
  2498  			if c.expectedEventsFunc != nil {
  2499  				expectedEvents = c.expectedEventsFunc(strategy.Type)
  2500  			}
  2501  			expectedCreates := 0
  2502  			if c.expectedCreates != nil {
  2503  				expectedCreates = c.expectedCreates()
  2504  			}
  2505  			expectSyncDaemonSets(t, manager, c.ds, podControl, expectedCreates, 0, expectedEvents)
  2506  
  2507  			manager.enqueueDaemonSet = func(ds *apps.DaemonSet) {
  2508  				if ds.Name == "ds" {
  2509  					enqueued = true
  2510  				}
  2511  			}
  2512  
  2513  			enqueued = false
  2514  			manager.updateNode(logger, c.oldNode, c.newNode)
  2515  			if enqueued != c.shouldEnqueue {
  2516  				t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued)
  2517  			}
  2518  		}
  2519  	}
  2520  }
  2521  
  2522  // DaemonSets should be resynced when a non-daemon pod is deleted.
  2523  func TestDeleteNoDaemonPod(t *testing.T) {
  2524  	var enqueued bool
  2525  
  2526  	cases := []struct {
  2527  		test          string
  2528  		node          *v1.Node
  2529  		existPods     []*v1.Pod
  2530  		deletedPod    *v1.Pod
  2531  		ds            *apps.DaemonSet
  2532  		shouldEnqueue bool
  2533  	}{
  2534  		{
  2535  			test: "Deleted non-daemon pods to release resources",
  2536  			node: func() *v1.Node {
  2537  				node := newNode("node1", nil)
  2538  				node.Status.Conditions = []v1.NodeCondition{
  2539  					{Type: v1.NodeReady, Status: v1.ConditionTrue},
  2540  				}
  2541  				node.Status.Allocatable = allocatableResources("200M", "200m")
  2542  				return node
  2543  			}(),
  2544  			existPods: func() []*v1.Pod {
  2545  				pods := []*v1.Pod{}
  2546  				for i := 0; i < 4; i++ {
  2547  					podSpec := resourcePodSpec("node1", "50M", "50m")
  2548  					pods = append(pods, &v1.Pod{
  2549  						ObjectMeta: metav1.ObjectMeta{
  2550  							Name: fmt.Sprintf("pod_%d", i),
  2551  						},
  2552  						Spec: podSpec,
  2553  					})
  2554  				}
  2555  				return pods
  2556  			}(),
  2557  			deletedPod: func() *v1.Pod {
  2558  				podSpec := resourcePodSpec("node1", "50M", "50m")
  2559  				return &v1.Pod{
  2560  					ObjectMeta: metav1.ObjectMeta{
  2561  						Name: "pod_0",
  2562  					},
  2563  					Spec: podSpec,
  2564  				}
  2565  			}(),
  2566  			ds: func() *apps.DaemonSet {
  2567  				ds := newDaemonSet("ds")
  2568  				ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
  2569  				return ds
  2570  			}(),
  2571  			shouldEnqueue: false,
  2572  		},
  2573  		{
  2574  			test: "Deleted non-daemon pods (with controller) to release resources",
  2575  			node: func() *v1.Node {
  2576  				node := newNode("node1", nil)
  2577  				node.Status.Conditions = []v1.NodeCondition{
  2578  					{Type: v1.NodeReady, Status: v1.ConditionTrue},
  2579  				}
  2580  				node.Status.Allocatable = allocatableResources("200M", "200m")
  2581  				return node
  2582  			}(),
  2583  			existPods: func() []*v1.Pod {
  2584  				pods := []*v1.Pod{}
  2585  				for i := 0; i < 4; i++ {
  2586  					podSpec := resourcePodSpec("node1", "50M", "50m")
  2587  					pods = append(pods, &v1.Pod{
  2588  						ObjectMeta: metav1.ObjectMeta{
  2589  							Name: fmt.Sprintf("pod_%d", i),
  2590  							OwnerReferences: []metav1.OwnerReference{
  2591  								{Controller: func() *bool { res := true; return &res }()},
  2592  							},
  2593  						},
  2594  						Spec: podSpec,
  2595  					})
  2596  				}
  2597  				return pods
  2598  			}(),
  2599  			deletedPod: func() *v1.Pod {
  2600  				podSpec := resourcePodSpec("node1", "50M", "50m")
  2601  				return &v1.Pod{
  2602  					ObjectMeta: metav1.ObjectMeta{
  2603  						Name: "pod_0",
  2604  						OwnerReferences: []metav1.OwnerReference{
  2605  							{Controller: func() *bool { res := true; return &res }()},
  2606  						},
  2607  					},
  2608  					Spec: podSpec,
  2609  				}
  2610  			}(),
  2611  			ds: func() *apps.DaemonSet {
  2612  				ds := newDaemonSet("ds")
  2613  				ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
  2614  				return ds
  2615  			}(),
  2616  			shouldEnqueue: false,
  2617  		},
  2618  		{
  2619  			test: "Deleted no scheduled pods",
  2620  			node: func() *v1.Node {
  2621  				node := newNode("node1", nil)
  2622  				node.Status.Conditions = []v1.NodeCondition{
  2623  					{Type: v1.NodeReady, Status: v1.ConditionTrue},
  2624  				}
  2625  				node.Status.Allocatable = allocatableResources("200M", "200m")
  2626  				return node
  2627  			}(),
  2628  			existPods: func() []*v1.Pod {
  2629  				pods := []*v1.Pod{}
  2630  				for i := 0; i < 4; i++ {
  2631  					podSpec := resourcePodSpec("node1", "50M", "50m")
  2632  					pods = append(pods, &v1.Pod{
  2633  						ObjectMeta: metav1.ObjectMeta{
  2634  							Name: fmt.Sprintf("pod_%d", i),
  2635  							OwnerReferences: []metav1.OwnerReference{
  2636  								{Controller: func() *bool { res := true; return &res }()},
  2637  							},
  2638  						},
  2639  						Spec: podSpec,
  2640  					})
  2641  				}
  2642  				return pods
  2643  			}(),
  2644  			deletedPod: func() *v1.Pod {
  2645  				podSpec := resourcePodSpec("", "50M", "50m")
  2646  				return &v1.Pod{
  2647  					ObjectMeta: metav1.ObjectMeta{
  2648  						Name: "pod_5",
  2649  					},
  2650  					Spec: podSpec,
  2651  				}
  2652  			}(),
  2653  			ds: func() *apps.DaemonSet {
  2654  				ds := newDaemonSet("ds")
  2655  				ds.Spec.Template.Spec = resourcePodSpec("", "50M", "50m")
  2656  				return ds
  2657  			}(),
  2658  			shouldEnqueue: false,
  2659  		},
  2660  	}
  2661  
  2662  	for _, c := range cases {
  2663  		for _, strategy := range updateStrategies() {
  2664  			logger, ctx := ktesting.NewTestContext(t)
  2665  			manager, podControl, _, err := newTestController(ctx)
  2666  			if err != nil {
  2667  				t.Fatalf("error creating DaemonSets controller: %v", err)
  2668  			}
  2669  			err = manager.nodeStore.Add(c.node)
  2670  			if err != nil {
  2671  				t.Fatal(err)
  2672  			}
  2673  			c.ds.Spec.UpdateStrategy = *strategy
  2674  			err = manager.dsStore.Add(c.ds)
  2675  			if err != nil {
  2676  				t.Fatal(err)
  2677  			}
  2678  			for _, pod := range c.existPods {
  2679  				err = manager.podStore.Add(pod)
  2680  				if err != nil {
  2681  					t.Fatal(err)
  2682  				}
  2683  			}
  2684  			switch strategy.Type {
  2685  			case apps.OnDeleteDaemonSetStrategyType, apps.RollingUpdateDaemonSetStrategyType:
  2686  				expectSyncDaemonSets(t, manager, c.ds, podControl, 1, 0, 0)
  2687  			default:
  2688  				t.Fatalf("unexpected UpdateStrategy %+v", strategy)
  2689  			}
  2690  
  2691  			enqueued = false
  2692  			manager.deletePod(logger, c.deletedPod)
  2693  			if enqueued != c.shouldEnqueue {
  2694  				t.Errorf("Test case: '%s', expected: %t, got: %t", c.test, c.shouldEnqueue, enqueued)
  2695  			}
  2696  		}
  2697  	}
  2698  }
  2699  
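         // An unscheduled daemon pod pinned via node affinity to a node that does not exist should be deleted.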
  2700  func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
  2701  	for _, strategy := range updateStrategies() {
  2702  		ds := newDaemonSet("foo")
  2703  		ds.Spec.UpdateStrategy = *strategy
  2704  		_, ctx := ktesting.NewTestContext(t)
  2705  		manager, podControl, _, err := newTestController(ctx, ds)
  2706  		if err != nil {
  2707  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2708  		}
  2709  		err = manager.dsStore.Add(ds)
  2710  		if err != nil {
  2711  			t.Fatal(err)
  2712  		}
  2713  		addNodes(manager.nodeStore, 0, 1, nil)
  2714  		addPods(manager.podStore, "node-0", simpleDaemonSetLabel, ds, 1)
  2715  		addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
  2716  
  2717  		podScheduledUsingAffinity := newPod("pod1-node-3", "", simpleDaemonSetLabel, ds)
  2718  		podScheduledUsingAffinity.Spec.Affinity = &v1.Affinity{
  2719  			NodeAffinity: &v1.NodeAffinity{
  2720  				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
  2721  					NodeSelectorTerms: []v1.NodeSelectorTerm{
  2722  						{
  2723  							MatchFields: []v1.NodeSelectorRequirement{
  2724  								{
  2725  									Key:      metav1.ObjectNameField,
  2726  									Operator: v1.NodeSelectorOpIn,
  2727  									Values:   []string{"node-2"},
  2728  								},
  2729  							},
  2730  						},
  2731  					},
  2732  				},
  2733  			},
  2734  		}
  2735  		err = manager.podStore.Add(podScheduledUsingAffinity)
  2736  		if err != nil {
  2737  			t.Fatal(err)
  2738  		}
  2739  		expectSyncDaemonSets(t, manager, ds, podControl, 0, 1, 0)
  2740  	}
  2741  }
  2742  
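         // getNodesToDaemonPods should group the DaemonSet's pods by node name, include or exclude deleted terminal pods as requested, and ignore pods owned by other controllers or not matching the selector.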
  2743  func TestGetNodesToDaemonPods(t *testing.T) {
  2744  	ds := newDaemonSet("foo")
  2745  	ds2 := newDaemonSet("foo2")
  2746  	cases := map[string]struct {
  2747  		includeDeletedTerminal bool
  2748  		wantedPods             []*v1.Pod
  2749  		ignoredPods            []*v1.Pod
  2750  	}{
  2751  		"exclude deleted terminal pods": {
  2752  			wantedPods: []*v1.Pod{
  2753  				newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
  2754  				newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
  2755  				newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
  2756  				newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
  2757  				func() *v1.Pod {
  2758  					pod := newPod("matching-owned-succeeded-pod-0-", "node-0", simpleDaemonSetLabel, ds)
  2759  					pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
  2760  					return pod
  2761  				}(),
  2762  				func() *v1.Pod {
  2763  					pod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds)
  2764  					pod.Status = v1.PodStatus{Phase: v1.PodFailed}
  2765  					return pod
  2766  				}(),
  2767  			},
  2768  			ignoredPods: []*v1.Pod{
  2769  				newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
  2770  				newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
  2771  				newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
  2772  				func() *v1.Pod {
  2773  					pod := newPod("matching-owned-succeeded-deleted-pod-0-", "node-0", simpleDaemonSetLabel, ds)
  2774  					now := metav1.Now()
  2775  					pod.DeletionTimestamp = &now
  2776  					pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
  2777  					return pod
  2778  				}(),
  2779  				func() *v1.Pod {
  2780  					pod := newPod("matching-owned-failed-deleted-pod-1-", "node-1", simpleDaemonSetLabel, ds)
  2781  					now := metav1.Now()
  2782  					pod.DeletionTimestamp = &now
  2783  					pod.Status = v1.PodStatus{Phase: v1.PodFailed}
  2784  					return pod
  2785  				}(),
  2786  			},
  2787  		},
  2788  		"include deleted terminal pods": {
  2789  			includeDeletedTerminal: true,
  2790  			wantedPods: []*v1.Pod{
  2791  				newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
  2792  				newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
  2793  				newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
  2794  				newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
  2795  				func() *v1.Pod {
  2796  					pod := newPod("matching-owned-succeeded-pod-0-", "node-0", simpleDaemonSetLabel, ds)
  2797  					pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
  2798  					return pod
  2799  				}(),
  2800  				func() *v1.Pod {
  2801  					pod := newPod("matching-owned-failed-deleted-pod-1-", "node-1", simpleDaemonSetLabel, ds)
  2802  					now := metav1.Now()
  2803  					pod.DeletionTimestamp = &now
  2804  					pod.Status = v1.PodStatus{Phase: v1.PodFailed}
  2805  					return pod
  2806  				}(),
  2807  			},
  2808  			ignoredPods: []*v1.Pod{
  2809  				newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
  2810  				newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
  2811  				newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
  2812  			},
  2813  		},
  2814  	}
  2815  	for name, tc := range cases {
  2816  		t.Run(name, func(t *testing.T) {
  2817  			_, ctx := ktesting.NewTestContext(t)
  2818  			manager, _, _, err := newTestController(ctx, ds, ds2)
  2819  			if err != nil {
  2820  				t.Fatalf("error creating DaemonSets controller: %v", err)
  2821  			}
  2822  			err = manager.dsStore.Add(ds)
  2823  			if err != nil {
  2824  				t.Fatal(err)
  2825  			}
  2826  			err = manager.dsStore.Add(ds2)
  2827  			if err != nil {
  2828  				t.Fatal(err)
  2829  			}
  2830  			addNodes(manager.nodeStore, 0, 2, nil)
  2831  
  2832  			for _, pod := range tc.wantedPods {
  2833  				manager.podStore.Add(pod)
  2834  			}
  2835  
  2836  			for _, pod := range tc.ignoredPods {
  2837  				err = manager.podStore.Add(pod)
  2838  				if err != nil {
  2839  					t.Fatal(err)
  2840  				}
  2841  			}
  2842  
  2843  			nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds, tc.includeDeletedTerminal)
  2844  			if err != nil {
  2845  				t.Fatalf("getNodesToDaemonPods() error: %v", err)
  2846  			}
  2847  			gotPods := map[string]bool{}
  2848  			for node, pods := range nodesToDaemonPods {
  2849  				for _, pod := range pods {
  2850  					if pod.Spec.NodeName != node {
  2851  						t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName)
  2852  					}
  2853  					gotPods[pod.Name] = true
  2854  				}
  2855  			}
  2856  			for _, pod := range tc.wantedPods {
  2857  				if !gotPods[pod.Name] {
  2858  					t.Errorf("expected pod %v but didn't get it", pod.Name)
  2859  				}
  2860  				delete(gotPods, pod.Name)
  2861  			}
  2862  			for podName := range gotPods {
  2863  				t.Errorf("unexpected pod %v was returned", podName)
  2864  			}
  2865  		})
  2866  	}
  2867  }
  2868  
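         // Adding a node should enqueue the DaemonSet only when the new node matches the DaemonSet's node selector.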
  2869  func TestAddNode(t *testing.T) {
  2870  	logger, ctx := ktesting.NewTestContext(t)
  2871  	manager, _, _, err := newTestController(ctx)
  2872  	if err != nil {
  2873  		t.Fatalf("error creating DaemonSets controller: %v", err)
  2874  	}
  2875  	node1 := newNode("node1", nil)
  2876  	ds := newDaemonSet("ds")
  2877  	ds.Spec.Template.Spec.NodeSelector = simpleNodeLabel
  2878  	err = manager.dsStore.Add(ds)
  2879  	if err != nil {
  2880  		t.Fatal(err)
  2881  	}
  2882  	manager.addNode(logger, node1)
  2883  	if got, want := manager.queue.Len(), 0; got != want {
  2884  		t.Fatalf("queue.Len() = %v, want %v", got, want)
  2885  	}
  2886  
  2887  	node2 := newNode("node2", simpleNodeLabel)
  2888  	manager.addNode(logger, node2)
  2889  	if got, want := manager.queue.Len(), 1; got != want {
  2890  		t.Fatalf("queue.Len() = %v, want %v", got, want)
  2891  	}
  2892  	key, done := manager.queue.Get()
  2893  	if key == nil || done {
  2894  		t.Fatalf("failed to enqueue controller for node %v", node2.Name)
  2895  	}
  2896  }
  2897  
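         // Adding a pod owned by a DaemonSet should enqueue that DaemonSet.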
  2898  func TestAddPod(t *testing.T) {
  2899  	for _, strategy := range updateStrategies() {
  2900  		logger, ctx := ktesting.NewTestContext(t)
  2901  		manager, _, _, err := newTestController(ctx)
  2902  		if err != nil {
  2903  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2904  		}
  2905  		ds1 := newDaemonSet("foo1")
  2906  		ds1.Spec.UpdateStrategy = *strategy
  2907  		ds2 := newDaemonSet("foo2")
  2908  		ds2.Spec.UpdateStrategy = *strategy
  2909  		err = manager.dsStore.Add(ds1)
  2910  		if err != nil {
  2911  			t.Fatal(err)
  2912  		}
  2913  		err = manager.dsStore.Add(ds2)
  2914  		if err != nil {
  2915  			t.Fatal(err)
  2916  		}
  2917  		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
  2918  		manager.addPod(logger, pod1)
  2919  		if got, want := manager.queue.Len(), 1; got != want {
  2920  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  2921  		}
  2922  		key, done := manager.queue.Get()
  2923  		if key == nil || done {
  2924  			t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
  2925  		}
  2926  		expectedKey, _ := controller.KeyFunc(ds1)
  2927  		if got, want := key.(string), expectedKey; got != want {
  2928  			t.Errorf("queue.Get() = %v, want %v", got, want)
  2929  		}
  2930  
  2931  		pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2)
  2932  		manager.addPod(logger, pod2)
  2933  		if got, want := manager.queue.Len(), 1; got != want {
  2934  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  2935  		}
  2936  		key, done = manager.queue.Get()
  2937  		if key == nil || done {
  2938  			t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
  2939  		}
  2940  		expectedKey, _ = controller.KeyFunc(ds2)
  2941  		if got, want := key.(string), expectedKey; got != want {
  2942  			t.Errorf("queue.Get() = %v, want %v", got, want)
  2943  		}
  2944  	}
  2945  }
  2946  
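// TestAddPodOrphan verifies that adding an orphan pod enqueues every DaemonSet whose selector matches the pod's labels.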
  2947  func TestAddPodOrphan(t *testing.T) {
  2948  	for _, strategy := range updateStrategies() {
  2949  		logger, ctx := ktesting.NewTestContext(t)
  2950  		manager, _, _, err := newTestController(ctx)
  2951  		if err != nil {
  2952  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2953  		}
  2954  		ds1 := newDaemonSet("foo1")
  2955  		ds1.Spec.UpdateStrategy = *strategy
  2956  		ds2 := newDaemonSet("foo2")
  2957  		ds2.Spec.UpdateStrategy = *strategy
  2958  		ds3 := newDaemonSet("foo3")
  2959  		ds3.Spec.UpdateStrategy = *strategy
  2960  		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
  2961  		err = manager.dsStore.Add(ds1)
  2962  		if err != nil {
  2963  			t.Fatal(err)
  2964  		}
  2965  		err = manager.dsStore.Add(ds2)
  2966  		if err != nil {
  2967  			t.Fatal(err)
  2968  		}
  2969  		err = manager.dsStore.Add(ds3)
  2970  		if err != nil {
  2971  			t.Fatal(err)
  2972  		}
  2973  
  2974  		// Make pod an orphan. Expect matching sets to be queued.
  2975  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
  2976  		manager.addPod(logger, pod)
  2977  		if got, want := manager.queue.Len(), 2; got != want {
  2978  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  2979  		}
  2980  		if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) {
  2981  			t.Errorf("getQueuedKeys() = %v, want %v", got, want)
  2982  		}
  2983  	}
  2984  }
  2985  
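// TestUpdatePod verifies that a pod update enqueues the DaemonSet that owns the pod.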
  2986  func TestUpdatePod(t *testing.T) {
  2987  	for _, strategy := range updateStrategies() {
  2988  		logger, ctx := ktesting.NewTestContext(t)
  2989  		manager, _, _, err := newTestController(ctx)
  2990  		if err != nil {
  2991  			t.Fatalf("error creating DaemonSets controller: %v", err)
  2992  		}
  2993  		ds1 := newDaemonSet("foo1")
  2994  		ds1.Spec.UpdateStrategy = *strategy
  2995  		ds2 := newDaemonSet("foo2")
  2996  		ds2.Spec.UpdateStrategy = *strategy
  2997  		err = manager.dsStore.Add(ds1)
  2998  		if err != nil {
  2999  			t.Fatal(err)
  3000  		}
  3001  		err = manager.dsStore.Add(ds2)
  3002  		if err != nil {
  3003  			t.Fatal(err)
  3004  		}
  3005  
  3006  		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
  3007  		prev := *pod1
  3008  		bumpResourceVersion(pod1)
  3009  		manager.updatePod(logger, &prev, pod1)
  3010  		if got, want := manager.queue.Len(), 1; got != want {
  3011  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3012  		}
  3013  		key, done := manager.queue.Get()
  3014  		if key == nil || done {
  3015  			t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
  3016  		}
  3017  		expectedKey, _ := controller.KeyFunc(ds1)
  3018  		if got, want := key.(string), expectedKey; got != want {
  3019  			t.Errorf("queue.Get() = %v, want %v", got, want)
  3020  		}
  3021  
  3022  		pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2)
  3023  		prev = *pod2
  3024  		bumpResourceVersion(pod2)
  3025  		manager.updatePod(logger, &prev, pod2)
  3026  		if got, want := manager.queue.Len(), 1; got != want {
  3027  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3028  		}
  3029  		key, done = manager.queue.Get()
  3030  		if key == nil || done {
  3031  			t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
  3032  		}
  3033  		expectedKey, _ = controller.KeyFunc(ds2)
  3034  		if got, want := key.(string), expectedKey; got != want {
  3035  			t.Errorf("queue.Get() = %v, want %v", got, want)
  3036  		}
  3037  	}
  3038  }
  3039  
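// TestUpdatePodOrphanSameLabels verifies that updating an orphan pod without changing its labels enqueues nothing.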
  3040  func TestUpdatePodOrphanSameLabels(t *testing.T) {
  3041  	for _, strategy := range updateStrategies() {
  3042  		logger, ctx := ktesting.NewTestContext(t)
  3043  		manager, _, _, err := newTestController(ctx)
  3044  		if err != nil {
  3045  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3046  		}
  3047  		ds1 := newDaemonSet("foo1")
  3048  		ds1.Spec.UpdateStrategy = *strategy
  3049  		ds2 := newDaemonSet("foo2")
  3050  		ds2.Spec.UpdateStrategy = *strategy
  3051  		err = manager.dsStore.Add(ds1)
  3052  		if err != nil {
  3053  			t.Fatal(err)
  3054  		}
  3055  		err = manager.dsStore.Add(ds2)
  3056  		if err != nil {
  3057  			t.Fatal(err)
  3058  		}
  3059  
  3060  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
  3061  		prev := *pod
  3062  		bumpResourceVersion(pod)
  3063  		manager.updatePod(logger, &prev, pod)
  3064  		if got, want := manager.queue.Len(), 0; got != want {
  3065  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3066  		}
  3067  	}
  3068  }
  3069  
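// TestUpdatePodOrphanWithNewLabels verifies that an orphan pod whose labels changed enqueues all DaemonSets matching the new labels.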
  3070  func TestUpdatePodOrphanWithNewLabels(t *testing.T) {
  3071  	for _, strategy := range updateStrategies() {
  3072  		logger, ctx := ktesting.NewTestContext(t)
  3073  		manager, _, _, err := newTestController(ctx)
  3074  		if err != nil {
  3075  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3076  		}
  3077  		ds1 := newDaemonSet("foo1")
  3078  		ds1.Spec.UpdateStrategy = *strategy
  3079  		ds2 := newDaemonSet("foo2")
  3080  		ds2.Spec.UpdateStrategy = *strategy
  3081  		err = manager.dsStore.Add(ds1)
  3082  		if err != nil {
  3083  			t.Fatal(err)
  3084  		}
  3085  		err = manager.dsStore.Add(ds2)
  3086  		if err != nil {
  3087  			t.Fatal(err)
  3088  		}
  3089  
  3090  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
  3091  		prev := *pod
  3092  		prev.Labels = map[string]string{"foo2": "bar2"}
  3093  		bumpResourceVersion(pod)
  3094  		manager.updatePod(logger, &prev, pod)
  3095  		if got, want := manager.queue.Len(), 2; got != want {
  3096  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3097  		}
  3098  		if got, want := getQueuedKeys(manager.queue), []string{"default/foo1", "default/foo2"}; !reflect.DeepEqual(got, want) {
  3099  			t.Errorf("getQueuedKeys() = %v, want %v", got, want)
  3100  		}
  3101  	}
  3102  }
  3103  
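// TestUpdatePodChangeControllerRef verifies that moving a pod's controller reference from one DaemonSet to another enqueues both the old and the new owner.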
  3104  func TestUpdatePodChangeControllerRef(t *testing.T) {
  3105  	for _, strategy := range updateStrategies() {
  3106  		logger, ctx := ktesting.NewTestContext(t)
  3107  		manager, _, _, err := newTestController(ctx)
  3108  		if err != nil {
  3109  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3110  		}
  3111  		ds1 := newDaemonSet("foo1")
  3112  		ds1.Spec.UpdateStrategy = *strategy
  3113  		ds2 := newDaemonSet("foo2")
  3114  		ds2.Spec.UpdateStrategy = *strategy
  3115  		err = manager.dsStore.Add(ds1)
  3116  		if err != nil {
  3117  			t.Fatal(err)
  3118  		}
  3119  		err = manager.dsStore.Add(ds2)
  3120  		if err != nil {
  3121  			t.Fatal(err)
  3122  		}
  3123  
  3124  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
  3125  		prev := *pod
  3126  		prev.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(ds2, controllerKind)}
  3127  		bumpResourceVersion(pod)
  3128  		manager.updatePod(logger, &prev, pod)
  3129  		if got, want := manager.queue.Len(), 2; got != want {
  3130  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3131  		}
  3132  	}
  3133  }
  3134  
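// TestUpdatePodControllerRefRemoved verifies that removing a pod's controller reference enqueues the previous owner as well as every DaemonSet matching the now-orphaned pod.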
  3135  func TestUpdatePodControllerRefRemoved(t *testing.T) {
  3136  	for _, strategy := range updateStrategies() {
  3137  		logger, ctx := ktesting.NewTestContext(t)
  3138  		manager, _, _, err := newTestController(ctx)
  3139  		if err != nil {
  3140  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3141  		}
  3142  		ds1 := newDaemonSet("foo1")
  3143  		ds1.Spec.UpdateStrategy = *strategy
  3144  		ds2 := newDaemonSet("foo2")
  3145  		ds2.Spec.UpdateStrategy = *strategy
  3146  		err = manager.dsStore.Add(ds1)
  3147  		if err != nil {
  3148  			t.Fatal(err)
  3149  		}
  3150  		err = manager.dsStore.Add(ds2)
  3151  		if err != nil {
  3152  			t.Fatal(err)
  3153  		}
  3154  
  3155  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
  3156  		prev := *pod
  3157  		pod.OwnerReferences = nil
  3158  		bumpResourceVersion(pod)
  3159  		manager.updatePod(logger, &prev, pod)
  3160  		if got, want := manager.queue.Len(), 2; got != want {
  3161  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3162  		}
  3163  	}
  3164  }
  3165  
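// TestDeletePod verifies that deleting a pod enqueues the DaemonSet that owned it.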
  3166  func TestDeletePod(t *testing.T) {
  3167  	for _, strategy := range updateStrategies() {
  3168  		logger, ctx := ktesting.NewTestContext(t)
  3169  		manager, _, _, err := newTestController(ctx)
  3170  		if err != nil {
  3171  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3172  		}
  3173  		ds1 := newDaemonSet("foo1")
  3174  		ds1.Spec.UpdateStrategy = *strategy
  3175  		ds2 := newDaemonSet("foo2")
  3176  		ds2.Spec.UpdateStrategy = *strategy
  3177  		err = manager.dsStore.Add(ds1)
  3178  		if err != nil {
  3179  			t.Fatal(err)
  3180  		}
  3181  		err = manager.dsStore.Add(ds2)
  3182  		if err != nil {
  3183  			t.Fatal(err)
  3184  		}
  3185  
  3186  		pod1 := newPod("pod1-", "node-0", simpleDaemonSetLabel, ds1)
  3187  		manager.deletePod(logger, pod1)
  3188  		if got, want := manager.queue.Len(), 1; got != want {
  3189  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3190  		}
  3191  		key, done := manager.queue.Get()
  3192  		if key == nil || done {
  3193  			t.Fatalf("failed to enqueue controller for pod %v", pod1.Name)
  3194  		}
  3195  		expectedKey, _ := controller.KeyFunc(ds1)
  3196  		if got, want := key.(string), expectedKey; got != want {
  3197  			t.Errorf("queue.Get() = %v, want %v", got, want)
  3198  		}
  3199  
  3200  		pod2 := newPod("pod2-", "node-0", simpleDaemonSetLabel, ds2)
  3201  		manager.deletePod(logger, pod2)
  3202  		if got, want := manager.queue.Len(), 1; got != want {
  3203  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3204  		}
  3205  		key, done = manager.queue.Get()
  3206  		if key == nil || done {
  3207  			t.Fatalf("failed to enqueue controller for pod %v", pod2.Name)
  3208  		}
  3209  		expectedKey, _ = controller.KeyFunc(ds2)
  3210  		if got, want := key.(string), expectedKey; got != want {
  3211  			t.Errorf("queue.Get() = %v, want %v", got, want)
  3212  		}
  3213  	}
  3214  }
  3215  
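// TestDeletePodOrphan verifies that deleting an orphan pod enqueues nothing, even when DaemonSet selectors match its labels.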
  3216  func TestDeletePodOrphan(t *testing.T) {
  3217  	for _, strategy := range updateStrategies() {
  3218  		logger, ctx := ktesting.NewTestContext(t)
  3219  		manager, _, _, err := newTestController(ctx)
  3220  		if err != nil {
  3221  			t.Fatalf("error creating DaemonSets controller: %v", err)
  3222  		}
  3223  		ds1 := newDaemonSet("foo1")
  3224  		ds1.Spec.UpdateStrategy = *strategy
  3225  		ds2 := newDaemonSet("foo2")
  3226  		ds2.Spec.UpdateStrategy = *strategy
  3227  		ds3 := newDaemonSet("foo3")
  3228  		ds3.Spec.UpdateStrategy = *strategy
  3229  		ds3.Spec.Selector.MatchLabels = simpleDaemonSetLabel2
  3230  		err = manager.dsStore.Add(ds1)
  3231  		if err != nil {
  3232  			t.Fatal(err)
  3233  		}
  3234  		err = manager.dsStore.Add(ds2)
  3235  		if err != nil {
  3236  			t.Fatal(err)
  3237  		}
  3238  		err = manager.dsStore.Add(ds3)
  3239  		if err != nil {
  3240  			t.Fatal(err)
  3241  		}
  3242  
  3243  		pod := newPod("pod1-", "node-0", simpleDaemonSetLabel, nil)
  3244  		manager.deletePod(logger, pod)
  3245  		if got, want := manager.queue.Len(), 0; got != want {
  3246  			t.Fatalf("queue.Len() = %v, want %v", got, want)
  3247  		}
  3248  	}
  3249  }
  3250  
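// bumpResourceVersion increments an object's resourceVersion so it looks like a genuine update to the controller's event handlers.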
  3251  func bumpResourceVersion(obj metav1.Object) {
  3252  	ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32)
  3253  	obj.SetResourceVersion(strconv.FormatInt(ver+1, 10))
  3254  }
  3255  
  3256  // getQueuedKeys returns a sorted list of keys in the queue.
  3257  // It can be used to quickly check that multiple keys are in there.
  3258  func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
  3259  	var keys []string
  3260  	count := queue.Len()
  3261  	for i := 0; i < count; i++ {
  3262  		key, done := queue.Get()
  3263  		if done {
  3264  			return keys
  3265  		}
  3266  		keys = append(keys, key.(string))
  3267  	}
  3268  	sort.Strings(keys)
  3269  	return keys
  3270  }
  3271  
  3272  // TestSurgeDealsWithExistingPods verifies that the controller does not create new pods on nodes that already run a daemon pod, and deletes excess daemon pods from nodes that run more than one.
  3273  func TestSurgeDealsWithExistingPods(t *testing.T) {
  3274  	_, ctx := ktesting.NewTestContext(t)
  3275  	ds := newDaemonSet("foo")
  3276  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3277  	manager, podControl, _, err := newTestController(ctx, ds)
  3278  	if err != nil {
  3279  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3280  	}
  3281  	manager.dsStore.Add(ds)
  3282  	addNodes(manager.nodeStore, 0, 5, nil)
  3283  	addPods(manager.podStore, "node-1", simpleDaemonSetLabel, ds, 1)
  3284  	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 2)
  3285  	addPods(manager.podStore, "node-3", simpleDaemonSetLabel, ds, 5)
  3286  	addPods(manager.podStore, "node-4", simpleDaemonSetLabel2, ds, 2)
  3287  	expectSyncDaemonSets(t, manager, ds, podControl, 2, 5, 0)
  3288  }
  3289  
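// TestSurgePreservesReadyOldPods verifies that a surge update keeps the current-hash pod and the oldest ready old-hash pod on a node while deleting newer old-hash duplicates.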
  3290  func TestSurgePreservesReadyOldPods(t *testing.T) {
  3291  	_, ctx := ktesting.NewTestContext(t)
  3292  	ds := newDaemonSet("foo")
  3293  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3294  	manager, podControl, _, err := newTestController(ctx, ds)
  3295  	if err != nil {
  3296  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3297  	}
  3298  	manager.dsStore.Add(ds)
  3299  	addNodes(manager.nodeStore, 0, 5, nil)
  3300  
  3301  	// will be preserved because it's the current hash
  3302  	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
  3303  	pod.CreationTimestamp.Time = time.Unix(100, 0)
  3304  	manager.podStore.Add(pod)
  3305  
  3306  	// will be preserved because it's the oldest AND it is ready
  3307  	pod = newPod("node-1-old-", "node-1", simpleDaemonSetLabel, ds)
  3308  	delete(pod.Labels, apps.ControllerRevisionHashLabelKey)
  3309  	pod.CreationTimestamp.Time = time.Unix(50, 0)
  3310  	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3311  	manager.podStore.Add(pod)
  3312  
  3313  	// will be deleted because it's not the oldest, even though it is ready
  3314  	oldReadyPod := newPod("node-1-delete-", "node-1", simpleDaemonSetLabel, ds)
  3315  	delete(oldReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3316  	oldReadyPod.CreationTimestamp.Time = time.Unix(60, 0)
  3317  	oldReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3318  	manager.podStore.Add(oldReadyPod)
  3319  
  3320  	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 1)
  3321  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 1, 0)
  3322  
  3323  	actual := sets.NewString(podControl.DeletePodName...)
  3324  	expected := sets.NewString(oldReadyPod.Name)
  3325  	if !actual.Equal(expected) {
  3326  		t.Errorf("unexpected deletes\nexpected: %v\n  actual: %v", expected.List(), actual.List())
  3327  	}
  3328  }
  3329  
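// TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted verifies that a node whose only pod is already being deleted still gets a replacement pod even though the surge budget is consumed on another node.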
  3330  func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
  3331  	_, ctx := ktesting.NewTestContext(t)
  3332  	ds := newDaemonSet("foo")
  3333  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3334  	manager, podControl, _, err := newTestController(ctx, ds)
  3335  	if err != nil {
  3336  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3337  	}
  3338  	manager.dsStore.Add(ds)
  3339  	addNodes(manager.nodeStore, 0, 5, nil)
  3340  
  3341  	// will be preserved because it has the newest hash, and is also consuming the surge budget
  3342  	pod := newPod("node-0-", "node-0", simpleDaemonSetLabel, ds)
  3343  	pod.CreationTimestamp.Time = time.Unix(100, 0)
  3344  	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}
  3345  	manager.podStore.Add(pod)
  3346  
  3347  	// will be preserved because it is ready
  3348  	oldPodReady := newPod("node-0-old-ready-", "node-0", simpleDaemonSetLabel, ds)
  3349  	delete(oldPodReady.Labels, apps.ControllerRevisionHashLabelKey)
  3350  	oldPodReady.CreationTimestamp.Time = time.Unix(50, 0)
  3351  	oldPodReady.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3352  	manager.podStore.Add(oldPodReady)
  3353  
  3354  	// create old ready pods on all other nodes
  3355  	for i := 1; i < 5; i++ {
  3356  		oldPod := newPod(fmt.Sprintf("node-%d-preserve-", i), fmt.Sprintf("node-%d", i), simpleDaemonSetLabel, ds)
  3357  		delete(oldPod.Labels, apps.ControllerRevisionHashLabelKey)
  3358  		oldPod.CreationTimestamp.Time = time.Unix(1, 0)
  3359  		oldPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3360  		manager.podStore.Add(oldPod)
  3361  
  3362  		// mark the last old pod as deleted, which should trigger a creation above surge
  3363  		if i == 4 {
  3364  			thirty := int64(30)
  3365  			timestamp := metav1.Time{Time: time.Unix(1+thirty, 0)}
  3366  			oldPod.DeletionGracePeriodSeconds = &thirty
  3367  			oldPod.DeletionTimestamp = &timestamp
  3368  		}
  3369  	}
  3370  
  3371  	// controller should detect that node-4 has only a deleted pod
  3372  	clearExpectations(t, manager, ds, podControl)
  3373  	expectSyncDaemonSets(t, manager, ds, podControl, 1, 0, 0)
  3374  	clearExpectations(t, manager, ds, podControl)
  3375  }
  3376  
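// TestSurgeDeletesUnreadyOldPods verifies that a surge update deletes old-hash pods that are unready, along with old-hash pods that are not the oldest on their node.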
  3377  func TestSurgeDeletesUnreadyOldPods(t *testing.T) {
  3378  	_, ctx := ktesting.NewTestContext(t)
  3379  	ds := newDaemonSet("foo")
  3380  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3381  	manager, podControl, _, err := newTestController(ctx, ds)
  3382  	if err != nil {
  3383  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3384  	}
  3385  	manager.dsStore.Add(ds)
  3386  	addNodes(manager.nodeStore, 0, 5, nil)
  3387  
  3388  	// will be preserved because it has the newest hash
  3389  	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
  3390  	pod.CreationTimestamp.Time = time.Unix(100, 0)
  3391  	manager.podStore.Add(pod)
  3392  
  3393  	// will be deleted because it is unready
  3394  	oldUnreadyPod := newPod("node-1-old-unready-", "node-1", simpleDaemonSetLabel, ds)
  3395  	delete(oldUnreadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3396  	oldUnreadyPod.CreationTimestamp.Time = time.Unix(50, 0)
  3397  	oldUnreadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}
  3398  	manager.podStore.Add(oldUnreadyPod)
  3399  
  3400  	// will be deleted because it is not the oldest
  3401  	oldReadyPod := newPod("node-1-delete-", "node-1", simpleDaemonSetLabel, ds)
  3402  	delete(oldReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3403  	oldReadyPod.CreationTimestamp.Time = time.Unix(60, 0)
  3404  	oldReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3405  	manager.podStore.Add(oldReadyPod)
  3406  
  3407  	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 1)
  3408  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 2, 0)
  3409  
  3410  	actual := sets.NewString(podControl.DeletePodName...)
  3411  	expected := sets.NewString(oldReadyPod.Name, oldUnreadyPod.Name)
  3412  	if !actual.Equal(expected) {
  3413  		t.Errorf("unexpected deletes\nexpected: %v\n  actual: %v", expected.List(), actual.List())
  3414  	}
  3415  }
  3416  
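// TestSurgePreservesOldReadyWithUnsatisfiedMinReady verifies that the oldest ready old-hash pod is kept while the replacement pod has not yet been ready for MinReadySeconds.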
  3417  func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
  3418  	_, ctx := ktesting.NewTestContext(t)
  3419  	ds := newDaemonSet("foo")
  3420  	ds.Spec.MinReadySeconds = 15
  3421  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3422  	manager, podControl, _, err := newTestController(ctx, ds)
  3423  	if err != nil {
  3424  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3425  	}
  3426  	manager.dsStore.Add(ds)
  3427  	addNodes(manager.nodeStore, 0, 5, nil)
  3428  
  3429  	// the clock will be set 10s after the newest pod on node-1 went ready, which is not long enough to be available
  3430  	manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+10, 0))
  3431  
  3432  	// will be preserved because it has the newest hash
  3433  	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
  3434  	pod.CreationTimestamp.Time = time.Unix(100, 0)
  3435  	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: time.Unix(50, 0)}}}
  3436  	manager.podStore.Add(pod)
  3437  
  3438  	// will be preserved because it is ready AND the newest pod is not yet available for long enough
  3439  	oldReadyPod := newPod("node-1-old-ready-", "node-1", simpleDaemonSetLabel, ds)
  3440  	delete(oldReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3441  	oldReadyPod.CreationTimestamp.Time = time.Unix(50, 0)
  3442  	oldReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3443  	manager.podStore.Add(oldReadyPod)
  3444  
  3445  	// will be deleted because it is not the oldest
  3446  	oldExcessReadyPod := newPod("node-1-delete-", "node-1", simpleDaemonSetLabel, ds)
  3447  	delete(oldExcessReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3448  	oldExcessReadyPod.CreationTimestamp.Time = time.Unix(60, 0)
  3449  	oldExcessReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3450  	manager.podStore.Add(oldExcessReadyPod)
  3451  
  3452  	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 1)
  3453  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 1, 0)
  3454  
  3455  	actual := sets.NewString(podControl.DeletePodName...)
  3456  	expected := sets.NewString(oldExcessReadyPod.Name)
  3457  	if !actual.Equal(expected) {
  3458  		t.Errorf("unexpected deletes\nexpected: %v\n  actual: %v", expected.List(), actual.List())
  3459  	}
  3460  }
  3461  
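// TestSurgeDeletesOldReadyWithUnsatisfiedMinReady verifies that old-hash pods are deleted once the replacement pod has been ready for at least MinReadySeconds.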
  3462  func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
  3463  	_, ctx := ktesting.NewTestContext(t)
  3464  	ds := newDaemonSet("foo")
  3465  	ds.Spec.MinReadySeconds = 15
  3466  	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
  3467  	manager, podControl, _, err := newTestController(ctx, ds)
  3468  	if err != nil {
  3469  		t.Fatalf("error creating DaemonSets controller: %v", err)
  3470  	}
  3471  	manager.dsStore.Add(ds)
  3472  	addNodes(manager.nodeStore, 0, 5, nil)
  3473  
  3474  	// the clock will be set 20s after the newest pod on node-1 went ready, which exceeds MinReadySeconds, so that pod counts as available
  3475  	manager.DaemonSetsController.failedPodsBackoff.Clock = testingclock.NewFakeClock(time.Unix(50+20, 0))
  3476  
  3477  	// will be preserved because it has the newest hash
  3478  	pod := newPod("node-1-", "node-1", simpleDaemonSetLabel, ds)
  3479  	pod.CreationTimestamp.Time = time.Unix(100, 0)
  3480  	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: metav1.Time{Time: time.Unix(50, 0)}}}
  3481  	manager.podStore.Add(pod)
  3482  
  3483  	// will be deleted because the newest pod on the node has already been available for MinReadySeconds
  3484  	oldReadyPod := newPod("node-1-old-ready-", "node-1", simpleDaemonSetLabel, ds)
  3485  	delete(oldReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3486  	oldReadyPod.CreationTimestamp.Time = time.Unix(50, 0)
  3487  	oldReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3488  	manager.podStore.Add(oldReadyPod)
  3489  
  3490  	// will be deleted because it is not the oldest
  3491  	oldExcessReadyPod := newPod("node-1-delete-", "node-1", simpleDaemonSetLabel, ds)
  3492  	delete(oldExcessReadyPod.Labels, apps.ControllerRevisionHashLabelKey)
  3493  	oldExcessReadyPod.CreationTimestamp.Time = time.Unix(60, 0)
  3494  	oldExcessReadyPod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
  3495  	manager.podStore.Add(oldExcessReadyPod)
  3496  
  3497  	addPods(manager.podStore, "node-2", simpleDaemonSetLabel, ds, 1)
  3498  	expectSyncDaemonSets(t, manager, ds, podControl, 3, 2, 0)
  3499  
  3500  	actual := sets.NewString(podControl.DeletePodName...)
  3501  	expected := sets.NewString(oldExcessReadyPod.Name, oldReadyPod.Name)
  3502  	if !actual.Equal(expected) {
  3503  		t.Errorf("unexpected deletes\nexpected: %v\n  actual: %v", expected.List(), actual.List())
  3504  	}
  3505  }
  3506  
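// TestStoreDaemonSetStatus verifies how storeDaemonSetStatus retries status updates in the face of transient get and update failures.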
  3507  func TestStoreDaemonSetStatus(t *testing.T) {
  3508  	getError := fmt.Errorf("fake get error")
  3509  	updateError := fmt.Errorf("fake update error")
  3510  	tests := []struct {
  3511  		name                 string
  3512  		updateErrorNum       int
  3513  		getErrorNum          int
  3514  		expectedUpdateCalled int
  3515  		expectedGetCalled    int
  3516  		expectedError        error
  3517  	}{
  3518  		{
  3519  			name:                 "succeed immediately",
  3520  			updateErrorNum:       0,
  3521  			getErrorNum:          0,
  3522  			expectedUpdateCalled: 1,
  3523  			expectedGetCalled:    0,
  3524  			expectedError:        nil,
  3525  		},
  3526  		{
  3527  			name:                 "succeed after one update failure",
  3528  			updateErrorNum:       1,
  3529  			getErrorNum:          0,
  3530  			expectedUpdateCalled: 2,
  3531  			expectedGetCalled:    1,
  3532  			expectedError:        nil,
  3533  		},
  3534  		{
  3535  			name:                 "fail after two update failures",
  3536  			updateErrorNum:       2,
  3537  			getErrorNum:          0,
  3538  			expectedUpdateCalled: 2,
  3539  			expectedGetCalled:    1,
  3540  			expectedError:        updateError,
  3541  		},
  3542  		{
  3543  			name:                 "fail after one update failure and one get failure",
  3544  			updateErrorNum:       1,
  3545  			getErrorNum:          1,
  3546  			expectedUpdateCalled: 1,
  3547  			expectedGetCalled:    1,
  3548  			expectedError:        getError,
  3549  		},
  3550  	}
  3551  	for _, tt := range tests {
  3552  		t.Run(tt.name, func(t *testing.T) {
  3553  			ds := newDaemonSet("foo")
  3554  			fakeClient := &fake.Clientset{}
  3555  			getCalled := 0
  3556  			fakeClient.AddReactor("get", "daemonsets", func(action core.Action) (bool, runtime.Object, error) {
  3557  				getCalled++
  3558  				if getCalled <= tt.getErrorNum {
  3559  					return true, nil, getError
  3560  				}
  3561  				return true, ds, nil
  3562  			})
  3563  			updateCalled := 0
  3564  			fakeClient.AddReactor("update", "daemonsets", func(action core.Action) (bool, runtime.Object, error) {
  3565  				updateCalled++
  3566  				if updateCalled <= tt.updateErrorNum {
  3567  					return true, nil, updateError
  3568  				}
  3569  				return true, ds, nil
  3570  			})
  3571  			if err := storeDaemonSetStatus(context.TODO(), fakeClient.AppsV1().DaemonSets("default"), ds, 2, 2, 2, 2, 2, 2, 2, true); err != tt.expectedError {
  3572  				t.Errorf("storeDaemonSetStatus() got %v, expected %v", err, tt.expectedError)
  3573  			}
  3574  			if getCalled != tt.expectedGetCalled {
  3575  				t.Errorf("Get() was called %v times, expected %v times", getCalled, tt.expectedGetCalled)
  3576  			}
  3577  			if updateCalled != tt.expectedUpdateCalled {
  3578  				t.Errorf("UpdateStatus() was called %v times, expected %v times", updateCalled, tt.expectedUpdateCalled)
  3579  			}
  3580  		})
  3581  	}
  3582  }
  3583  
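// TestShouldIgnoreNodeUpdate verifies that node updates are ignored when nothing relevant changed, but not when labels or taints change.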
  3584  func TestShouldIgnoreNodeUpdate(t *testing.T) {
  3585  	cases := []struct {
  3586  		name           string
  3587  		newNode        *v1.Node
  3588  		oldNode        *v1.Node
  3589  		expectedResult bool
  3590  	}{
  3591  		{
  3592  			name:           "Nothing changed",
  3593  			oldNode:        newNode("node1", nil),
  3594  			newNode:        newNode("node1", nil),
  3595  			expectedResult: true,
  3596  		},
  3597  		{
  3598  			name:           "Node labels changed",
  3599  			oldNode:        newNode("node1", nil),
  3600  			newNode:        newNode("node1", simpleNodeLabel),
  3601  			expectedResult: false,
  3602  		},
  3603  		{
  3604  			name: "Node taints changed",
  3605  			oldNode: func() *v1.Node {
  3606  				node := newNode("node1", nil)
  3607  				setNodeTaint(node, noScheduleTaints)
  3608  				return node
  3609  			}(),
  3610  			newNode:        newNode("node1", nil),
  3611  			expectedResult: false,
  3612  		},
  3613  	}
  3614  
  3615  	for _, c := range cases {
  3616  		result := shouldIgnoreNodeUpdate(*c.oldNode, *c.newNode)
  3617  
  3618  		if result != c.expectedResult {
  3619  			t.Errorf("[%s] unexpected results: %v", c.name, result)
  3620  		}
  3621  	}
  3622  }
  3623  
