...

Source file src/k8s.io/kubernetes/test/integration/daemonset/daemonset_test.go

Documentation: k8s.io/kubernetes/test/integration/daemonset

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package daemonset
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strings"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/google/go-cmp/cmp"
    27  	apps "k8s.io/api/apps/v1"
    28  	v1 "k8s.io/api/core/v1"
    29  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    30  	"k8s.io/apimachinery/pkg/api/resource"
    31  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    32  	"k8s.io/apimachinery/pkg/util/intstr"
    33  	"k8s.io/apimachinery/pkg/util/uuid"
    34  	"k8s.io/apimachinery/pkg/util/wait"
    35  	"k8s.io/client-go/informers"
    36  	clientset "k8s.io/client-go/kubernetes"
    37  	appstyped "k8s.io/client-go/kubernetes/typed/apps/v1"
    38  	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    39  	restclient "k8s.io/client-go/rest"
    40  	"k8s.io/client-go/tools/cache"
    41  	"k8s.io/client-go/tools/events"
    42  	"k8s.io/client-go/util/flowcontrol"
    43  	"k8s.io/client-go/util/retry"
    44  	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    45  	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
    46  	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    47  	"k8s.io/kubernetes/pkg/controller"
    48  	"k8s.io/kubernetes/pkg/controller/daemon"
    49  	"k8s.io/kubernetes/pkg/controlplane"
    50  	"k8s.io/kubernetes/pkg/scheduler"
    51  	"k8s.io/kubernetes/pkg/scheduler/profile"
    52  	labelsutil "k8s.io/kubernetes/pkg/util/labels"
    53  	"k8s.io/kubernetes/test/integration/framework"
    54  	testutils "k8s.io/kubernetes/test/integration/util"
    55  	"k8s.io/kubernetes/test/utils/ktesting"
    56  )
    57  
    58  var zero = int64(0)
    59  
// setup starts a test apiserver with default server options and returns the
// test context, a teardown function, a (not yet running) DaemonSets
// controller, the shared informer factory, and a client for the apiserver.
func setup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
	return setupWithServerSetup(t, framework.TestServerSetup{})
}
    63  
// setupWithServerSetup starts a test apiserver with the caller's server setup
// (plus the admission-plugin tweaks below), then constructs a DaemonSets
// controller and a default scheduler against it. The scheduler is started
// here; the informers and the DaemonSets controller are returned unstarted so
// each test can start them itself. The returned teardown function cancels the
// test context, stops the apiserver, and shuts down the event broadcaster.
func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
	tCtx := ktesting.Init(t)
	// Chain any caller-provided option mutator ahead of our own changes.
	modifyServerRunOptions := serverSetup.ModifyServerRunOptions
	serverSetup.ModifyServerRunOptions = func(opts *options.ServerRunOptions) {
		if modifyServerRunOptions != nil {
			modifyServerRunOptions(opts)
		}

		opts.Admission.GenericAdmission.DisablePlugins = append(opts.Admission.GenericAdmission.DisablePlugins,
			// Disable ServiceAccount admission plugin as we don't have
			// serviceaccount controller running.
			"ServiceAccount",
			"TaintNodesByCondition",
		)
	}

	clientSet, config, closeFn := framework.StartTestServer(tCtx, t, serverSetup)

	// The controller and informers use their own clients (distinct user
	// agents) so their traffic is distinguishable in apiserver logs.
	resyncPeriod := 12 * time.Hour
	informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "daemonset-informers")), resyncPeriod)
	dc, err := daemon.NewDaemonSetsController(
		tCtx,
		informers.Apps().V1().DaemonSets(),
		informers.Apps().V1().ControllerRevisions(),
		informers.Core().V1().Pods(),
		informers.Core().V1().Nodes(),
		clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "daemonset-controller")),
		flowcontrol.NewBackOff(5*time.Second, 15*time.Minute),
	)
	if err != nil {
		t.Fatalf("error creating DaemonSets controller: %v", err)
	}

	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
		Interface: clientSet.EventsV1(),
	})

	// Default scheduler profile; DaemonSet pods are scheduled by the regular
	// scheduler in these tests, not by the controller.
	sched, err := scheduler.New(
		tCtx,
		clientSet,
		informers,
		nil,
		profile.NewRecorderFactory(eventBroadcaster),
	)
	if err != nil {
		t.Fatalf("Couldn't create scheduler: %v", err)
	}

	eventBroadcaster.StartRecordingToSink(tCtx.Done())
	go sched.Run(tCtx)

	tearDownFn := func() {
		tCtx.Cancel("tearing down apiserver")
		closeFn()
		eventBroadcaster.Shutdown()
	}

	return tCtx, tearDownFn, dc, informers, clientSet
}
   123  
   124  func testLabels() map[string]string {
   125  	return map[string]string{"name": "test"}
   126  }
   127  
   128  func newDaemonSet(name, namespace string) *apps.DaemonSet {
   129  	two := int32(2)
   130  	return &apps.DaemonSet{
   131  		TypeMeta: metav1.TypeMeta{
   132  			Kind:       "DaemonSet",
   133  			APIVersion: "apps/v1",
   134  		},
   135  		ObjectMeta: metav1.ObjectMeta{
   136  			Namespace: namespace,
   137  			Name:      name,
   138  		},
   139  		Spec: apps.DaemonSetSpec{
   140  			RevisionHistoryLimit: &two,
   141  			Selector:             &metav1.LabelSelector{MatchLabels: testLabels()},
   142  			UpdateStrategy: apps.DaemonSetUpdateStrategy{
   143  				Type: apps.OnDeleteDaemonSetStrategyType,
   144  			},
   145  			Template: v1.PodTemplateSpec{
   146  				ObjectMeta: metav1.ObjectMeta{
   147  					Labels: testLabels(),
   148  				},
   149  				Spec: v1.PodSpec{
   150  					Containers:                    []v1.Container{{Name: "foo", Image: "bar"}},
   151  					TerminationGracePeriodSeconds: &zero,
   152  				},
   153  			},
   154  		},
   155  	}
   156  }
   157  
// cleanupDaemonSets drains and deletes a DaemonSet: it first reschedules the
// daemon pods onto a nonexistent node (forcing the controller to delete
// them), waits until no pods remain scheduled, and finally deletes the
// DaemonSet itself. Failures are reported via t.Errorf so cleanup of other
// resources can continue.
func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {
	t.Helper()
	ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
	if err != nil {
		t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	// We set the nodeSelector to a random label. This label is nearly guaranteed
	// to not be set on any node so the DameonSetController will start deleting
	// daemon pods. Once it's done deleting the daemon pods, it's safe to delete
	// the DaemonSet.
	ds.Spec.Template.Spec.NodeSelector = map[string]string{
		string(uuid.NewUUID()): string(uuid.NewUUID()),
	}
	// force update to avoid version conflict
	ds.ResourceVersion = ""

	if ds, err = cs.AppsV1().DaemonSets(ds.Namespace).Update(context.TODO(), ds, metav1.UpdateOptions{}); err != nil {
		t.Errorf("Failed to update DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	// Pods with finalizers would never terminate on their own; strip the
	// finalizers so the controller's deletions can complete.
	if len(ds.Spec.Template.Finalizers) > 0 {
		testutils.RemovePodFinalizersInNamespace(context.TODO(), cs, t, ds.Namespace)
	}

	// Wait for the daemon set controller to kill all the daemon pods.
	if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
		if err != nil {
			return false, nil
		}
		return updatedDS.Status.CurrentNumberScheduled+updatedDS.Status.NumberMisscheduled == 0, nil
	}); err != nil {
		t.Errorf("Failed to kill the pods of DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
		return
	}

	// Delete without orphaning so any remaining dependents are removed too.
	falseVar := false
	deleteOptions := metav1.DeleteOptions{OrphanDependents: &falseVar}
	if err := cs.AppsV1().DaemonSets(ds.Namespace).Delete(context.TODO(), ds.Name, deleteOptions); err != nil {
		t.Errorf("Failed to delete DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
	}
}
   203  
   204  func newRollbackStrategy() *apps.DaemonSetUpdateStrategy {
   205  	one := intstr.FromInt32(1)
   206  	return &apps.DaemonSetUpdateStrategy{
   207  		Type:          apps.RollingUpdateDaemonSetStrategyType,
   208  		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
   209  	}
   210  }
   211  
   212  func newOnDeleteStrategy() *apps.DaemonSetUpdateStrategy {
   213  	return &apps.DaemonSetUpdateStrategy{
   214  		Type: apps.OnDeleteDaemonSetStrategyType,
   215  	}
   216  }
   217  
   218  func updateStrategies() []*apps.DaemonSetUpdateStrategy {
   219  	return []*apps.DaemonSetUpdateStrategy{newOnDeleteStrategy(), newRollbackStrategy()}
   220  }
   221  
   222  func allocatableResources(memory, cpu string) v1.ResourceList {
   223  	return v1.ResourceList{
   224  		v1.ResourceMemory: resource.MustParse(memory),
   225  		v1.ResourceCPU:    resource.MustParse(cpu),
   226  		v1.ResourcePods:   resource.MustParse("100"),
   227  	}
   228  }
   229  
   230  func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
   231  	return v1.PodSpec{
   232  		NodeName: nodeName,
   233  		Containers: []v1.Container{
   234  			{
   235  				Name:  "foo",
   236  				Image: "bar",
   237  				Resources: v1.ResourceRequirements{
   238  					Requests: v1.ResourceList{
   239  						v1.ResourceMemory: resource.MustParse(memory),
   240  						v1.ResourceCPU:    resource.MustParse(cpu),
   241  					},
   242  				},
   243  			},
   244  		},
   245  		TerminationGracePeriodSeconds: &zero,
   246  	}
   247  }
   248  
   249  func newNode(name string, label map[string]string) *v1.Node {
   250  	return &v1.Node{
   251  		TypeMeta: metav1.TypeMeta{
   252  			Kind:       "Node",
   253  			APIVersion: "v1",
   254  		},
   255  		ObjectMeta: metav1.ObjectMeta{
   256  			Name:      name,
   257  			Labels:    label,
   258  			Namespace: metav1.NamespaceNone,
   259  		},
   260  		Status: v1.NodeStatus{
   261  			Conditions:  []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
   262  			Allocatable: v1.ResourceList{v1.ResourcePods: resource.MustParse("100")},
   263  		},
   264  	}
   265  }
   266  
   267  func addNodes(nodeClient corev1client.NodeInterface, startIndex, numNodes int, label map[string]string, t *testing.T) {
   268  	for i := startIndex; i < startIndex+numNodes; i++ {
   269  		_, err := nodeClient.Create(context.TODO(), newNode(fmt.Sprintf("node-%d", i), label), metav1.CreateOptions{})
   270  		if err != nil {
   271  			t.Fatalf("Failed to create node: %v", err)
   272  		}
   273  	}
   274  }
   275  
// validateDaemonSetPodsAndMarkReady polls until exactly numberPods
// non-terminal pods exist in the informer cache, checking on every pass that
// each pod is controlled by a DaemonSet. Since there is no kubelet in
// integration tests, it also patches any scheduled-but-not-ready pod's status
// to Running/Ready so DaemonSet status can converge.
func validateDaemonSetPodsAndMarkReady(
	podClient corev1client.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T,
) {
	if err := wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		nonTerminatedPods := 0

		for _, object := range objects {
			pod := object.(*v1.Pod)

			// Every daemon pod must have exactly one owner: the DaemonSet
			// acting as its controller.
			ownerReferences := pod.ObjectMeta.OwnerReferences
			if len(ownerReferences) != 1 {
				return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
			}
			controllerRef := ownerReferences[0]
			if got, want := controllerRef.Kind, "DaemonSet"; got != want {
				t.Errorf("controllerRef.Kind = %q, want %q", got, want)
			}
			if controllerRef.Controller == nil || *controllerRef.Controller != true {
				t.Errorf("controllerRef.Controller is not set to true")
			}

			// Terminal (Succeeded/Failed) pods don't count toward the target.
			if podutil.IsPodPhaseTerminal(pod.Status.Phase) {
				continue
			}
			nonTerminatedPods++
			// Only pods that have been scheduled (NodeName set) are marked
			// ready; unscheduled pods are left for the scheduler.
			if !podutil.IsPodReady(pod) && len(pod.Spec.NodeName) != 0 {
				podCopy := pod.DeepCopy()
				podCopy.Status = v1.PodStatus{
					Phase:      v1.PodRunning,
					Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
				}
				_, err := podClient.UpdateStatus(context.TODO(), podCopy, metav1.UpdateOptions{})
				if err != nil {
					return false, err
				}
			}
		}

		return nonTerminatedPods == numberPods, nil
	}); err != nil {
		t.Fatal(err)
	}
}
   323  
// validateDaemonSetPodsActive polls until the informer cache holds exactly
// numberPods active (Running or Pending) pods, verifying on each pass that
// every pod is controlled by a DaemonSet. podClient is unused but kept for
// signature symmetry with the other validate helpers.
func validateDaemonSetPodsActive(
	podClient corev1client.PodInterface,
	podInformer cache.SharedIndexInformer,
	numberPods int,
	t *testing.T,
) {
	if err := wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		objects := podInformer.GetIndexer().List()
		if len(objects) < numberPods {
			return false, nil
		}
		podsActiveCount := 0
		for _, object := range objects {
			pod := object.(*v1.Pod)
			ownerReferences := pod.ObjectMeta.OwnerReferences
			if len(ownerReferences) != 1 {
				return false, fmt.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
			}
			controllerRef := ownerReferences[0]
			if got, want := controllerRef.Kind, "DaemonSet"; got != want {
				t.Errorf("controllerRef.Kind = %q, want %q", got, want)
			}
			if controllerRef.Controller == nil || *controllerRef.Controller != true {
				t.Errorf("controllerRef.Controller is not set to true")
			}
			// Running or Pending counts as active; terminal pods do not.
			if pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodPending {
				podsActiveCount += 1
			}
		}
		return podsActiveCount == numberPods, nil
	}); err != nil {
		t.Fatal(err)
	}
}
   358  
   359  func validateDaemonSetPodsTolerations(
   360  	podClient corev1client.PodInterface,
   361  	podInformer cache.SharedIndexInformer,
   362  	expectedTolerations []v1.Toleration,
   363  	prefix string,
   364  	t *testing.T,
   365  ) {
   366  	objects := podInformer.GetIndexer().List()
   367  	for _, object := range objects {
   368  		var prefixedPodToleration []v1.Toleration
   369  		pod := object.(*v1.Pod)
   370  		ownerReferences := pod.ObjectMeta.OwnerReferences
   371  		if len(ownerReferences) != 1 {
   372  			t.Errorf("Pod %s has %d OwnerReferences, expected only 1", pod.Name, len(ownerReferences))
   373  		}
   374  		controllerRef := ownerReferences[0]
   375  		if got, want := controllerRef.Kind, "DaemonSet"; got != want {
   376  			t.Errorf("controllerRef.Kind = %q, want %q", got, want)
   377  		}
   378  		if controllerRef.Controller == nil || *controllerRef.Controller != true {
   379  			t.Errorf("controllerRef.Controller is not set to true")
   380  		}
   381  		for _, podToleration := range pod.Spec.Tolerations {
   382  			if strings.HasPrefix(podToleration.Key, prefix) {
   383  				prefixedPodToleration = append(prefixedPodToleration, podToleration)
   384  			}
   385  		}
   386  		if diff := cmp.Diff(expectedTolerations, prefixedPodToleration); diff != "" {
   387  			t.Fatalf("Unexpected tolerations (-want,+got):\n%s", diff)
   388  		}
   389  	}
   390  }
   391  
   392  // podUnschedulable returns a condition function that returns true if the given pod
   393  // gets unschedulable status.
   394  func podUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
   395  	return func() (bool, error) {
   396  		pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
   397  		if apierrors.IsNotFound(err) {
   398  			return false, nil
   399  		}
   400  		if err != nil {
   401  			// This could be a connection error so we want to retry.
   402  			return false, nil
   403  		}
   404  		_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
   405  		return cond != nil && cond.Status == v1.ConditionFalse &&
   406  			cond.Reason == v1.PodReasonUnschedulable, nil
   407  	}
   408  }
   409  
// waitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
	return wait.Poll(100*time.Millisecond, timeout, podUnschedulable(cs, pod.Namespace, pod.Name))
}
   415  
// waitForPodUnschedulable waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the timeout duration (10 seconds).
func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
	return waitForPodUnschedulableWithTimeout(cs, pod, 10*time.Second)
}
   421  
   422  // waitForPodsCreated waits for number of pods are created.
   423  func waitForPodsCreated(podInformer cache.SharedIndexInformer, num int) error {
   424  	return wait.Poll(100*time.Millisecond, 10*time.Second, func() (bool, error) {
   425  		objects := podInformer.GetIndexer().List()
   426  		return len(objects) == num, nil
   427  	})
   428  }
   429  
   430  func waitForDaemonSetAndControllerRevisionCreated(c clientset.Interface, name string, namespace string) error {
   431  	return wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
   432  		ds, err := c.AppsV1().DaemonSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
   433  		if err != nil {
   434  			return false, err
   435  		}
   436  		if ds == nil {
   437  			return false, nil
   438  		}
   439  
   440  		revs, err := c.AppsV1().ControllerRevisions(namespace).List(context.TODO(), metav1.ListOptions{})
   441  		if err != nil {
   442  			return false, err
   443  		}
   444  		if revs.Size() == 0 {
   445  			return false, nil
   446  		}
   447  
   448  		for _, rev := range revs.Items {
   449  			for _, oref := range rev.OwnerReferences {
   450  				if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
   451  					return true, nil
   452  				}
   453  			}
   454  		}
   455  		return false, nil
   456  	})
   457  }
   458  
   459  func hashAndNameForDaemonSet(ds *apps.DaemonSet) (string, string) {
   460  	hash := fmt.Sprint(controller.ComputeHash(&ds.Spec.Template, ds.Status.CollisionCount))
   461  	name := ds.Name + "-" + hash
   462  	return hash, name
   463  }
   464  
   465  func validateDaemonSetCollisionCount(dsClient appstyped.DaemonSetInterface, dsName string, expCount int32, t *testing.T) {
   466  	ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
   467  	if err != nil {
   468  		t.Fatalf("Failed to look up DaemonSet: %v", err)
   469  	}
   470  	collisionCount := ds.Status.CollisionCount
   471  	if *collisionCount != expCount {
   472  		t.Fatalf("Expected collisionCount to be %d, but found %d", expCount, *collisionCount)
   473  	}
   474  }
   475  
// validateDaemonSetStatus polls until the DaemonSet's status.numberReady
// equals expectedNumberReady, failing the test after 60 seconds.
func validateDaemonSetStatus(
	dsClient appstyped.DaemonSetInterface,
	dsName string,
	expectedNumberReady int32,
	t *testing.T) {
	if err := wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
		ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return ds.Status.NumberReady == expectedNumberReady, nil
	}); err != nil {
		t.Fatal(err)
	}
}
   491  
   492  func validateUpdatedNumberScheduled(
   493  	ctx context.Context,
   494  	dsClient appstyped.DaemonSetInterface,
   495  	dsName string,
   496  	expectedUpdatedNumberScheduled int32,
   497  	t *testing.T) {
   498  	if err := wait.PollUntilContextTimeout(ctx, time.Second, 60*time.Second, true, func(ctx context.Context) (bool, error) {
   499  		ds, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
   500  		if err != nil {
   501  			return false, err
   502  		}
   503  		return ds.Status.UpdatedNumberScheduled == expectedUpdatedNumberScheduled, nil
   504  	}); err != nil {
   505  		t.Fatal(err)
   506  	}
   507  }
   508  
// updateDS applies updateFunc to a freshly fetched copy of the named
// DaemonSet and submits the update, retrying on resource-version conflicts.
// It returns the updated object, or fails the test if the retries are
// exhausted.
func updateDS(t *testing.T, dsClient appstyped.DaemonSetInterface, dsName string, updateFunc func(*apps.DaemonSet)) *apps.DaemonSet {
	var ds *apps.DaemonSet
	if err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		// Re-fetch on every attempt so the mutation is applied to the
		// latest resource version.
		newDS, err := dsClient.Get(context.TODO(), dsName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		updateFunc(newDS)
		ds, err = dsClient.Update(context.TODO(), newDS, metav1.UpdateOptions{})
		return err
	}); err != nil {
		t.Fatalf("Failed to update DaemonSet: %v", err)
	}
	return ds
}
   524  
   525  func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy)) {
   526  	for _, strategy := range updateStrategies() {
   527  		t.Run(string(strategy.Type), func(t *testing.T) {
   528  			tf(t, strategy)
   529  		})
   530  	}
   531  }
   532  
// TestOneNodeDaemonLaunchesPod verifies that, for each update strategy, a
// DaemonSet launches exactly one daemon pod on a cluster with a single node.
func TestOneNodeDaemonLaunchesPod(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		// Start the informers and the DaemonSets controller for this test.
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		_, err = nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// One node should yield exactly one daemon pod; mark it Ready so the
		// DaemonSet status converges to NumberReady == 1.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
   565  
// TestSimpleDaemonSetLaunchesPods verifies that, for each update strategy, a
// DaemonSet launches one daemon pod per node on a five-node cluster.
func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		// Start the informers and the DaemonSets controller for this test.
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		addNodes(nodeClient, 0, 5, nil, t)

		// Expect one daemon pod per node (5 total), all eventually Ready.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 5, t)
		validateDaemonSetStatus(dsClient, ds.Name, 5, t)
	})
}
   595  
// TestSimpleDaemonSetRestartsPodsOnTerminalPhase verifies that when a daemon
// pod reaches a terminal phase (Succeeded or Failed, with or without a pod
// finalizer), the DaemonSet controller replaces it so every node keeps one
// active daemon pod.
func TestSimpleDaemonSetRestartsPodsOnTerminalPhase(t *testing.T) {
	cases := map[string]struct {
		phase     v1.PodPhase
		finalizer bool
	}{
		"Succeeded": {
			phase: v1.PodSucceeded,
		},
		"Failed": {
			phase: v1.PodFailed,
		},
		"Succeeded with finalizer": {
			phase:     v1.PodSucceeded,
			finalizer: true,
		},
	}
	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
				ctx, closeFn, dc, informers, clientset := setup(t)
				defer closeFn()
				ns := framework.CreateNamespaceOrDie(clientset, "daemonset-restart-terminal-pod-test", t)
				defer framework.DeleteNamespaceOrDie(clientset, ns, t)

				dsClient := clientset.AppsV1().DaemonSets(ns.Name)
				podClient := clientset.CoreV1().Pods(ns.Name)
				nodeClient := clientset.CoreV1().Nodes()
				podInformer := informers.Core().V1().Pods().Informer()

				// Start the informers and the DaemonSets controller.
				informers.Start(ctx.Done())
				go dc.Run(ctx, 2)

				ds := newDaemonSet("restart-terminal-pod", ns.Name)
				if tc.finalizer {
					// The finalizer keeps terminal pods around, exercising
					// replacement while the old pod object still exists.
					ds.Spec.Template.Finalizers = append(ds.Spec.Template.Finalizers, "test.k8s.io/finalizer")
				}
				ds.Spec.UpdateStrategy = *strategy
				if _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}); err != nil {
					t.Fatalf("Failed to create DaemonSet: %v", err)
				}
				defer cleanupDaemonSets(t, clientset, ds)

				numNodes := 3
				addNodes(nodeClient, 0, numNodes, nil, t)

				validateDaemonSetPodsAndMarkReady(podClient, podInformer, numNodes, t)
				validateDaemonSetStatus(dsClient, ds.Name, int32(numNodes), t)
				// Force one daemon pod into the terminal phase under test.
				podToMarkAsTerminal := podInformer.GetIndexer().List()[0].(*v1.Pod)
				podCopy := podToMarkAsTerminal.DeepCopy()
				podCopy.Status.Phase = tc.phase
				if _, err := podClient.UpdateStatus(ctx, podCopy, metav1.UpdateOptions{}); err != nil {
					t.Fatalf("Failed to mark the pod as terminal with phase: %v. Error: %v", tc.phase, err)
				}
				// verify all pods are active. They either continue Running or are Pending after restart
				validateDaemonSetPodsActive(podClient, podInformer, numNodes, t)
				validateDaemonSetPodsAndMarkReady(podClient, podInformer, numNodes, t)
				validateDaemonSetStatus(dsClient, ds.Name, int32(numNodes), t)
			})
		})
	}
}
   657  
// TestDaemonSetWithNodeSelectorLaunchesPods verifies that a DaemonSet with
// required node affinity only places pods on matching nodes: of six nodes,
// two match the "zone=test" label term and one matches the name-field term,
// so exactly three daemon pods are expected.
func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		// Start the informers and the DaemonSets controller.
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy

		// Two NodeSelectorTerms are ORed: match either the "zone=test" label
		// or the literal node name "node-1".
		ds.Spec.Template.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{
						{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key:      "zone",
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"test"},
								},
							},
						},
						{
							MatchFields: []v1.NodeSelectorRequirement{
								{
									Key:      metav1.ObjectNameField,
									Operator: v1.NodeSelectorOpIn,
									Values:   []string{"node-1"},
								},
							},
						},
					},
				},
			},
		}

		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}
		defer cleanupDaemonSets(t, clientset, ds)

		addNodes(nodeClient, 0, 2, nil, t)
		// Two nodes with labels
		addNodes(nodeClient, 2, 2, map[string]string{
			"zone": "test",
		}, t)
		addNodes(nodeClient, 4, 2, nil, t)

		// node-1 (by name) + node-2/node-3 (by label) = 3 daemon pods.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 3, t)
		validateDaemonSetStatus(dsClient, ds.Name, 3, t)
	})
}
   720  
// TestNotReadyNodeDaemonDoesLaunchPod verifies that a DaemonSet still
// launches its daemon pod on a node whose Ready condition is False —
// DaemonSet pods tolerate not-ready nodes.
func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		nodeClient := clientset.CoreV1().Nodes()
		podInformer := informers.Core().V1().Pods().Informer()

		// Start the informers and the DaemonSets controller.
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.UpdateStrategy = *strategy
		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}

		defer cleanupDaemonSets(t, clientset, ds)

		// Override the fixture's Ready=True condition with Ready=False.
		node := newNode("single-node", nil)
		node.Status.Conditions = []v1.NodeCondition{
			{Type: v1.NodeReady, Status: v1.ConditionFalse},
		}
		_, err = nodeClient.Create(ctx, node, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// The daemon pod is still expected on the not-ready node.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
   758  
// TestInsufficientCapacityNode tests that the DaemonSet should create
// Pods for all the nodes regardless of available resource on the nodes, and kube-scheduler should
// not schedule Pods onto the nodes with insufficient resource.
func TestInsufficientCapacityNode(t *testing.T) {
	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
		ctx, closeFn, dc, informers, clientset := setup(t)
		defer closeFn()
		ns := framework.CreateNamespaceOrDie(clientset, "insufficient-capacity", t)
		defer framework.DeleteNamespaceOrDie(clientset, ns, t)

		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
		podClient := clientset.CoreV1().Pods(ns.Name)
		podInformer := informers.Core().V1().Pods().Informer()
		nodeClient := clientset.CoreV1().Nodes()

		// Start the informers and the DaemonSets controller.
		informers.Start(ctx.Done())
		go dc.Run(ctx, 2)

		// The daemon pod requests 120M memory; the first node only has 100M.
		ds := newDaemonSet("foo", ns.Name)
		ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m")
		ds.Spec.UpdateStrategy = *strategy
		ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create DaemonSet: %v", err)
		}

		defer cleanupDaemonSets(t, clientset, ds)

		node := newNode("node-with-limited-memory", nil)
		node.Status.Allocatable = allocatableResources("100M", "200m")
		_, err = nodeClient.Create(ctx, node, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// The controller still creates the pod; the scheduler must then mark
		// it unschedulable for lack of memory.
		if err := waitForPodsCreated(podInformer, 1); err != nil {
			t.Errorf("Failed to wait for pods created: %v", err)
		}

		objects := podInformer.GetIndexer().List()
		for _, object := range objects {
			pod := object.(*v1.Pod)
			if err := waitForPodUnschedulable(clientset, pod); err != nil {
				t.Errorf("Failed to wait for unschedulable status of pod %+v", pod)
			}
		}

		// A second node with enough resources gets a schedulable daemon pod.
		node1 := newNode("node-with-enough-memory", nil)
		node1.Status.Allocatable = allocatableResources("200M", "2000m")
		_, err = nodeClient.Create(ctx, node1, metav1.CreateOptions{})
		if err != nil {
			t.Fatalf("Failed to create node: %v", err)
		}

		// 2 pods are created. But only one of two Pods is scheduled by default scheduler.
		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
	})
}
   818  
   819  // TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
   820  // hash collision with an existing ControllerRevision
   821  func TestLaunchWithHashCollision(t *testing.T) {
   822  	ctx, closeFn, dc, informers, clientset := setup(t)
   823  	defer closeFn()
   824  	ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
   825  	defer framework.DeleteNamespaceOrDie(clientset, ns, t)
   826  
   827  	dsClient := clientset.AppsV1().DaemonSets(ns.Name)
   828  	podInformer := informers.Core().V1().Pods().Informer()
   829  	nodeClient := clientset.CoreV1().Nodes()
   830  
   831  	informers.Start(ctx.Done())
   832  	go dc.Run(ctx, 2)
   833  
   834  	// Create single node
   835  	_, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{})
   836  	if err != nil {
   837  		t.Fatalf("Failed to create node: %v", err)
   838  	}
   839  
   840  	// Create new DaemonSet with RollingUpdate strategy
   841  	orgDs := newDaemonSet("foo", ns.Name)
   842  	oneIntString := intstr.FromInt32(1)
   843  	orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
   844  		Type: apps.RollingUpdateDaemonSetStrategyType,
   845  		RollingUpdate: &apps.RollingUpdateDaemonSet{
   846  			MaxUnavailable: &oneIntString,
   847  		},
   848  	}
   849  	ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{})
   850  	if err != nil {
   851  		t.Fatalf("Failed to create DaemonSet: %v", err)
   852  	}
   853  
   854  	// Wait for the DaemonSet to be created before proceeding
   855  	err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
   856  	if err != nil {
   857  		t.Fatalf("Failed to create DaemonSet: %v", err)
   858  	}
   859  
   860  	ds, err = dsClient.Get(ctx, ds.Name, metav1.GetOptions{})
   861  	if err != nil {
   862  		t.Fatalf("Failed to get DaemonSet: %v", err)
   863  	}
   864  	var orgCollisionCount int32
   865  	if ds.Status.CollisionCount != nil {
   866  		orgCollisionCount = *ds.Status.CollisionCount
   867  	}
   868  
   869  	// Look up the ControllerRevision for the DaemonSet
   870  	_, name := hashAndNameForDaemonSet(ds)
   871  	revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{})
   872  	if err != nil || revision == nil {
   873  		t.Fatalf("Failed to look up ControllerRevision: %v", err)
   874  	}
   875  
   876  	// Create a "fake" ControllerRevision that we know will create a hash collision when we make
   877  	// the next update
   878  	one := int64(1)
   879  	ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
   880  
   881  	newHash, newName := hashAndNameForDaemonSet(ds)
   882  	newRevision := &apps.ControllerRevision{
   883  		ObjectMeta: metav1.ObjectMeta{
   884  			Name:            newName,
   885  			Namespace:       ds.Namespace,
   886  			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
   887  			Annotations:     ds.Annotations,
   888  			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
   889  		},
   890  		Data:     revision.Data,
   891  		Revision: revision.Revision + 1,
   892  	}
   893  	_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{})
   894  	if err != nil {
   895  		t.Fatalf("Failed to create ControllerRevision: %v", err)
   896  	}
   897  
   898  	// Make an update of the DaemonSet which we know will create a hash collision when
   899  	// the next ControllerRevision is created.
   900  	ds = updateDS(t, dsClient, ds.Name, func(updateDS *apps.DaemonSet) {
   901  		updateDS.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
   902  	})
   903  
   904  	// Wait for any pod with the latest Spec to exist
   905  	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
   906  		objects := podInformer.GetIndexer().List()
   907  		for _, object := range objects {
   908  			pod := object.(*v1.Pod)
   909  			if *pod.Spec.TerminationGracePeriodSeconds == *ds.Spec.Template.Spec.TerminationGracePeriodSeconds {
   910  				return true, nil
   911  			}
   912  		}
   913  		return false, nil
   914  	})
   915  	if err != nil {
   916  		t.Fatalf("Failed to wait for Pods with the latest Spec to be created: %v", err)
   917  	}
   918  
   919  	validateDaemonSetCollisionCount(dsClient, ds.Name, orgCollisionCount+1, t)
   920  }
   921  
   922  // Test DaemonSet Controller updates label of the pod after "DedupCurHistories". The scenario is
   923  // 1. Create an another controllerrevision owned by the daemonset but with higher revision and different hash
   924  // 2. Add a node to ensure the controller sync
   925  // 3. The dsc is expected to "PATCH" the existing pod label with new hash and deletes the old controllerrevision once finishes the update
   926  func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) {
   927  	ctx, closeFn, dc, informers, clientset := setup(t)
   928  	defer closeFn()
   929  	ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
   930  	defer framework.DeleteNamespaceOrDie(clientset, ns, t)
   931  
   932  	dsClient := clientset.AppsV1().DaemonSets(ns.Name)
   933  	podInformer := informers.Core().V1().Pods().Informer()
   934  	nodeClient := clientset.CoreV1().Nodes()
   935  
   936  	informers.Start(ctx.Done())
   937  	go dc.Run(ctx, 2)
   938  
   939  	// Create single node
   940  	_, err := nodeClient.Create(ctx, newNode("single-node", nil), metav1.CreateOptions{})
   941  	if err != nil {
   942  		t.Fatalf("Failed to create node: %v", err)
   943  	}
   944  
   945  	// Create new DaemonSet with RollingUpdate strategy
   946  	orgDs := newDaemonSet("foo", ns.Name)
   947  	oneIntString := intstr.FromInt32(1)
   948  	orgDs.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
   949  		Type: apps.RollingUpdateDaemonSetStrategyType,
   950  		RollingUpdate: &apps.RollingUpdateDaemonSet{
   951  			MaxUnavailable: &oneIntString,
   952  		},
   953  	}
   954  	ds, err := dsClient.Create(ctx, orgDs, metav1.CreateOptions{})
   955  	if err != nil {
   956  		t.Fatalf("Failed to create DaemonSet: %v", err)
   957  	}
   958  	t.Logf("ds created")
   959  	// Wait for the DaemonSet to be created before proceeding
   960  	err = waitForDaemonSetAndControllerRevisionCreated(clientset, ds.Name, ds.Namespace)
   961  	if err != nil {
   962  		t.Fatalf("Failed to create DaemonSet: %v", err)
   963  	}
   964  
   965  	ds, err = dsClient.Get(ctx, ds.Name, metav1.GetOptions{})
   966  	if err != nil {
   967  		t.Fatalf("Failed to get DaemonSet: %v", err)
   968  	}
   969  
   970  	// Look up the ControllerRevision for the DaemonSet
   971  	_, name := hashAndNameForDaemonSet(ds)
   972  	revision, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).Get(ctx, name, metav1.GetOptions{})
   973  	if err != nil || revision == nil {
   974  		t.Fatalf("Failed to look up ControllerRevision: %v", err)
   975  	}
   976  	t.Logf("revision: %v", revision.Name)
   977  
   978  	// Create a "fake" ControllerRevision which is owned by the same daemonset
   979  	one := int64(1)
   980  	ds.Spec.Template.Spec.TerminationGracePeriodSeconds = &one
   981  
   982  	newHash, newName := hashAndNameForDaemonSet(ds)
   983  	newRevision := &apps.ControllerRevision{
   984  		ObjectMeta: metav1.ObjectMeta{
   985  			Name:            newName,
   986  			Namespace:       ds.Namespace,
   987  			Labels:          labelsutil.CloneAndAddLabel(ds.Spec.Template.Labels, apps.DefaultDaemonSetUniqueLabelKey, newHash),
   988  			Annotations:     ds.Annotations,
   989  			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(ds, apps.SchemeGroupVersion.WithKind("DaemonSet"))},
   990  		},
   991  		Data:     revision.Data,
   992  		Revision: revision.Revision + 1,
   993  	}
   994  	_, err = clientset.AppsV1().ControllerRevisions(ds.Namespace).Create(ctx, newRevision, metav1.CreateOptions{})
   995  	if err != nil {
   996  		t.Fatalf("Failed to create ControllerRevision: %v", err)
   997  	}
   998  	t.Logf("revision: %v", newName)
   999  
  1000  	// ensure the daemonset to be synced
  1001  	_, err = nodeClient.Create(ctx, newNode("second-node", nil), metav1.CreateOptions{})
  1002  	if err != nil {
  1003  		t.Fatalf("Failed to create node: %v", err)
  1004  	}
  1005  
  1006  	// check whether the pod label is updated after controllerrevision is created
  1007  	err = wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
  1008  		objects := podInformer.GetIndexer().List()
  1009  		for _, object := range objects {
  1010  			pod := object.(*v1.Pod)
  1011  			t.Logf("newHash: %v, label: %v", newHash, pod.ObjectMeta.Labels[apps.DefaultDaemonSetUniqueLabelKey])
  1012  			for _, oref := range pod.OwnerReferences {
  1013  				if oref.Name == ds.Name && oref.UID == ds.UID && oref.Kind == "DaemonSet" {
  1014  					if pod.ObjectMeta.Labels[apps.DefaultDaemonSetUniqueLabelKey] != newHash {
  1015  						return false, nil
  1016  					}
  1017  				}
  1018  			}
  1019  		}
  1020  		return true, nil
  1021  	})
  1022  	if err != nil {
  1023  		t.Fatalf("Failed to update the pod label after new controllerrevision is created: %v", err)
  1024  	}
  1025  
  1026  	err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
  1027  		revs, err := clientset.AppsV1().ControllerRevisions(ds.Namespace).List(ctx, metav1.ListOptions{})
  1028  		if err != nil {
  1029  			return false, fmt.Errorf("failed to list controllerrevision: %v", err)
  1030  		}
  1031  		if revs.Size() == 0 {
  1032  			return false, fmt.Errorf("no avaialable controllerrevision")
  1033  		}
  1034  
  1035  		for _, rev := range revs.Items {
  1036  			t.Logf("revision: %v;hash: %v", rev.Name, rev.ObjectMeta.Labels[apps.DefaultDaemonSetUniqueLabelKey])
  1037  			for _, oref := range rev.OwnerReferences {
  1038  				if oref.Kind == "DaemonSet" && oref.UID == ds.UID {
  1039  					if rev.Name != newName {
  1040  						t.Logf("waiting for duplicate controllerrevision %v to be deleted", newName)
  1041  						return false, nil
  1042  					}
  1043  				}
  1044  			}
  1045  		}
  1046  		return true, nil
  1047  	})
  1048  	if err != nil {
  1049  		t.Fatalf("Failed to check that duplicate controllerrevision is not deleted: %v", err)
  1050  	}
  1051  }
  1052  
  1053  // TestTaintedNode tests tainted node isn't expected to have pod scheduled
  1054  func TestTaintedNode(t *testing.T) {
  1055  	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
  1056  		ctx, closeFn, dc, informers, clientset := setup(t)
  1057  		defer closeFn()
  1058  		ns := framework.CreateNamespaceOrDie(clientset, "tainted-node", t)
  1059  		defer framework.DeleteNamespaceOrDie(clientset, ns, t)
  1060  
  1061  		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
  1062  		podClient := clientset.CoreV1().Pods(ns.Name)
  1063  		podInformer := informers.Core().V1().Pods().Informer()
  1064  		nodeClient := clientset.CoreV1().Nodes()
  1065  
  1066  		informers.Start(ctx.Done())
  1067  		go dc.Run(ctx, 2)
  1068  
  1069  		ds := newDaemonSet("foo", ns.Name)
  1070  		ds.Spec.UpdateStrategy = *strategy
  1071  		ds, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
  1072  		if err != nil {
  1073  			t.Fatalf("Failed to create DaemonSet: %v", err)
  1074  		}
  1075  
  1076  		defer cleanupDaemonSets(t, clientset, ds)
  1077  
  1078  		nodeWithTaint := newNode("node-with-taint", nil)
  1079  		nodeWithTaint.Spec.Taints = []v1.Taint{{Key: "key1", Value: "val1", Effect: "NoSchedule"}}
  1080  		_, err = nodeClient.Create(ctx, nodeWithTaint, metav1.CreateOptions{})
  1081  		if err != nil {
  1082  			t.Fatalf("Failed to create nodeWithTaint: %v", err)
  1083  		}
  1084  
  1085  		nodeWithoutTaint := newNode("node-without-taint", nil)
  1086  		_, err = nodeClient.Create(ctx, nodeWithoutTaint, metav1.CreateOptions{})
  1087  		if err != nil {
  1088  			t.Fatalf("Failed to create nodeWithoutTaint: %v", err)
  1089  		}
  1090  
  1091  		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 1, t)
  1092  		validateDaemonSetStatus(dsClient, ds.Name, 1, t)
  1093  
  1094  		// remove taint from nodeWithTaint
  1095  		nodeWithTaint, err = nodeClient.Get(ctx, "node-with-taint", metav1.GetOptions{})
  1096  		if err != nil {
  1097  			t.Fatalf("Failed to retrieve nodeWithTaint: %v", err)
  1098  		}
  1099  		nodeWithTaintCopy := nodeWithTaint.DeepCopy()
  1100  		nodeWithTaintCopy.Spec.Taints = []v1.Taint{}
  1101  		_, err = nodeClient.Update(ctx, nodeWithTaintCopy, metav1.UpdateOptions{})
  1102  		if err != nil {
  1103  			t.Fatalf("Failed to update nodeWithTaint: %v", err)
  1104  		}
  1105  
  1106  		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
  1107  		validateDaemonSetStatus(dsClient, ds.Name, 2, t)
  1108  	})
  1109  }
  1110  
  1111  // TestUnschedulableNodeDaemonDoesLaunchPod tests that the DaemonSet Pods can still be scheduled
  1112  // to the Unschedulable nodes.
  1113  func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
  1114  	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
  1115  		ctx, closeFn, dc, informers, clientset := setup(t)
  1116  		defer closeFn()
  1117  		ns := framework.CreateNamespaceOrDie(clientset, "daemonset-unschedulable-test", t)
  1118  		defer framework.DeleteNamespaceOrDie(clientset, ns, t)
  1119  
  1120  		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
  1121  		podClient := clientset.CoreV1().Pods(ns.Name)
  1122  		nodeClient := clientset.CoreV1().Nodes()
  1123  		podInformer := informers.Core().V1().Pods().Informer()
  1124  
  1125  		informers.Start(ctx.Done())
  1126  		go dc.Run(ctx, 2)
  1127  
  1128  		ds := newDaemonSet("foo", ns.Name)
  1129  		ds.Spec.UpdateStrategy = *strategy
  1130  		ds.Spec.Template.Spec.HostNetwork = true
  1131  		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
  1132  		if err != nil {
  1133  			t.Fatalf("Failed to create DaemonSet: %v", err)
  1134  		}
  1135  
  1136  		defer cleanupDaemonSets(t, clientset, ds)
  1137  
  1138  		// Creates unschedulable node.
  1139  		node := newNode("unschedulable-node", nil)
  1140  		node.Spec.Unschedulable = true
  1141  		node.Spec.Taints = []v1.Taint{
  1142  			{
  1143  				Key:    v1.TaintNodeUnschedulable,
  1144  				Effect: v1.TaintEffectNoSchedule,
  1145  			},
  1146  		}
  1147  
  1148  		_, err = nodeClient.Create(ctx, node, metav1.CreateOptions{})
  1149  		if err != nil {
  1150  			t.Fatalf("Failed to create node: %v", err)
  1151  		}
  1152  
  1153  		// Creates network-unavailable node.
  1154  		nodeNU := newNode("network-unavailable-node", nil)
  1155  		nodeNU.Status.Conditions = []v1.NodeCondition{
  1156  			{Type: v1.NodeReady, Status: v1.ConditionFalse},
  1157  			{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionTrue},
  1158  		}
  1159  		nodeNU.Spec.Taints = []v1.Taint{
  1160  			{
  1161  				Key:    v1.TaintNodeNetworkUnavailable,
  1162  				Effect: v1.TaintEffectNoSchedule,
  1163  			},
  1164  		}
  1165  
  1166  		_, err = nodeClient.Create(ctx, nodeNU, metav1.CreateOptions{})
  1167  		if err != nil {
  1168  			t.Fatalf("Failed to create node: %v", err)
  1169  		}
  1170  
  1171  		validateDaemonSetPodsAndMarkReady(podClient, podInformer, 2, t)
  1172  		validateDaemonSetStatus(dsClient, ds.Name, 2, t)
  1173  	})
  1174  }
  1175  
  1176  func TestUpdateStatusDespitePodCreationFailure(t *testing.T) {
  1177  	forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
  1178  		limitedPodNumber := 2
  1179  		ctx, closeFn, dc, informers, clientset := setupWithServerSetup(t, framework.TestServerSetup{
  1180  			ModifyServerConfig: func(config *controlplane.Config) {
  1181  				config.GenericConfig.AdmissionControl = &fakePodFailAdmission{
  1182  					limitedPodNumber: limitedPodNumber,
  1183  				}
  1184  			},
  1185  		})
  1186  		defer closeFn()
  1187  		ns := framework.CreateNamespaceOrDie(clientset, "update-status-despite-pod-failure", t)
  1188  		defer framework.DeleteNamespaceOrDie(clientset, ns, t)
  1189  
  1190  		dsClient := clientset.AppsV1().DaemonSets(ns.Name)
  1191  		podClient := clientset.CoreV1().Pods(ns.Name)
  1192  		nodeClient := clientset.CoreV1().Nodes()
  1193  		podInformer := informers.Core().V1().Pods().Informer()
  1194  
  1195  		informers.Start(ctx.Done())
  1196  		go dc.Run(ctx, 2)
  1197  
  1198  		ds := newDaemonSet("foo", ns.Name)
  1199  		ds.Spec.UpdateStrategy = *strategy
  1200  		_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
  1201  		if err != nil {
  1202  			t.Fatalf("Failed to create DaemonSet: %v", err)
  1203  		}
  1204  		defer cleanupDaemonSets(t, clientset, ds)
  1205  
  1206  		addNodes(nodeClient, 0, 5, nil, t)
  1207  
  1208  		validateDaemonSetPodsAndMarkReady(podClient, podInformer, limitedPodNumber, t)
  1209  		validateDaemonSetStatus(dsClient, ds.Name, int32(limitedPodNumber), t)
  1210  	})
  1211  }
  1212  
  1213  func TestDaemonSetRollingUpdateWithTolerations(t *testing.T) {
  1214  	var taints []v1.Taint
  1215  	var node *v1.Node
  1216  	var tolerations []v1.Toleration
  1217  	ctx, closeFn, dc, informers, clientset := setup(t)
  1218  	defer closeFn()
  1219  	ns := framework.CreateNamespaceOrDie(clientset, "daemonset-rollingupdate-with-tolerations-test", t)
  1220  	defer framework.DeleteNamespaceOrDie(clientset, ns, t)
  1221  
  1222  	dsClient := clientset.AppsV1().DaemonSets(ns.Name)
  1223  	podClient := clientset.CoreV1().Pods(ns.Name)
  1224  	nodeClient := clientset.CoreV1().Nodes()
  1225  	podInformer := informers.Core().V1().Pods().Informer()
  1226  	informers.Start(ctx.Done())
  1227  	go dc.Run(ctx, 2)
  1228  
  1229  	zero := intstr.FromInt32(0)
  1230  	maxSurge := intstr.FromInt32(1)
  1231  	ds := newDaemonSet("foo", ns.Name)
  1232  	ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{
  1233  		Type: apps.RollingUpdateDaemonSetStrategyType,
  1234  		RollingUpdate: &apps.RollingUpdateDaemonSet{
  1235  			MaxUnavailable: &zero,
  1236  			MaxSurge:       &maxSurge,
  1237  		},
  1238  	}
  1239  
  1240  	// Add six nodes with zone-y, zone-z or common taint
  1241  	for i := 0; i < 6; i++ {
  1242  		if i < 2 {
  1243  			taints = []v1.Taint{
  1244  				{Key: "zone-y", Effect: v1.TaintEffectNoSchedule},
  1245  			}
  1246  		} else if i < 4 {
  1247  			taints = []v1.Taint{
  1248  				{Key: "zone-z", Effect: v1.TaintEffectNoSchedule},
  1249  			}
  1250  		} else {
  1251  			taints = []v1.Taint{
  1252  				{Key: "zone-common", Effect: v1.TaintEffectNoSchedule},
  1253  			}
  1254  		}
  1255  		node = newNode(fmt.Sprintf("node-%d", i), nil)
  1256  		node.Spec.Taints = taints
  1257  		_, err := nodeClient.Create(context.TODO(), node, metav1.CreateOptions{})
  1258  		if err != nil {
  1259  			t.Fatalf("Failed to create node: %v", err)
  1260  		}
  1261  	}
  1262  
  1263  	// Create DaemonSet with zone-y toleration
  1264  	tolerations = []v1.Toleration{
  1265  		{Key: "zone-y", Operator: v1.TolerationOpExists},
  1266  		{Key: "zone-common", Operator: v1.TolerationOpExists},
  1267  	}
  1268  	ds.Spec.Template.Spec.Tolerations = tolerations
  1269  	_, err := dsClient.Create(ctx, ds, metav1.CreateOptions{})
  1270  	if err != nil {
  1271  		t.Fatalf("Failed to create DaemonSet: %v", err)
  1272  	}
  1273  	defer cleanupDaemonSets(t, clientset, ds)
  1274  	validateDaemonSetPodsActive(podClient, podInformer, 4, t)
  1275  	validateDaemonSetPodsAndMarkReady(podClient, podInformer, 4, t)
  1276  	validateDaemonSetStatus(dsClient, ds.Name, 4, t)
  1277  	validateUpdatedNumberScheduled(ctx, dsClient, ds.Name, 4, t)
  1278  	validateDaemonSetPodsTolerations(podClient, podInformer, tolerations, "zone-", t)
  1279  
  1280  	// Update DaemonSet with zone-z toleration
  1281  	tolerations = []v1.Toleration{
  1282  		{Key: "zone-z", Operator: v1.TolerationOpExists},
  1283  		{Key: "zone-common", Operator: v1.TolerationOpExists},
  1284  	}
  1285  	ds.Spec.Template.Spec.Tolerations = tolerations
  1286  	_, err = dsClient.Update(ctx, ds, metav1.UpdateOptions{})
  1287  	if err != nil {
  1288  		t.Fatalf("Failed to update DaemonSet: %v", err)
  1289  	}
  1290  
  1291  	// Expected numberPods of validateDaemonSetPodsActive is 7 when update DaemonSet
  1292  	// and before updated pods become ready because:
  1293  	//   - New 2 pods are created and Pending on Zone Z nodes
  1294  	//   - New 1 pod are created as surge and Pending on Zone Common node
  1295  	//   - Old 2 pods that violate scheduling constraints on Zone Y nodes will remain existing and Running
  1296  	//     until other new pods become available
  1297  	validateDaemonSetPodsActive(podClient, podInformer, 7, t)
  1298  	validateDaemonSetPodsAndMarkReady(podClient, podInformer, 4, t)
  1299  	validateDaemonSetStatus(dsClient, ds.Name, 4, t)
  1300  	validateUpdatedNumberScheduled(ctx, dsClient, ds.Name, 4, t)
  1301  	validateDaemonSetPodsTolerations(podClient, podInformer, tolerations, "zone-", t)
  1302  
  1303  	// Update DaemonSet with zone-y and zone-z toleration
  1304  	tolerations = []v1.Toleration{
  1305  		{Key: "zone-y", Operator: v1.TolerationOpExists},
  1306  		{Key: "zone-z", Operator: v1.TolerationOpExists},
  1307  		{Key: "zone-common", Operator: v1.TolerationOpExists},
  1308  	}
  1309  	ds.Spec.Template.Spec.Tolerations = tolerations
  1310  	_, err = dsClient.Update(ctx, ds, metav1.UpdateOptions{})
  1311  	if err != nil {
  1312  		t.Fatalf("Failed to update DaemonSet: %v", err)
  1313  	}
  1314  	validateDaemonSetPodsActive(podClient, podInformer, 7, t)
  1315  	validateDaemonSetPodsAndMarkReady(podClient, podInformer, 6, t)
  1316  	validateDaemonSetStatus(dsClient, ds.Name, 6, t)
  1317  	validateUpdatedNumberScheduled(ctx, dsClient, ds.Name, 6, t)
  1318  	validateDaemonSetPodsTolerations(podClient, podInformer, tolerations, "zone-", t)
  1319  
  1320  	// Update DaemonSet with no toleration
  1321  	ds.Spec.Template.Spec.Tolerations = nil
  1322  	_, err = dsClient.Update(ctx, ds, metav1.UpdateOptions{})
  1323  	if err != nil {
  1324  		t.Fatalf("Failed to update DaemonSet: %v", err)
  1325  	}
  1326  	validateDaemonSetPodsActive(podClient, podInformer, 0, t)
  1327  	validateDaemonSetStatus(dsClient, ds.Name, 0, t)
  1328  	validateUpdatedNumberScheduled(ctx, dsClient, ds.Name, 0, t)
  1329  }
  1330  

View as plain text