Source file src/k8s.io/kubernetes/test/e2e/apps/rc.go

Documentation: k8s.io/kubernetes/test/e2e/apps

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package apps
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"errors"
    23  	"fmt"
    24  	"time"
    25  
    26  	autoscalingv1 "k8s.io/api/autoscaling/v1"
    27  	v1 "k8s.io/api/core/v1"
    28  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    29  	"k8s.io/apimachinery/pkg/api/resource"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/runtime/schema"
    32  	"k8s.io/apimachinery/pkg/types"
    33  	utilrand "k8s.io/apimachinery/pkg/util/rand"
    34  	"k8s.io/apimachinery/pkg/util/uuid"
    35  	"k8s.io/apimachinery/pkg/util/wait"
    36  	watch "k8s.io/apimachinery/pkg/watch"
    37  	"k8s.io/client-go/dynamic"
    38  	clientset "k8s.io/client-go/kubernetes"
    39  	watchtools "k8s.io/client-go/tools/watch"
    40  	"k8s.io/kubernetes/pkg/controller/replication"
    41  	"k8s.io/kubernetes/test/e2e/framework"
    42  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    43  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    44  	imageutils "k8s.io/kubernetes/test/utils/image"
    45  	admissionapi "k8s.io/pod-security-admission/api"
    46  
    47  	"github.com/onsi/ginkgo/v2"
    48  	"github.com/onsi/gomega"
    49  	"github.com/onsi/gomega/format"
    50  	"k8s.io/utils/pointer"
    51  )
    52  
    53  var _ = SIGDescribe("ReplicationController", func() {
    54  	f := framework.NewDefaultFramework("replication-controller")
    55  	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
    56  
    57  	var ns string
    58  	var dc dynamic.Interface
    59  
    60  	ginkgo.BeforeEach(func() {
    61  		ns = f.Namespace.Name
    62  		dc = f.DynamicClient
    63  	})
    64  
    65  	/*
    66  		Release: v1.9
    67  		Testname: Replication Controller, run basic image
    68  		Description: Replication Controller MUST create a Pod with Basic Image and MUST run the service with the provided image. Image MUST be tested by dialing into the service listening through TCP, UDP and HTTP.
    69  	*/
    70  	framework.ConformanceIt("should serve a basic image on each replica with a public image", func(ctx context.Context) {
    71  		TestReplicationControllerServeImageOrFail(ctx, f, "basic", framework.ServeHostnameImage)
    72  	})
    73  
    74  	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
    75  		// requires private images
    76  		e2eskipper.SkipUnlessProviderIs("gce", "gke")
    77  		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
    78  		TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
    79  	})
    80  
    81  	/*
    82  		Release: v1.15
    83  		Testname: Replication Controller, check for issues like exceeding allocated quota
    84  		Description: Attempt to create a Replication Controller with pods exceeding the namespace quota. The creation MUST fail
    85  	*/
    86  	framework.ConformanceIt("should surface a failure condition on a common issue like exceeded quota", func(ctx context.Context) {
    87  		testReplicationControllerConditionCheck(ctx, f)
    88  	})
    89  
    90  	/*
    91  		Release: v1.13
    92  		Testname: Replication Controller, adopt matching pods
    93  		Description: An ownerless Pod is created, then a Replication Controller (RC) is created whose label selector will match the Pod. The RC MUST either adopt the Pod or delete and replace it with a new Pod
    94  	*/
    95  	framework.ConformanceIt("should adopt matching pods on creation", func(ctx context.Context) {
    96  		testRCAdoptMatchingOrphans(ctx, f)
    97  	})
    98  
    99  	/*
   100  		Release: v1.13
   101  		Testname: Replication Controller, release pods
   102  		Description: A Replication Controller (RC) is created, and its Pods are created. When the labels on one of the Pods change to no longer match the RC's label selector, the RC MUST release the Pod and update the Pod's owner references.
   103  	*/
   104  	framework.ConformanceIt("should release no longer matching pods", func(ctx context.Context) {
   105  		testRCReleaseControlledNotMatching(ctx, f)
   106  	})
   107  
   108  	/*
   109  		Release: v1.20
   110  		Testname: Replication Controller, lifecycle
   111  		Description: A Replication Controller (RC) is created, read, patched, and deleted with verification.
   112  	*/
   113  	framework.ConformanceIt("should test the lifecycle of a ReplicationController", func(ctx context.Context) {
   114  		testRcName := "rc-test"
   115  		testRcNamespace := ns
   116  		testRcInitialReplicaCount := int32(1)
   117  		testRcMaxReplicaCount := int32(2)
   118  		rcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}
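        		// One watch event per step below: Added for the create, one Modified each for the
        		// label patch, status patch, scale patch, and status update, and Deleted for the
        		// delete-by-collection.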
   119  		expectedWatchEvents := []watch.Event{
   120  			{Type: watch.Added},
   121  			{Type: watch.Modified},
   122  			{Type: watch.Modified},
   123  			{Type: watch.Modified},
   124  			{Type: watch.Modified},
   125  			{Type: watch.Deleted},
   126  		}
   127  
   128  		rcTest := v1.ReplicationController{
   129  			ObjectMeta: metav1.ObjectMeta{
   130  				Name:   testRcName,
   131  				Labels: map[string]string{"test-rc-static": "true"},
   132  			},
   133  			Spec: v1.ReplicationControllerSpec{
   134  				Replicas: &testRcInitialReplicaCount,
   135  				Selector: map[string]string{"test-rc-static": "true"},
   136  				Template: &v1.PodTemplateSpec{
   137  					ObjectMeta: metav1.ObjectMeta{
   138  						Name:   testRcName,
   139  						Labels: map[string]string{"test-rc-static": "true"},
   140  					},
   141  					Spec: v1.PodSpec{
   142  						Containers: []v1.Container{{
   143  							Name:  testRcName,
   144  							Image: imageutils.GetE2EImage(imageutils.Nginx),
   145  						}},
   146  					},
   147  				},
   148  			},
   149  		}
   150  
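        		// WatchEventSequenceVerifier runs the scenario callback below, compares the watch
        		// events it returns against expectedWatchEvents, and retries the scenario (running
        		// the cleanup function between attempts) if the observed sequence does not match.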
   151  		framework.WatchEventSequenceVerifier(ctx, dc, rcResource, testRcNamespace, testRcName, metav1.ListOptions{LabelSelector: "test-rc-static=true"}, expectedWatchEvents, func(retryWatcher *watchtools.RetryWatcher) (actualWatchEvents []watch.Event) {
   152  			ginkgo.By("creating a ReplicationController")
   153  			// Create a ReplicationController
   154  			_, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Create(ctx, &rcTest, metav1.CreateOptions{})
   155  			framework.ExpectNoError(err, "Failed to create ReplicationController")
   156  
   157  			ginkgo.By("waiting for RC to be added")
   158  			eventFound := false
   159  			ctxUntil, cancel := context.WithTimeout(ctx, 60*time.Second)
   160  			defer cancel()
   161  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   162  				if watchEvent.Type != watch.Added {
   163  					return false, nil
   164  				}
   165  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   166  				eventFound = true
   167  				return true, nil
   168  			})
   169  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   170  			if !eventFound {
   171  				framework.Failf("failed to find RC %v event", watch.Added)
   172  			}
   173  
   174  			ginkgo.By("waiting for available Replicas")
   175  			eventFound = false
   176  			ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
   177  			defer cancel()
   178  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   179  				var rc *v1.ReplicationController
   180  				rcBytes, err := json.Marshal(watchEvent.Object)
   181  				if err != nil {
   182  					return false, err
   183  				}
   184  				err = json.Unmarshal(rcBytes, &rc)
   185  				if err != nil {
   186  					return false, err
   187  				}
   188  				if rc.Status.Replicas != testRcInitialReplicaCount || rc.Status.ReadyReplicas != testRcInitialReplicaCount {
   189  					return false, nil
   190  				}
   191  				eventFound = true
   192  				return true, nil
   193  			})
   194  			framework.ExpectNoError(err, "Wait for condition with watch events should not return an error")
   195  			if !eventFound {
   196  				framework.Failf("RC has not reached ReadyReplicas count of %v", testRcInitialReplicaCount)
   197  			}
   198  
   199  			rcLabelPatchPayload, err := json.Marshal(v1.ReplicationController{
   200  				ObjectMeta: metav1.ObjectMeta{
   201  					Labels: map[string]string{"test-rc": "patched"},
   202  				},
   203  			})
   204  			framework.ExpectNoError(err, "Failed to marshal JSON of ReplicationController label patch")
   205  			// Patch the ReplicationController
   206  			ginkgo.By("patching ReplicationController")
   207  			testRcPatched, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, rcLabelPatchPayload, metav1.PatchOptions{})
   208  			framework.ExpectNoError(err, "Failed to patch ReplicationController")
   209  			gomega.Expect(testRcPatched.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "failed to patch RC")
   210  			ginkgo.By("waiting for RC to be modified")
   211  			eventFound = false
   212  			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
   213  			defer cancel()
   214  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   215  				if watchEvent.Type != watch.Modified {
   216  					return false, nil
   217  				}
   218  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   219  				eventFound = true
   220  				return true, nil
   221  			})
   222  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   223  			if !eventFound {
   224  				framework.Failf("failed to find RC %v event", watch.Modified)
   225  			}
   226  
   227  			rcStatusPatchPayload, err := json.Marshal(map[string]interface{}{
   228  				"status": map[string]interface{}{
   229  					"readyReplicas":     0,
   230  					"availableReplicas": 0,
   231  				},
   232  			})
   233  			framework.ExpectNoError(err, "Failed to marshal JSON of ReplicationController label patch")
   234  
   235  			// Patch the ReplicationController's status
   236  			ginkgo.By("patching ReplicationController status")
   237  			rcStatus, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, rcStatusPatchPayload, metav1.PatchOptions{}, "status")
   238  			framework.ExpectNoError(err, "Failed to patch ReplicationControllerStatus")
   239  			gomega.Expect(rcStatus.Status.ReadyReplicas).To(gomega.Equal(int32(0)), "ReplicationControllerStatus's readyReplicas does not equal 0")
   240  			ginkgo.By("waiting for RC to be modified")
   241  			eventFound = false
   242  			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
   243  			defer cancel()
   244  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   245  				if watchEvent.Type != watch.Modified {
   246  					return false, nil
   247  				}
   248  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   249  				eventFound = true
   250  				return true, nil
   251  			})
   252  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   253  
   254  			if !eventFound {
   255  				framework.Failf("failed to find RC %v event", watch.Modified)
   256  			}
   257  
   258  			ginkgo.By("waiting for available Replicas")
   259  			_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   260  				var rc *v1.ReplicationController
   261  				rcBytes, err := json.Marshal(watchEvent.Object)
   262  				if err != nil {
   263  					return false, err
   264  				}
   265  				if err := json.Unmarshal(rcBytes, &rc); err != nil {
   266  					return false, err
   267  				}
   268  				if rc.Status.Replicas != testRcInitialReplicaCount {
   269  					return false, nil
   270  				}
   271  				eventFound = true
   272  				return true, nil
   273  			})
   274  			framework.ExpectNoError(err, "Failed to find updated ready replica count")
   275  			if !eventFound {
   276  				framework.Fail("Failed to find updated ready replica count")
   277  			}
   278  			ginkgo.By("fetching ReplicationController status")
   279  			rcStatusUnstructured, err := dc.Resource(rcResource).Namespace(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{}, "status")
   280  			framework.ExpectNoError(err, "Failed to fetch ReplicationControllerStatus")
   281  
   282  			rcStatusUjson, err := json.Marshal(rcStatusUnstructured)
   283  			framework.ExpectNoError(err, "Failed to marshal JSON of the fetched ReplicationController status")
   284  			framework.ExpectNoError(json.Unmarshal(rcStatusUjson, &rcStatus), "Failed to unmarshal ReplicationController status")
   285  			gomega.Expect(rcStatus.Status.Replicas).To(gomega.Equal(testRcInitialReplicaCount), "ReplicationController replica count does not match initial replica count")
   286  
   287  			rcScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
   288  				Spec: autoscalingv1.ScaleSpec{
   289  					Replicas: testRcMaxReplicaCount,
   290  				},
   291  			})
   292  			framework.ExpectNoError(err, "Failed to marshal JSON of ReplicationController scale patch")
   293  
   294  			// Patch the ReplicationController's scale
   295  			ginkgo.By("patching ReplicationController scale")
   296  			_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Patch(ctx, testRcName, types.StrategicMergePatchType, rcScalePatchPayload, metav1.PatchOptions{}, "scale")
   297  			framework.ExpectNoError(err, "Failed to patch ReplicationControllerScale")
   298  			ginkgo.By("waiting for RC to be modified")
   299  			eventFound = false
   300  			ctxUntil, cancel = context.WithTimeout(ctx, f.Timeouts.PodStart)
   301  			defer cancel()
   302  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   303  				if watchEvent.Type != watch.Modified {
   304  					return false, nil
   305  				}
   306  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   307  				eventFound = true
   308  				return true, nil
   309  			})
   310  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   311  			if !eventFound {
   312  				framework.Failf("Failed to find RC %v event", watch.Modified)
   313  			}
   314  
   315  			ginkgo.By("waiting for ReplicationController's scale to be the max amount")
   316  			eventFound = false
   317  			_, err = watchUntilWithoutRetry(ctx, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   318  				var rc *v1.ReplicationController
   319  				rcBytes, err := json.Marshal(watchEvent.Object)
   320  				if err != nil {
   321  					return false, err
   322  				}
   323  				err = json.Unmarshal(rcBytes, &rc)
   324  				if err != nil {
   325  					return false, err
   326  				}
   327  				if rc.ObjectMeta.Name != testRcName || rc.ObjectMeta.Namespace != testRcNamespace || rc.Status.Replicas != testRcMaxReplicaCount || rc.Status.ReadyReplicas != testRcMaxReplicaCount {
   328  					return false, nil
   329  				}
   330  				eventFound = true
   331  				return true, nil
   332  			})
   333  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   334  			if !eventFound {
   335  				framework.Fail("Failed to find updated ready replica count")
   336  			}
   337  
   338  			// Get the ReplicationController
   339  			ginkgo.By("fetching ReplicationController; ensuring that it's patched")
   340  			rc, err := f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).Get(ctx, testRcName, metav1.GetOptions{})
   341  			framework.ExpectNoError(err, "failed to fetch ReplicationController")
   342  			gomega.Expect(rc.ObjectMeta.Labels).To(gomega.HaveKeyWithValue("test-rc", "patched"), "ReplicationController is missing a label from earlier patch")
   343  
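        			// Reuse the rc object fetched above as the status update payload: it carries a
        			// current resourceVersion, which the update requires.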
   344  			rcStatusUpdatePayload := rc
   345  			rcStatusUpdatePayload.Status.AvailableReplicas = 1
   346  			rcStatusUpdatePayload.Status.ReadyReplicas = 1
   347  
   348  			// Replace the ReplicationController's status
   349  			ginkgo.By("updating ReplicationController status")
   350  			_, err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).UpdateStatus(ctx, rcStatusUpdatePayload, metav1.UpdateOptions{})
   351  			framework.ExpectNoError(err, "failed to update ReplicationControllerStatus")
   352  
   353  			ginkgo.By("waiting for RC to be modified")
   354  			eventFound = false
   355  			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
   356  			defer cancel()
   357  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   358  				if watchEvent.Type != watch.Modified {
   359  					return false, nil
   360  				}
   361  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   362  				eventFound = true
   363  				return true, nil
   364  			})
   365  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   366  
   367  			if !eventFound {
   368  				framework.Failf("failed to find RC %v event", watch.Modified)
   369  			}
   370  
   371  			ginkgo.By("listing all ReplicationControllers")
   372  			rcs, err := f.ClientSet.CoreV1().ReplicationControllers("").List(ctx, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
   373  			framework.ExpectNoError(err, "failed to list ReplicationController")
   374  			gomega.Expect(rcs.Items).ToNot(gomega.BeEmpty(), "Expected to find a ReplicationController but none was found")
   375  
   376  			ginkgo.By("checking that ReplicationController has expected values")
   377  			foundRc := false
   378  			for _, rcItem := range rcs.Items {
   379  				if rcItem.ObjectMeta.Name == testRcName &&
   380  					rcItem.ObjectMeta.Namespace == testRcNamespace &&
   381  					rcItem.ObjectMeta.Labels["test-rc-static"] == "true" &&
   382  					rcItem.ObjectMeta.Labels["test-rc"] == "patched" {
   383  					foundRc = true
   384  				}
   385  			}
   386  			if !foundRc {
   387  				framework.Failf("ReplicationController doesn't have expected values.\nValues that are in the ReplicationController list:\n%s", format.Object(rcs.Items, 1))
   388  			}
   389  
   390  			// Delete ReplicationController
   391  			ginkgo.By("deleting ReplicationControllers by collection")
   392  			err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
   393  			framework.ExpectNoError(err, "Failed to delete ReplicationControllers")
   394  
   395  			ginkgo.By("waiting for ReplicationController to have a DELETED watchEvent")
   396  			eventFound = false
   397  			ctxUntil, cancel = context.WithTimeout(ctx, 60*time.Second)
   398  			defer cancel()
   399  			_, err = watchUntilWithoutRetry(ctxUntil, retryWatcher, func(watchEvent watch.Event) (bool, error) {
   400  				if watchEvent.Type != watch.Deleted {
   401  					return false, nil
   402  				}
   403  				actualWatchEvents = append(actualWatchEvents, watchEvent)
   404  				eventFound = true
   405  				return true, nil
   406  			})
   407  			framework.ExpectNoError(err, "Wait until condition with watch events should not return an error")
   408  			if !eventFound {
   409  				framework.Failf("failed to find RC %v event", watch.Deleted)
   410  			}
   411  			return actualWatchEvents
   412  		}, func() (err error) {
   413  			err = f.ClientSet.CoreV1().ReplicationControllers(testRcNamespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "test-rc-static=true"})
   414  			return err
   415  		})
   416  	})
   417  
   418  	/*
   419  		Release: v1.26
   420  		Testname: Replication Controller, get and update ReplicationController scale
   421  		Description: A ReplicationController is created which MUST succeed. It MUST
   422  		succeed when reading the ReplicationController scale. When updating the
   423  		ReplicationController scale it MUST succeed and the field MUST equal the new value.
   424  	*/
   425  	framework.ConformanceIt("should get and update a ReplicationController scale", func(ctx context.Context) {
   426  		rcClient := f.ClientSet.CoreV1().ReplicationControllers(ns)
   427  		rcName := "e2e-rc-" + utilrand.String(5)
   428  		initialRCReplicaCount := int32(1)
   429  		expectedRCReplicaCount := int32(2)
   430  
   431  		ginkgo.By(fmt.Sprintf("Creating ReplicationController %q", rcName))
   432  		rc := newRC(rcName, initialRCReplicaCount, map[string]string{"name": rcName}, WebserverImageName, WebserverImage, nil)
   433  		_, err := rcClient.Create(ctx, rc, metav1.CreateOptions{})
   434  		framework.ExpectNoError(err, "Failed to create ReplicationController: %v", err)
   435  
   436  		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, initialRCReplicaCount))
   437  		framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
   438  
   439  		ginkgo.By(fmt.Sprintf("Getting scale subresource for ReplicationController %q", rcName))
   440  		scale, err := rcClient.GetScale(ctx, rcName, metav1.GetOptions{})
   441  		framework.ExpectNoError(err, "Failed to get scale subresource: %v", err)
   442  		gomega.Expect(scale.Status.Replicas).To(gomega.Equal(initialRCReplicaCount), "Failed to get the current replica count")
   443  
   444  		ginkgo.By("Updating a scale subresource")
   445  		scale.ResourceVersion = "" // indicate the scale update should be unconditional
   446  		scale.Spec.Replicas = expectedRCReplicaCount
   447  		_, err = rcClient.UpdateScale(ctx, rcName, scale, metav1.UpdateOptions{})
   448  		framework.ExpectNoError(err, "Failed to update scale subresource: %v", err)
   449  
   450  		ginkgo.By(fmt.Sprintf("Verifying replicas were modified for replication controller %q", rcName))
   451  		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 1*time.Minute, true, checkReplicationControllerStatusReplicaCount(f, rcName, expectedRCReplicaCount))
   452  		framework.ExpectNoError(err, "failed to confirm the quantity of ReplicationController replicas")
   453  	})
   454  })
   455  
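        // newRC returns a ReplicationController manifest with the given name and replica count,
        // whose pod template carries rcPodLabels and runs a single container with the supplied
        // image and args; a zero grace period lets test pods terminate immediately.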
   456  func newRC(rcName string, replicas int32, rcPodLabels map[string]string, imageName string, image string, args []string) *v1.ReplicationController {
   457  	zero := int64(0)
   458  	return &v1.ReplicationController{
   459  		ObjectMeta: metav1.ObjectMeta{
   460  			Name: rcName,
   461  		},
   462  		Spec: v1.ReplicationControllerSpec{
   463  			Replicas: pointer.Int32(replicas),
   464  			Template: &v1.PodTemplateSpec{
   465  				ObjectMeta: metav1.ObjectMeta{
   466  					Labels: rcPodLabels,
   467  				},
   468  				Spec: v1.PodSpec{
   469  					TerminationGracePeriodSeconds: &zero,
   470  					Containers: []v1.Container{
   471  						{
   472  							Name:  imageName,
   473  							Image: image,
   474  							Args:  args,
   475  						},
   476  					},
   477  				},
   478  			},
   479  		},
   480  	}
   481  }
   482  
   483  // TestReplicationControllerServeImageOrFail is a basic test to check
   484  // the deployment of an image using a replication controller.
   485  // The image serves its hostname which is checked for each replica.
   486  func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework.Framework, test string, image string) {
   487  	name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
   488  	replicas := int32(1)
   489  
   490  	// Create a replication controller for a service
   491  	// that serves its hostname.
   492  	// The source for the Docker container kubernetes/serve_hostname is
   493  	// in contrib/for-demos/serve_hostname
   494  	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
   495  	rc := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
   496  	rc.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
   497  	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rc, metav1.CreateOptions{})
   498  	framework.ExpectNoError(err)
   499  
   500  	// Check that pods for the new RC were created.
   501  	// TODO: Maybe switch PodsCreated to just check owner references.
   502  	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, name, replicas)
   503  	framework.ExpectNoError(err)
   504  
   505  	// Wait for the pods to enter the running state and become Ready. Waiting loops until
   506  	// the pods are running, so non-running pods cause this test to time out.
   507  	framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
   508  	running := int32(0)
   509  	for _, pod := range pods.Items {
   510  		if pod.DeletionTimestamp != nil {
   511  			continue
   512  		}
   513  		err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, framework.PodStartTimeout)
   514  		if err != nil {
   515  			updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
   516  			if getErr == nil {
   517  				err = fmt.Errorf("pod %q never ran (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
   518  			} else {
   519  				err = fmt.Errorf("pod %q never ran: %w", pod.Name, err)
   520  			}
   521  		}
   522  		framework.ExpectNoError(err)
   523  		framework.Logf("Pod %q is running and ready (conditions: %+v)", pod.Name, pod.Status.Conditions)
   524  		running++
   525  	}
   526  
   527  	// Sanity check
   528  	gomega.Expect(running).To(gomega.Equal(replicas), "unexpected number of running and ready pods: %+v", pods.Items)
   529  
   530  	// Verify that something is listening.
   531  	framework.Logf("Trying to dial the pod")
   532  	framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
   533  }
   534  
   535  // 1. Create a quota restricting pods in the current namespace to 2.
   536  // 2. Create a replication controller that wants to run 3 pods.
   537  // 3. Check replication controller conditions for a ReplicaFailure condition.
   538  // 4. Relax quota or scale down the controller and observe the condition is gone.
   539  func testReplicationControllerConditionCheck(ctx context.Context, f *framework.Framework) {
   540  	c := f.ClientSet
   541  	namespace := f.Namespace.Name
   542  	name := "condition-test"
   543  
   544  	framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
   545  	quota := newPodQuota(name, "2")
   546  	_, err := c.CoreV1().ResourceQuotas(namespace).Create(ctx, quota, metav1.CreateOptions{})
   547  	framework.ExpectNoError(err)
   548  
   549  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   550  		quota, err = c.CoreV1().ResourceQuotas(namespace).Get(ctx, name, metav1.GetOptions{})
   551  		if err != nil {
   552  			return false, err
   553  		}
   554  		podQuota := quota.Status.Hard[v1.ResourcePods]
   555  		quantity := resource.MustParse("2")
   556  		return (&podQuota).Cmp(quantity) == 0, nil
   557  	})
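        	// wait.Interrupted is true when the poll timed out or was canceled; swap in a more
        	// descriptive error for the failure report.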
   558  	if wait.Interrupted(err) {
   559  		err = fmt.Errorf("resource quota %q never synced", name)
   560  	}
   561  	framework.ExpectNoError(err)
   562  
   563  	ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
   564  	rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
   565  	rc, err = c.CoreV1().ReplicationControllers(namespace).Create(ctx, rc, metav1.CreateOptions{})
   566  	framework.ExpectNoError(err)
   567  
   568  	ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
   569  	generation := rc.Generation
   570  	conditions := rc.Status.Conditions
   571  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   572  		rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
   573  		if err != nil {
   574  			return false, err
   575  		}
   576  
   577  		if generation > rc.Status.ObservedGeneration {
   578  			return false, nil
   579  		}
   580  		conditions = rc.Status.Conditions
   581  
   582  		cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
   583  		return cond != nil, nil
   584  	})
   585  	if wait.Interrupted(err) {
   586  		err = fmt.Errorf("rc manager never added the failure condition for rc %q: %#v", name, conditions)
   587  	}
   588  	framework.ExpectNoError(err)
   589  
   590  	ginkgo.By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
   591  	rc, err = updateReplicationControllerWithRetries(ctx, c, namespace, name, func(update *v1.ReplicationController) {
   592  		x := int32(2)
   593  		update.Spec.Replicas = &x
   594  	})
   595  	framework.ExpectNoError(err)
   596  
   597  	ginkgo.By(fmt.Sprintf("Checking rc %q has no failure condition set", name))
   598  	generation = rc.Generation
   599  	conditions = rc.Status.Conditions
   600  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   601  		rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{})
   602  		if err != nil {
   603  			return false, err
   604  		}
   605  
   606  		if generation > rc.Status.ObservedGeneration {
   607  			return false, nil
   608  		}
   609  		conditions = rc.Status.Conditions
   610  
   611  		cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
   612  		return cond == nil, nil
   613  	})
   614  	if wait.Interrupted(err) {
   615  		err = fmt.Errorf("rc manager never removed the failure condition for rc %q: %#v", name, conditions)
   616  	}
   617  	framework.ExpectNoError(err)
   618  }
   619  
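        // testRCAdoptMatchingOrphans creates an ownerless pod, then creates an RC whose selector
        // matches the pod's labels, and verifies that the RC either adopts the pod (adding a
        // controller owner reference) or deletes and replaces it.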
   620  func testRCAdoptMatchingOrphans(ctx context.Context, f *framework.Framework) {
   621  	name := "pod-adoption"
   622  	ginkgo.By(fmt.Sprintf("Given a Pod with a 'name' label %s is created", name))
   623  	p := e2epod.NewPodClient(f).CreateSync(ctx, &v1.Pod{
   624  		ObjectMeta: metav1.ObjectMeta{
   625  			Name: name,
   626  			Labels: map[string]string{
   627  				"name": name,
   628  			},
   629  		},
   630  		Spec: v1.PodSpec{
   631  			Containers: []v1.Container{
   632  				{
   633  					Name:  name,
   634  					Image: WebserverImage,
   635  				},
   636  			},
   637  		},
   638  	})
   639  
   640  	ginkgo.By("When a replication controller with a matching selector is created")
   641  	replicas := int32(1)
   642  	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
   643  	rcSt.Spec.Selector = map[string]string{"name": name}
   644  	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
   645  	framework.ExpectNoError(err)
   646  
   647  	ginkgo.By("Then the orphan pod is adopted")
   648  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   649  		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
   650  		// The Pod p should either be adopted or deleted by the RC
   651  		if apierrors.IsNotFound(err) {
   652  			return true, nil
   653  		}
   654  		framework.ExpectNoError(err)
   655  		for _, owner := range p2.OwnerReferences {
   656  			if *owner.Controller && owner.UID == rc.UID {
   657  				// pod adopted
   658  				return true, nil
   659  			}
   660  		}
   661  		// pod still not adopted
   662  		return false, nil
   663  	})
   664  	framework.ExpectNoError(err)
   665  }
   666  
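        // testRCReleaseControlledNotMatching creates an RC, relabels one of its pods so that it
        // no longer matches the RC's selector, and verifies that the RC releases the pod by
        // removing its controller owner reference.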
   667  func testRCReleaseControlledNotMatching(ctx context.Context, f *framework.Framework) {
   668  	name := "pod-release"
   669  	ginkgo.By("Given a ReplicationController is created")
   670  	replicas := int32(1)
   671  	rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
   672  	rcSt.Spec.Selector = map[string]string{"name": name}
   673  	rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rcSt, metav1.CreateOptions{})
   674  	framework.ExpectNoError(err)
   675  
   676  	ginkgo.By("When the matched label of one of its pods change")
   677  	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rc.Name, replicas)
   678  	framework.ExpectNoError(err)
   679  
   680  	p := pods.Items[0]
   681  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   682  		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
   683  		framework.ExpectNoError(err)
   684  
   685  		pod.Labels = map[string]string{"name": "not-matching-name"}
   686  		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(ctx, pod, metav1.UpdateOptions{})
   687  		if err != nil && apierrors.IsConflict(err) {
   688  			return false, nil
   689  		}
   690  		if err != nil {
   691  			return false, err
   692  		}
   693  		return true, nil
   694  	})
   695  	framework.ExpectNoError(err)
   696  
   697  	ginkgo.By("Then the pod is released")
   698  	err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
   699  		p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, p.Name, metav1.GetOptions{})
   700  		framework.ExpectNoError(err)
   701  		for _, owner := range p2.OwnerReferences {
   702  			if *owner.Controller && owner.UID == rc.UID {
   703  				// pod still belonging to the replication controller
   704  				return false, nil
   705  			}
   706  		}
   707  		// pod already released
   708  		return true, nil
   709  	})
   710  	framework.ExpectNoError(err)
   711  }
   712  
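        // updateRcFunc mutates a ReplicationController in place; it is applied by
        // updateReplicationControllerWithRetries before each update attempt.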
   713  type updateRcFunc func(d *v1.ReplicationController)
   714  
   715  // updateReplicationControllerWithRetries retries updating the given rc on conflict with the following steps:
   716  // 1. Get latest resource
   717  // 2. applyUpdate
   718  // 3. Update the resource
   719  func updateReplicationControllerWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
   720  	var rc *v1.ReplicationController
   721  	var updateErr error
   722  	pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
   723  		var err error
   724  		if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(ctx, name, metav1.GetOptions{}); err != nil {
   725  			return false, err
   726  		}
   727  		// Apply the update, then attempt to push it to the apiserver.
   728  		applyUpdate(rc)
   729  		if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{}); err == nil {
   730  			framework.Logf("Updating replication controller %q", name)
   731  			return true, nil
   732  		}
   733  		updateErr = err
   734  		return false, nil
   735  	})
   736  	if wait.Interrupted(pollErr) {
   737  		pollErr = fmt.Errorf("couldn't apply the provided update to rc %q: %v", name, updateErr)
   738  	}
   739  	return rc, pollErr
   740  }
   741  
   742  // watchUntilWithoutRetry reads items from the watch until each provided condition succeeds,
   743  // and then returns the last watch event encountered. The first condition that returns an error
   744  // terminates the watch (and that event is also returned). If no event has been received, the
   745  // returned event will be nil. Conditions are satisfied sequentially so as to provide a useful
   746  // primitive for higher-level composition. Waits until the context deadline is reached or the
   747  // context is canceled.
   748  //
   749  // This is the same as watchtools.UntilWithoutRetry, just without closing the watch: when paired
   750  // with WatchEventSequenceVerifier, the watch must remain open for continued event collection.
   750  func watchUntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...watchtools.ConditionFunc) (*watch.Event, error) {
   751  	ch := watcher.ResultChan()
   752  	var lastEvent *watch.Event
   753  	for _, condition := range conditions {
   754  		// check the next condition against the previous event and short circuit waiting for the next watch
   755  		if lastEvent != nil {
   756  			done, err := condition(*lastEvent)
   757  			if err != nil {
   758  				return lastEvent, err
   759  			}
   760  			if done {
   761  				continue
   762  			}
   763  		}
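        		// Consume events until this condition is satisfied, the watch channel closes,
        		// or the context is done.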
   764  	ConditionSucceeded:
   765  		for {
   766  			select {
   767  			case event, ok := <-ch:
   768  				if !ok {
   769  					return lastEvent, watchtools.ErrWatchClosed
   770  				}
   771  				lastEvent = &event
   772  
   773  				done, err := condition(event)
   774  				if err != nil {
   775  					return lastEvent, err
   776  				}
   777  				if done {
   778  					break ConditionSucceeded
   779  				}
   780  
   781  			case <-ctx.Done():
   782  				return lastEvent, wait.ErrorInterrupted(errors.New("timed out waiting for the condition"))
   783  			}
   784  		}
   785  	}
   786  	return lastEvent, nil
   787  }
   788  
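        // checkReplicationControllerStatusReplicaCount returns a condition func, suitable for
        // wait.PollUntilContextTimeout, that fetches the named ReplicationController and reports
        // whether status.replicas has reached the given quantity.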
   789  func checkReplicationControllerStatusReplicaCount(f *framework.Framework, rcName string, quantity int32) func(ctx context.Context) (bool, error) {
   790  	return func(ctx context.Context) (bool, error) {
   791  
   792  		framework.Logf("Get Replication Controller %q to confirm replicas", rcName)
   793  		rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Get(ctx, rcName, metav1.GetOptions{})
   794  		if err != nil {
   795  			return false, err
   796  		}
   797  
   798  		if rc.Status.Replicas != quantity {
   799  			return false, nil
   800  		}
   801  		framework.Logf("Found %d replicas for %q replication controller", quantity, rc.Name)
   802  		return true, nil
   803  	}
   804  }
   805  
