
Source file src/k8s.io/kubernetes/test/e2e/scheduling/limit_range.go

Documentation: k8s.io/kubernetes/test/e2e/scheduling

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduling

import (
	"context"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
	"time"

	v1 "k8s.io/api/core/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

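// podName is the name shared by the short-lived pods used below to probe
// LimitRange enforcement.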
const (
	podName = "pfpod"
)

var _ = SIGDescribe("LimitRange", func() {
	f := framework.NewDefaultFramework("limitrange")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	/*
		Release: v1.18
		Testname: LimitRange, resources
		Description: Creating a LimitRange and verifying its creation, then updating the LimitRange and validating the update. Creating Pods with and without resource requirements and validating that the LimitRange defaults and constraints are applied to the Pods.
	*/
	framework.ConformanceIt("should create a LimitRange with defaults and ensure pod has those defaults applied.", func(ctx context.Context) {
		ginkgo.By("Creating a LimitRange")
		min := getResourceList("50m", "100Mi", "100Gi")
		max := getResourceList("500m", "500Mi", "500Gi")
		defaultLimit := getResourceList("500m", "500Mi", "500Gi")
		defaultRequest := getResourceList("100m", "200Mi", "200Gi")
		maxLimitRequestRatio := v1.ResourceList{}
		value := strconv.Itoa(time.Now().Nanosecond()) + string(uuid.NewUUID())
		limitRange := newLimitRange("limit-range", value, v1.LimitTypeContainer,
			min, max,
			defaultLimit, defaultRequest,
			maxLimitRequestRatio)

		ginkgo.By("Setting up watch")
		selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))

		options := metav1.ListOptions{LabelSelector: selector.String()}
		limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options)
		framework.ExpectNoError(err, "failed to query for limitRanges")
		gomega.Expect(limitRanges.Items).To(gomega.BeEmpty())

		lw := &cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				options.LabelSelector = selector.String()
				limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, options)
				return limitRanges, err
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				options.LabelSelector = selector.String()
				return f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Watch(ctx, options)
			},
		}
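		// Wrap the ListWatch in an informer-backed watcher so the Added event for
		// the LimitRange created below is not missed between the initial List and
		// the subsequent Watch.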
		_, informer, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.LimitRange{})
		defer w.Stop()

		timeoutCtx, cancel := context.WithTimeout(ctx, wait.ForeverTestTimeout)
		defer cancel()
		if !cache.WaitForCacheSync(timeoutCtx.Done(), informer.HasSynced) {
			framework.Failf("Timeout while waiting for LimitRange informer to sync")
		}

		ginkgo.By("Submitting a LimitRange")
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(ctx, limitRange, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Verifying LimitRange creation was observed")
		select {
		case event := <-w.ResultChan():
			if event.Type != watch.Added {
				framework.Failf("Failed to observe limitRange creation: %v", event)
			}
		case <-time.After(e2eservice.RespondingTimeout):
			framework.Failf("Timeout while waiting for LimitRange creation")
		}

		ginkgo.By("Fetching the LimitRange to ensure it has proper values")
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
		actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
		err = equalResourceRequirement(expected, actual)
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod with no resource requirements")
		pod := newTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring Pod has resource requirements applied from LimitRange")
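		// Re-read the pod so the check runs against what the API server persisted;
		// the LimitRanger admission plugin fills in the defaults at creation time.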
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				framework.Logf("Pod %+v does not have the expected requirements", pod)
				framework.ExpectNoError(err)
			}
		}

		ginkgo.By("Creating a Pod with partial resource requirements")
		pod = newTestPod("pod-partial-resources", getResourceList("", "150Mi", "150Gi"), getResourceList("300m", "", ""))
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Ensuring Pod has merged resource requirements applied from LimitRange")
		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		// This is an interesting case, so it's worth a comment.
		// If you specify a Limit and no Request, the Request defaults to the Limit,
		// which means LimitRange.DefaultRequest only takes effect when container.resources.limits is not supplied.
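		// The pod above asked for 150Mi/150Gi requests and a 300m cpu limit, so the
		// merged result is: cpu request 300m (copied from the limit), cpu limit 300m
		// (as specified), memory/ephemeral-storage requests 150Mi/150Gi (as specified),
		// and memory/ephemeral-storage limits 500Mi/500Gi (LimitRange defaults).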
		expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi", "150Gi"), Limits: getResourceList("300m", "500Mi", "500Gi")}
		for i := range pod.Spec.Containers {
			err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
			if err != nil {
				// Print the pod to help in debugging.
				framework.Logf("Pod %+v does not have the expected requirements", pod)
				framework.ExpectNoError(err)
			}
		}

		ginkgo.By("Failing to create a Pod with less than min resources")
		pod = newTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Failing to create a Pod with more than max resources")
		pod = newTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Updating a LimitRange")
		newMin := getResourceList("9m", "49Mi", "49Gi")
		limitRange.Spec.Limits[0].Min = newMin
		limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Update(ctx, limitRange, metav1.UpdateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Verifying LimitRange updating is effective")
		err = wait.PollUntilContextTimeout(ctx, time.Second*2, time.Second*20, false, func(ctx context.Context) (bool, error) {
			limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
		})
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod with less than former min resources")
		pod = newTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Failing to create a Pod with more than max resources")
		// Use a fresh name here: a pod named podName was just created successfully
		// above, and an AlreadyExists error would mask the max-validation failure.
		pod = newTestPod(podName+"-max", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		gomega.Expect(err).To(gomega.HaveOccurred())

		ginkgo.By("Deleting a LimitRange")
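		// NewDeleteOptions(30) requests a 30-second grace period for the deletion.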
		err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Delete(ctx, limitRange.Name, *metav1.NewDeleteOptions(30))
		framework.ExpectNoError(err)

		ginkgo.By("Verifying the LimitRange was deleted")
		err = wait.PollUntilContextTimeout(ctx, time.Second*5, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
			limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{})

			if err != nil {
				framework.Logf("Unable to retrieve LimitRanges: %v", err)
				return false, nil
			}

			if len(limitRanges.Items) == 0 {
				framework.Logf("limitRange is already deleted")
				return true, nil
			}

			for i := range limitRanges.Items {
				lr := limitRanges.Items[i]
				framework.Logf("LimitRange %v/%v has not yet been deleted", lr.Namespace, lr.Name)
			}

			return false, nil
		})
		framework.ExpectNoError(err)

		ginkgo.By("Creating a Pod with more than former max resources")
		pod = newTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
		// When the LimitRanger admission plugin finds no items in the LimitRange
		// informer cache, it falls back to its local LiveLookupCache, whose entries
		// live for 30s (liveTTL). After a LimitRange is deleted from the apiserver,
		// the informer may have already handled the delete event and emptied its
		// cache while the LiveLookupCache still holds the unexpired, deleted
		// LimitRange. In that window the plugin validates requests against the
		// deleted LimitRange, so pod creation keeps being rejected until the cached
		// entry expires.
		//
		// With the following retry we make sure the entry has expired and the
		// request is validated as expected.
		err = framework.Gomega().Eventually(ctx, func(ctx context.Context) error {
			_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
			return err
		}).WithPolling(5 * time.Second).WithTimeout(30 * time.Second).ShouldNot(gomega.HaveOccurred())
		framework.ExpectNoError(err)
	})

	/*
		Release: v1.26
		Testname: LimitRange, list, patch and delete a LimitRange by collection
		Description: When two limitRanges are created in different namespaces,
		both MUST succeed. Listing limitRanges across all namespaces with a
		labelSelector MUST find both limitRanges. When patching the first limitRange
		it MUST succeed and the fields MUST equal the new values. When deleting
		the limitRange by collection with a labelSelector it MUST delete only one
		limitRange.
	*/
	framework.ConformanceIt("should list, patch and delete a LimitRange by collection", ginkgo.NodeTimeout(wait.ForeverTestTimeout), func(ctx context.Context) {

		ns := f.Namespace.Name
		lrClient := f.ClientSet.CoreV1().LimitRanges(ns)
		lrName := "e2e-limitrange-" + utilrand.String(5)
		e2eLabelSelector := "e2e-test=" + lrName
		patchedLabelSelector := lrName + "=patched"

		min := getResourceList("50m", "100Mi", "100Gi")
		max := getResourceList("500m", "500Mi", "500Gi")
		defaultLimit := getResourceList("500m", "500Mi", "500Gi")
		defaultRequest := getResourceList("100m", "200Mi", "200Gi")
		maxLimitRequestRatio := v1.ResourceList{}

		limitRange := &v1.LimitRange{
			ObjectMeta: metav1.ObjectMeta{
				Name: lrName,
				Labels: map[string]string{
					"e2e-test": lrName,
					lrName:     "created",
				},
			},
			Spec: v1.LimitRangeSpec{
				Limits: []v1.LimitRangeItem{
					{
						Type:                 v1.LimitTypeContainer,
						Min:                  min,
						Max:                  max,
						Default:              defaultLimit,
						DefaultRequest:       defaultRequest,
						MaxLimitRequestRatio: maxLimitRequestRatio,
					},
				},
			},
		}
		// Create a copy to be used in a second namespace; DeepCopy ensures the two
		// objects share no underlying maps or slices.
		limitRange2 := limitRange.DeepCopy()

		ginkgo.By(fmt.Sprintf("Creating LimitRange %q in namespace %q", lrName, f.Namespace.Name))
		limitRange, err := lrClient.Create(ctx, limitRange, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create limitRange %q", lrName)

		ginkgo.By("Creating another limitRange in another namespace")
		lrNamespace, err := f.CreateNamespace(ctx, lrName, nil)
		framework.ExpectNoError(err, "failed creating Namespace")
		framework.Logf("Namespace %q created", lrNamespace.Name)
		framework.Logf("Creating LimitRange %q in namespace %q", lrName, lrNamespace.Name)
		_, err = f.ClientSet.CoreV1().LimitRanges(lrNamespace.Name).Create(ctx, limitRange2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create limitRange %q in %q namespace", lrName, lrNamespace.Name)

		// Listing across all namespaces to verify api endpoint: listCoreV1LimitRangeForAllNamespaces
		ginkgo.By(fmt.Sprintf("Listing all LimitRanges with label %q", e2eLabelSelector))
		limitRangeList, err := f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
		framework.ExpectNoError(err, "Failed to list limitRanges")
		gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(2), "Failed to find the correct limitRange count")
		framework.Logf("Found %d limitRanges", len(limitRangeList.Items))

		ginkgo.By(fmt.Sprintf("Patching LimitRange %q in %q namespace", lrName, ns))
		newMin := getResourceList("9m", "49Mi", "49Gi")
		limitRange.Spec.Limits[0].Min = newMin

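		// The patch payload relabels the LimitRange and carries the updated Limits,
		// so the single Patch call below applies both changes at once.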
		limitRangePayload, err := json.Marshal(v1.LimitRange{
			ObjectMeta: metav1.ObjectMeta{
				CreationTimestamp: limitRange.CreationTimestamp,
				Labels: map[string]string{
					lrName: "patched",
				},
			},
			Spec: v1.LimitRangeSpec{
				Limits: limitRange.Spec.Limits,
			},
		})
		framework.ExpectNoError(err, "Failed to marshal limitRange JSON")

		patchedLimitRange, err := lrClient.Patch(ctx, lrName, types.StrategicMergePatchType, limitRangePayload, metav1.PatchOptions{})
		framework.ExpectNoError(err, "Failed to patch limitRange %q", lrName)
		gomega.Expect(patchedLimitRange.Labels[lrName]).To(gomega.Equal("patched"), "%q label didn't have value 'patched' for this limitRange. Current labels: %v", lrName, patchedLimitRange.Labels)
		if !apiequality.Semantic.DeepEqual(patchedLimitRange.Spec.Limits[0].Min, newMin) {
			framework.Failf("LimitRange does not have the expected Min. Currently: %#v", patchedLimitRange.Spec.Limits[0].Min)
		}
		framework.Logf("LimitRange %q has been patched", lrName)

		ginkgo.By(fmt.Sprintf("Delete LimitRange %q by Collection with labelSelector: %q", lrName, patchedLabelSelector))
		err = lrClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: patchedLabelSelector})
		framework.ExpectNoError(err, "failed to delete the LimitRange by Collection")

		ginkgo.By(fmt.Sprintf("Confirm that the limitRange %q has been deleted", lrName))
		err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, true, checkLimitRangeListQuantity(f, patchedLabelSelector, 0))
		framework.ExpectNoError(err, "failed to count the required limitRanges")
		framework.Logf("LimitRange %q has been deleted.", lrName)

		ginkgo.By(fmt.Sprintf("Confirm that a single LimitRange still exists with label %q", e2eLabelSelector))
		limitRangeList, err = f.ClientSet.CoreV1().LimitRanges("").List(ctx, metav1.ListOptions{LabelSelector: e2eLabelSelector})
		framework.ExpectNoError(err, "Failed to list limitRanges")
		gomega.Expect(limitRangeList.Items).To(gomega.HaveLen(1), "Failed to find the correct limitRange count")
		framework.Logf("Found %d limitRange", len(limitRangeList.Items))
	})
})

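// equalResourceRequirement verifies that the expected and actual
// ResourceRequirements carry the same requests and the same limits.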
func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
	framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
	err := equalResourceList(expected.Requests, actual.Requests)
	if err != nil {
		return err
	}
	framework.Logf("Verifying limits: expected %v with actual %v", expected.Limits, actual.Limits)
	err = equalResourceList(expected.Limits, actual.Limits)
	return err
}

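// equalResourceList verifies that two ResourceLists contain exactly the same
// resource names with equal quantities, returning a descriptive error otherwise.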
func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
	for k, v := range expected {
		if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) {
			return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String())
		}
	}
	for k, v := range actual {
		if expectedValue, found := expected[k]; !found || (v.Cmp(expectedValue) != 0) {
			return fmt.Errorf("resource %v expected %v actual %v", k, expectedValue.String(), v.String())
		}
	}
	return nil
}

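// getResourceList builds a ResourceList from the given cpu, memory and
// ephemeral-storage quantities, skipping any that are empty.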
func getResourceList(cpu, memory string, ephemeralStorage string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	if ephemeralStorage != "" {
		res[v1.ResourceEphemeralStorage] = resource.MustParse(ephemeralStorage)
	}
	return res
}

// newLimitRange returns a limit range with specified data
func newLimitRange(name, value string, limitType v1.LimitType,
	min, max,
	defaultLimit, defaultRequest,
	maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
	return &v1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Labels: map[string]string{
				"time": value,
			},
		},
		Spec: v1.LimitRangeSpec{
			Limits: []v1.LimitRangeItem{
				{
					Type:                 limitType,
					Min:                  min,
					Max:                  max,
					Default:              defaultLimit,
					DefaultRequest:       defaultRequest,
					MaxLimitRequestRatio: maxLimitRequestRatio,
				},
			},
		},
	}
}

// newTestPod returns a pod that has the specified requests and limits
func newTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "pause",
					Image: imageutils.GetPauseImageName(),
					Resources: v1.ResourceRequirements{
						Requests: requests,
						Limits:   limits,
					},
				},
			},
		},
	}
}

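// checkLimitRangeListQuantity returns a poll condition that succeeds once
// exactly `quantity` LimitRanges matching `label` exist in the test namespace.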
func checkLimitRangeListQuantity(f *framework.Framework, label string, quantity int) func(ctx context.Context) (bool, error) {
	return func(ctx context.Context) (bool, error) {
		framework.Logf("Requesting list of LimitRange to confirm quantity")

		list, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{LabelSelector: label})
		if err != nil {
			return false, err
		}

		if len(list.Items) != quantity {
			return false, nil
		}
		framework.Logf("Found %d LimitRange with label %q", quantity, label)
		return true, nil
	}
}
