...

Source file src/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission_test.go

Documentation: k8s.io/kubernetes/plugin/pkg/admission/resourcequota

     1  /*
     2  Copyright 2014 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package resourcequota
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strconv"
    23  	"strings"
    24  	"testing"
    25  
    26  	corev1 "k8s.io/api/core/v1"
    27  	"k8s.io/apimachinery/pkg/api/resource"
    28  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    29  	"k8s.io/apimachinery/pkg/util/sets"
    30  	"k8s.io/apiserver/pkg/admission"
    31  	genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer"
    32  	"k8s.io/apiserver/pkg/admission/plugin/resourcequota"
    33  	resourcequotaapi "k8s.io/apiserver/pkg/admission/plugin/resourcequota/apis/resourcequota"
    34  	"k8s.io/client-go/informers"
    35  	"k8s.io/client-go/kubernetes"
    36  	"k8s.io/client-go/kubernetes/fake"
    37  	testcore "k8s.io/client-go/testing"
    38  	"k8s.io/client-go/tools/cache"
    39  	api "k8s.io/kubernetes/pkg/apis/core"
    40  	kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
    41  	"k8s.io/kubernetes/pkg/quota/v1/install"
    42  )
    43  
    44  func getResourceList(cpu, memory string) api.ResourceList {
    45  	res := api.ResourceList{}
    46  	if cpu != "" {
    47  		res[api.ResourceCPU] = resource.MustParse(cpu)
    48  	}
    49  	if memory != "" {
    50  		res[api.ResourceMemory] = resource.MustParse(memory)
    51  	}
    52  	return res
    53  }
    54  
    55  func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
    56  	res := api.ResourceRequirements{}
    57  	res.Requests = requests
    58  	res.Limits = limits
    59  	return res
    60  }
    61  
    62  func getVolumeResourceRequirements(requests, limits api.ResourceList) api.VolumeResourceRequirements {
    63  	res := api.VolumeResourceRequirements{}
    64  	res.Requests = requests
    65  	res.Limits = limits
    66  	return res
    67  }
    68  
    69  func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod {
    70  	pod := &api.Pod{
    71  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
    72  		Spec:       api.PodSpec{},
    73  	}
    74  	pod.Spec.Containers = make([]api.Container, 0, numContainers)
    75  	for i := 0; i < numContainers; i++ {
    76  		pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
    77  			Image:     "foo:V" + strconv.Itoa(i),
    78  			Resources: resources,
    79  		})
    80  	}
    81  	return pod
    82  }
    83  
    84  func validPodWithPriority(name string, numContainers int, resources api.ResourceRequirements, priorityClass string) *api.Pod {
    85  	pod := validPod(name, numContainers, resources)
    86  	if priorityClass != "" {
    87  		pod.Spec.PriorityClassName = priorityClass
    88  	}
    89  	return pod
    90  }
    91  
    92  func validPersistentVolumeClaim(name string, resources api.VolumeResourceRequirements) *api.PersistentVolumeClaim {
    93  	return &api.PersistentVolumeClaim{
    94  		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
    95  		Spec: api.PersistentVolumeClaimSpec{
    96  			Resources: resources,
    97  		},
    98  	}
    99  }
   100  
   101  func createHandler(kubeClient kubernetes.Interface, informerFactory informers.SharedInformerFactory, stopCh chan struct{}) (*resourcequota.QuotaAdmission, error) {
   102  	return createHandlerWithConfig(kubeClient, informerFactory, nil, stopCh)
   103  }
   104  
   105  func createHandlerWithConfig(kubeClient kubernetes.Interface, informerFactory informers.SharedInformerFactory, config *resourcequotaapi.Configuration, stopCh chan struct{}) (*resourcequota.QuotaAdmission, error) {
   106  	if config == nil {
   107  		config = &resourcequotaapi.Configuration{}
   108  	}
   109  	quotaConfiguration := install.NewQuotaConfigurationForAdmission()
   110  
   111  	handler, err := resourcequota.NewResourceQuota(config, 5)
   112  	if err != nil {
   113  		return nil, err
   114  	}
   115  
   116  	initializers := admission.PluginInitializers{
   117  		genericadmissioninitializer.New(kubeClient, nil, informerFactory, nil, nil, stopCh, nil),
   118  		kubeapiserveradmission.NewPluginInitializer(nil, quotaConfiguration, nil),
   119  	}
   120  	initializers.Initialize(handler)
   121  
   122  	return handler, admission.ValidateInitialization(handler)
   123  }
   124  
   125  // TestAdmissionIgnoresDelete verifies that the admission controller ignores delete operations
   126  func TestAdmissionIgnoresDelete(t *testing.T) {
   127  	stopCh := make(chan struct{})
   128  	defer close(stopCh)
   129  
   130  	kubeClient := fake.NewSimpleClientset()
   131  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   132  
   133  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   134  	if err != nil {
   135  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   136  	}
   137  
   138  	namespace := "default"
   139  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(nil, nil, api.Kind("Pod").WithVersion("version"), namespace, "name", corev1.Resource("pods").WithVersion("version"), "", admission.Delete, &metav1.DeleteOptions{}, false, nil), nil)
   140  	if err != nil {
   141  		t.Errorf("ResourceQuota should admit all deletes: %v", err)
   142  	}
   143  }
   144  
   145  // TestAdmissionIgnoresSubresources verifies that the admission controller ignores subresources
   146  // It verifies that creation of a pod that would have exceeded quota is properly failed
   147  // It verifies that create operations to a subresource that would have exceeded quota would succeed
   148  func TestAdmissionIgnoresSubresources(t *testing.T) {
   149  	resourceQuota := &corev1.ResourceQuota{}
   150  	resourceQuota.Name = "quota"
   151  	resourceQuota.Namespace = "test"
   152  	resourceQuota.Status = corev1.ResourceQuotaStatus{
   153  		Hard: corev1.ResourceList{},
   154  		Used: corev1.ResourceList{},
   155  	}
   156  	resourceQuota.Status.Hard[corev1.ResourceMemory] = resource.MustParse("2Gi")
   157  	resourceQuota.Status.Used[corev1.ResourceMemory] = resource.MustParse("1Gi")
   158  	stopCh := make(chan struct{})
   159  	defer close(stopCh)
   160  
   161  	kubeClient := fake.NewSimpleClientset()
   162  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   163  
   164  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   165  	if err != nil {
   166  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   167  	}
   168  
   169  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   170  	newPod := validPod("123", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
   171  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   172  	if err == nil {
   173  		t.Errorf("Expected an error because the pod exceeded allowed quota")
   174  	}
   175  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "subresource", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   176  	if err != nil {
   177  		t.Errorf("Did not expect an error because the action went to a subresource: %v", err)
   178  	}
   179  }
   180  
   181  // TestAdmitBelowQuotaLimit verifies that a pod when created has its usage reflected on the quota
   182  func TestAdmitBelowQuotaLimit(t *testing.T) {
   183  	resourceQuota := &corev1.ResourceQuota{
   184  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   185  		Status: corev1.ResourceQuotaStatus{
   186  			Hard: corev1.ResourceList{
   187  				corev1.ResourceCPU:    resource.MustParse("3"),
   188  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   189  				corev1.ResourcePods:   resource.MustParse("5"),
   190  			},
   191  			Used: corev1.ResourceList{
   192  				corev1.ResourceCPU:    resource.MustParse("1"),
   193  				corev1.ResourceMemory: resource.MustParse("50Gi"),
   194  				corev1.ResourcePods:   resource.MustParse("3"),
   195  			},
   196  		},
   197  	}
   198  	stopCh := make(chan struct{})
   199  	defer close(stopCh)
   200  
   201  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   202  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   203  
   204  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   205  	if err != nil {
   206  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   207  	}
   208  
   209  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   210  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
   211  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   212  	if err != nil {
   213  		t.Errorf("Unexpected error: %v", err)
   214  	}
   215  	if len(kubeClient.Actions()) == 0 {
   216  		t.Errorf("Expected a client action")
   217  	}
   218  
   219  	expectedActionSet := sets.NewString(
   220  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   221  	)
   222  	actionSet := sets.NewString()
   223  	for _, action := range kubeClient.Actions() {
   224  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   225  	}
   226  	if !actionSet.HasAll(expectedActionSet.List()...) {
   227  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   228  	}
   229  
   230  	decimatedActions := removeListWatch(kubeClient.Actions())
   231  	lastActionIndex := len(decimatedActions) - 1
   232  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   233  	expectedUsage := corev1.ResourceQuota{
   234  		Status: corev1.ResourceQuotaStatus{
   235  			Hard: corev1.ResourceList{
   236  				corev1.ResourceCPU:    resource.MustParse("3"),
   237  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   238  				corev1.ResourcePods:   resource.MustParse("5"),
   239  			},
   240  			Used: corev1.ResourceList{
   241  				corev1.ResourceCPU:    resource.MustParse("1100m"),
   242  				corev1.ResourceMemory: resource.MustParse("52Gi"),
   243  				corev1.ResourcePods:   resource.MustParse("4"),
   244  			},
   245  		},
   246  	}
   247  	for k, v := range expectedUsage.Status.Used {
   248  		actual := usage.Status.Used[k]
   249  		actualValue := actual.String()
   250  		expectedValue := v.String()
   251  		if expectedValue != actualValue {
   252  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   253  		}
   254  	}
   255  }
   256  
   257  // TestAdmitDryRun verifies that a pod when created with dry-run doesn not have its usage reflected on the quota
   258  // and that dry-run requests can still be rejected if they would exceed the quota
   259  func TestAdmitDryRun(t *testing.T) {
   260  	resourceQuota := &corev1.ResourceQuota{
   261  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   262  		Status: corev1.ResourceQuotaStatus{
   263  			Hard: corev1.ResourceList{
   264  				corev1.ResourceCPU:    resource.MustParse("3"),
   265  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   266  				corev1.ResourcePods:   resource.MustParse("5"),
   267  			},
   268  			Used: corev1.ResourceList{
   269  				corev1.ResourceCPU:    resource.MustParse("1"),
   270  				corev1.ResourceMemory: resource.MustParse("50Gi"),
   271  				corev1.ResourcePods:   resource.MustParse("3"),
   272  			},
   273  		},
   274  	}
   275  	stopCh := make(chan struct{})
   276  	defer close(stopCh)
   277  
   278  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   279  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   280  
   281  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   282  	if err != nil {
   283  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   284  	}
   285  
   286  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   287  
   288  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
   289  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, true, nil), nil)
   290  	if err != nil {
   291  		t.Errorf("Unexpected error: %v", err)
   292  	}
   293  
   294  	newPod = validPod("too-large-pod", 1, getResourceRequirements(getResourceList("100m", "60Gi"), getResourceList("", "")))
   295  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, true, nil), nil)
   296  	if err == nil {
   297  		t.Errorf("Expected error but got none")
   298  	}
   299  
   300  	if len(kubeClient.Actions()) != 0 {
   301  		t.Errorf("Expected no client action on dry-run")
   302  	}
   303  }
   304  
   305  // TestAdmitHandlesOldObjects verifies that admit handles updates correctly with old objects
   306  func TestAdmitHandlesOldObjects(t *testing.T) {
   307  	// in this scenario, the old quota was based on a service type=loadbalancer
   308  	resourceQuota := &corev1.ResourceQuota{
   309  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   310  		Status: corev1.ResourceQuotaStatus{
   311  			Hard: corev1.ResourceList{
   312  				corev1.ResourceServices:              resource.MustParse("10"),
   313  				corev1.ResourceServicesLoadBalancers: resource.MustParse("10"),
   314  				corev1.ResourceServicesNodePorts:     resource.MustParse("10"),
   315  			},
   316  			Used: corev1.ResourceList{
   317  				corev1.ResourceServices:              resource.MustParse("1"),
   318  				corev1.ResourceServicesLoadBalancers: resource.MustParse("1"),
   319  				corev1.ResourceServicesNodePorts:     resource.MustParse("0"),
   320  			},
   321  		},
   322  	}
   323  
   324  	// start up quota system
   325  	stopCh := make(chan struct{})
   326  	defer close(stopCh)
   327  
   328  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   329  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   330  
   331  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   332  	if err != nil {
   333  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   334  	}
   335  
   336  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   337  
   338  	// old service was a load balancer, but updated version is a node port.
   339  	existingService := &api.Service{
   340  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
   341  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
   342  	}
   343  	newService := &api.Service{
   344  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
   345  		Spec: api.ServiceSpec{
   346  			Type:  api.ServiceTypeNodePort,
   347  			Ports: []api.ServicePort{{Port: 1234}},
   348  		},
   349  	}
   350  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
   351  	if err != nil {
   352  		t.Errorf("Unexpected error: %v", err)
   353  	}
   354  	if len(kubeClient.Actions()) == 0 {
   355  		t.Errorf("Expected a client action")
   356  	}
   357  
   358  	// the only action should have been to update the quota (since we should not have fetched the previous item)
   359  	expectedActionSet := sets.NewString(
   360  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   361  	)
   362  	actionSet := sets.NewString()
   363  	for _, action := range kubeClient.Actions() {
   364  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   365  	}
   366  	if !actionSet.HasAll(expectedActionSet.List()...) {
   367  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   368  	}
   369  
   370  	// verify usage decremented the loadbalancer, and incremented the nodeport, but kept the service the same.
   371  	decimatedActions := removeListWatch(kubeClient.Actions())
   372  	lastActionIndex := len(decimatedActions) - 1
   373  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   374  
   375  	// Verify service usage. Since we don't add negative values, the corev1.ResourceServicesLoadBalancers
   376  	// will remain on last reported value
   377  	expectedUsage := corev1.ResourceQuota{
   378  		Status: corev1.ResourceQuotaStatus{
   379  			Hard: corev1.ResourceList{
   380  				corev1.ResourceServices:              resource.MustParse("10"),
   381  				corev1.ResourceServicesLoadBalancers: resource.MustParse("10"),
   382  				corev1.ResourceServicesNodePorts:     resource.MustParse("10"),
   383  			},
   384  			Used: corev1.ResourceList{
   385  				corev1.ResourceServices:              resource.MustParse("1"),
   386  				corev1.ResourceServicesLoadBalancers: resource.MustParse("1"),
   387  				corev1.ResourceServicesNodePorts:     resource.MustParse("1"),
   388  			},
   389  		},
   390  	}
   391  	for k, v := range expectedUsage.Status.Used {
   392  		actual := usage.Status.Used[k]
   393  		actualValue := actual.String()
   394  		expectedValue := v.String()
   395  		if expectedValue != actualValue {
   396  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   397  		}
   398  	}
   399  }
   400  
   401  func TestAdmitHandlesNegativePVCUpdates(t *testing.T) {
   402  	resourceQuota := &corev1.ResourceQuota{
   403  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   404  		Status: corev1.ResourceQuotaStatus{
   405  			Hard: corev1.ResourceList{
   406  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"),
   407  				corev1.ResourceRequestsStorage:        resource.MustParse("100Gi"),
   408  			},
   409  			Used: corev1.ResourceList{
   410  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
   411  				corev1.ResourceRequestsStorage:        resource.MustParse("10Gi"),
   412  			},
   413  		},
   414  	}
   415  
   416  	// start up quota system
   417  	stopCh := make(chan struct{})
   418  	defer close(stopCh)
   419  
   420  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   421  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   422  
   423  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   424  	if err != nil {
   425  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   426  	}
   427  
   428  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   429  
   430  	oldPVC := &api.PersistentVolumeClaim{
   431  		ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"},
   432  		Spec: api.PersistentVolumeClaimSpec{
   433  			Resources: getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("10Gi")}, api.ResourceList{}),
   434  		},
   435  	}
   436  
   437  	newPVC := &api.PersistentVolumeClaim{
   438  		ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test"},
   439  		Spec: api.PersistentVolumeClaimSpec{
   440  			Resources: getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("5Gi")}, api.ResourceList{}),
   441  		},
   442  	}
   443  
   444  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
   445  	if err != nil {
   446  		t.Errorf("Unexpected error: %v", err)
   447  	}
   448  	if len(kubeClient.Actions()) != 0 {
   449  		t.Errorf("No client action should be taken in case of negative updates")
   450  	}
   451  }
   452  
   453  func TestAdmitHandlesPVCUpdates(t *testing.T) {
   454  	resourceQuota := &corev1.ResourceQuota{
   455  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   456  		Status: corev1.ResourceQuotaStatus{
   457  			Hard: corev1.ResourceList{
   458  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"),
   459  				corev1.ResourceRequestsStorage:        resource.MustParse("100Gi"),
   460  			},
   461  			Used: corev1.ResourceList{
   462  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
   463  				corev1.ResourceRequestsStorage:        resource.MustParse("10Gi"),
   464  			},
   465  		},
   466  	}
   467  
   468  	// start up quota system
   469  	stopCh := make(chan struct{})
   470  	defer close(stopCh)
   471  
   472  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   473  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   474  
   475  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   476  	if err != nil {
   477  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   478  	}
   479  
   480  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   481  
   482  	oldPVC := &api.PersistentVolumeClaim{
   483  		ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test", ResourceVersion: "1"},
   484  		Spec: api.PersistentVolumeClaimSpec{
   485  			Resources: getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("10Gi")}, api.ResourceList{}),
   486  		},
   487  	}
   488  
   489  	newPVC := &api.PersistentVolumeClaim{
   490  		ObjectMeta: metav1.ObjectMeta{Name: "pvc-to-update", Namespace: "test"},
   491  		Spec: api.PersistentVolumeClaimSpec{
   492  			Resources: getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("15Gi")}, api.ResourceList{}),
   493  		},
   494  	}
   495  
   496  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPVC, oldPVC, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPVC.Namespace, newPVC.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
   497  	if err != nil {
   498  		t.Errorf("Unexpected error: %v", err)
   499  	}
   500  
   501  	if len(kubeClient.Actions()) == 0 {
   502  		t.Errorf("Expected a client action")
   503  	}
   504  
   505  	// the only action should have been to update the quota (since we should not have fetched the previous item)
   506  	expectedActionSet := sets.NewString(
   507  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   508  	)
   509  	actionSet := sets.NewString()
   510  	for _, action := range kubeClient.Actions() {
   511  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   512  	}
   513  	if !actionSet.HasAll(expectedActionSet.List()...) {
   514  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   515  	}
   516  
   517  	decimatedActions := removeListWatch(kubeClient.Actions())
   518  	lastActionIndex := len(decimatedActions) - 1
   519  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   520  	expectedUsage := corev1.ResourceQuota{
   521  		Status: corev1.ResourceQuotaStatus{
   522  			Hard: corev1.ResourceList{
   523  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"),
   524  				corev1.ResourceRequestsStorage:        resource.MustParse("100Gi"),
   525  			},
   526  			Used: corev1.ResourceList{
   527  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
   528  				corev1.ResourceRequestsStorage:        resource.MustParse("15Gi"),
   529  			},
   530  		},
   531  	}
   532  	for k, v := range expectedUsage.Status.Used {
   533  		actual := usage.Status.Used[k]
   534  		actualValue := actual.String()
   535  		expectedValue := v.String()
   536  		if expectedValue != actualValue {
   537  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   538  		}
   539  	}
   540  
   541  }
   542  
   543  // TestAdmitHandlesCreatingUpdates verifies that admit handles updates which behave as creates
   544  func TestAdmitHandlesCreatingUpdates(t *testing.T) {
   545  	// in this scenario, there is an existing service
   546  	resourceQuota := &corev1.ResourceQuota{
   547  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   548  		Status: corev1.ResourceQuotaStatus{
   549  			Hard: corev1.ResourceList{
   550  				corev1.ResourceServices:              resource.MustParse("10"),
   551  				corev1.ResourceServicesLoadBalancers: resource.MustParse("10"),
   552  				corev1.ResourceServicesNodePorts:     resource.MustParse("10"),
   553  			},
   554  			Used: corev1.ResourceList{
   555  				corev1.ResourceServices:              resource.MustParse("1"),
   556  				corev1.ResourceServicesLoadBalancers: resource.MustParse("1"),
   557  				corev1.ResourceServicesNodePorts:     resource.MustParse("0"),
   558  			},
   559  		},
   560  	}
   561  
   562  	// start up quota system
   563  	stopCh := make(chan struct{})
   564  	defer close(stopCh)
   565  
   566  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   567  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   568  
   569  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   570  	if err != nil {
   571  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   572  	}
   573  
   574  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   575  
   576  	// old service didn't exist, so this update is actually a create
   577  	oldService := &api.Service{
   578  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: ""},
   579  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
   580  	}
   581  	newService := &api.Service{
   582  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
   583  		Spec: api.ServiceSpec{
   584  			Type:  api.ServiceTypeNodePort,
   585  			Ports: []api.ServicePort{{Port: 1234}},
   586  		},
   587  	}
   588  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newService, oldService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
   589  	if err != nil {
   590  		t.Errorf("Unexpected error: %v", err)
   591  	}
   592  	if len(kubeClient.Actions()) == 0 {
   593  		t.Errorf("Expected a client action")
   594  	}
   595  
   596  	// the only action should have been to update the quota (since we should not have fetched the previous item)
   597  	expectedActionSet := sets.NewString(
   598  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   599  	)
   600  	actionSet := sets.NewString()
   601  	for _, action := range kubeClient.Actions() {
   602  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   603  	}
   604  	if !actionSet.HasAll(expectedActionSet.List()...) {
   605  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   606  	}
   607  
   608  	// verify that the "old" object was ignored for calculating the new usage
   609  	decimatedActions := removeListWatch(kubeClient.Actions())
   610  	lastActionIndex := len(decimatedActions) - 1
   611  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   612  	expectedUsage := corev1.ResourceQuota{
   613  		Status: corev1.ResourceQuotaStatus{
   614  			Hard: corev1.ResourceList{
   615  				corev1.ResourceServices:              resource.MustParse("10"),
   616  				corev1.ResourceServicesLoadBalancers: resource.MustParse("10"),
   617  				corev1.ResourceServicesNodePorts:     resource.MustParse("10"),
   618  			},
   619  			Used: corev1.ResourceList{
   620  				corev1.ResourceServices:              resource.MustParse("2"),
   621  				corev1.ResourceServicesLoadBalancers: resource.MustParse("1"),
   622  				corev1.ResourceServicesNodePorts:     resource.MustParse("1"),
   623  			},
   624  		},
   625  	}
   626  	for k, v := range expectedUsage.Status.Used {
   627  		actual := usage.Status.Used[k]
   628  		actualValue := actual.String()
   629  		expectedValue := v.String()
   630  		if expectedValue != actualValue {
   631  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   632  		}
   633  	}
   634  }
   635  
   636  // TestAdmitExceedQuotaLimit verifies that if a pod exceeded allowed usage that its rejected during admission.
   637  func TestAdmitExceedQuotaLimit(t *testing.T) {
   638  	resourceQuota := &corev1.ResourceQuota{
   639  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   640  		Status: corev1.ResourceQuotaStatus{
   641  			Hard: corev1.ResourceList{
   642  				corev1.ResourceCPU:    resource.MustParse("3"),
   643  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   644  				corev1.ResourcePods:   resource.MustParse("5"),
   645  			},
   646  			Used: corev1.ResourceList{
   647  				corev1.ResourceCPU:    resource.MustParse("1"),
   648  				corev1.ResourceMemory: resource.MustParse("50Gi"),
   649  				corev1.ResourcePods:   resource.MustParse("3"),
   650  			},
   651  		},
   652  	}
   653  	stopCh := make(chan struct{})
   654  	defer close(stopCh)
   655  
   656  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   657  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   658  
   659  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   660  	if err != nil {
   661  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   662  	}
   663  
   664  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   665  	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
   666  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   667  	if err == nil {
   668  		t.Errorf("Expected an error exceeding quota")
   669  	}
   670  }
   671  
   672  // TestAdmitEnforceQuotaConstraints verifies that if a quota tracks a particular resource that that resource is
   673  // specified on the pod.  In this case, we create a quota that tracks cpu request, memory request, and memory limit.
   674  // We ensure that a pod that does not specify a memory limit that it fails in admission.
   675  func TestAdmitEnforceQuotaConstraints(t *testing.T) {
   676  	resourceQuota := &corev1.ResourceQuota{
   677  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
   678  		Status: corev1.ResourceQuotaStatus{
   679  			Hard: corev1.ResourceList{
   680  				corev1.ResourceCPU:          resource.MustParse("3"),
   681  				corev1.ResourceMemory:       resource.MustParse("100Gi"),
   682  				corev1.ResourceLimitsMemory: resource.MustParse("200Gi"),
   683  				corev1.ResourcePods:         resource.MustParse("5"),
   684  			},
   685  			Used: corev1.ResourceList{
   686  				corev1.ResourceCPU:          resource.MustParse("1"),
   687  				corev1.ResourceMemory:       resource.MustParse("50Gi"),
   688  				corev1.ResourceLimitsMemory: resource.MustParse("100Gi"),
   689  				corev1.ResourcePods:         resource.MustParse("3"),
   690  			},
   691  		},
   692  	}
   693  	stopCh := make(chan struct{})
   694  	defer close(stopCh)
   695  
   696  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   697  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   698  
   699  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   700  	if err != nil {
   701  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   702  	}
   703  
   704  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   705  	// verify all values are specified as required on the quota
   706  	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
   707  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   708  	if err == nil {
   709  		t.Errorf("Expected an error because the pod does not specify a memory limit")
   710  	}
   711  }
   712  
   713  // TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in
   714  func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
   715  	resourceQuota := &corev1.ResourceQuota{
   716  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"},
   717  		Status: corev1.ResourceQuotaStatus{
   718  			Hard: corev1.ResourceList{
   719  				corev1.ResourceCPU:          resource.MustParse("3"),
   720  				corev1.ResourceMemory:       resource.MustParse("100Gi"),
   721  				corev1.ResourceLimitsMemory: resource.MustParse("200Gi"),
   722  				corev1.ResourcePods:         resource.MustParse("5"),
   723  			},
   724  			Used: corev1.ResourceList{
   725  				corev1.ResourceCPU:          resource.MustParse("1"),
   726  				corev1.ResourceMemory:       resource.MustParse("50Gi"),
   727  				corev1.ResourceLimitsMemory: resource.MustParse("100Gi"),
   728  				corev1.ResourcePods:         resource.MustParse("3"),
   729  			},
   730  		},
   731  	}
   732  
   733  	stopCh := make(chan struct{})
   734  	defer close(stopCh)
   735  
   736  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   737  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   738  
   739  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   740  	if err != nil {
   741  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   742  	}
   743  
   744  	// Add to the index
   745  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   746  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("200m", "")))
   747  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   748  	if err != nil {
   749  		t.Errorf("Did not expect an error because the pod is in a different namespace than the quota: %v", err)
   750  	}
   751  }
   752  
   753  // TestAdmitBelowTerminatingQuotaLimit ensures that terminating pods are charged to the right quota.
   754  // It creates a terminating and non-terminating quota, and creates a terminating pod.
   755  // It ensures that the terminating quota is incremented, and the non-terminating quota is not.
   756  func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
   757  	resourceQuotaNonTerminating := &corev1.ResourceQuota{
   758  		ObjectMeta: metav1.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"},
   759  		Spec: corev1.ResourceQuotaSpec{
   760  			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotTerminating},
   761  		},
   762  		Status: corev1.ResourceQuotaStatus{
   763  			Hard: corev1.ResourceList{
   764  				corev1.ResourceCPU:    resource.MustParse("3"),
   765  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   766  				corev1.ResourcePods:   resource.MustParse("5"),
   767  			},
   768  			Used: corev1.ResourceList{
   769  				corev1.ResourceCPU:    resource.MustParse("1"),
   770  				corev1.ResourceMemory: resource.MustParse("50Gi"),
   771  				corev1.ResourcePods:   resource.MustParse("3"),
   772  			},
   773  		},
   774  	}
   775  	resourceQuotaTerminating := &corev1.ResourceQuota{
   776  		ObjectMeta: metav1.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"},
   777  		Spec: corev1.ResourceQuotaSpec{
   778  			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeTerminating},
   779  		},
   780  		Status: corev1.ResourceQuotaStatus{
   781  			Hard: corev1.ResourceList{
   782  				corev1.ResourceCPU:    resource.MustParse("3"),
   783  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   784  				corev1.ResourcePods:   resource.MustParse("5"),
   785  			},
   786  			Used: corev1.ResourceList{
   787  				corev1.ResourceCPU:    resource.MustParse("1"),
   788  				corev1.ResourceMemory: resource.MustParse("50Gi"),
   789  				corev1.ResourcePods:   resource.MustParse("3"),
   790  			},
   791  		},
   792  	}
   793  	stopCh := make(chan struct{})
   794  	defer close(stopCh)
   795  
   796  	kubeClient := fake.NewSimpleClientset(resourceQuotaTerminating, resourceQuotaNonTerminating)
   797  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   798  
   799  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   800  	if err != nil {
   801  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   802  	}
   803  
   804  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNonTerminating)
   805  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaTerminating)
   806  
   807  	// create a pod that has an active deadline
   808  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "2Gi"), getResourceList("", "")))
   809  	activeDeadlineSeconds := int64(30)
   810  	newPod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
   811  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   812  	if err != nil {
   813  		t.Errorf("Unexpected error: %v", err)
   814  	}
   815  	if len(kubeClient.Actions()) == 0 {
   816  		t.Errorf("Expected a client action")
   817  	}
   818  
   819  	expectedActionSet := sets.NewString(
   820  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   821  	)
   822  	actionSet := sets.NewString()
   823  	for _, action := range kubeClient.Actions() {
   824  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   825  	}
   826  	if !actionSet.HasAll(expectedActionSet.List()...) {
   827  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   828  	}
   829  
   830  	decimatedActions := removeListWatch(kubeClient.Actions())
   831  	lastActionIndex := len(decimatedActions) - 1
   832  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   833  
   834  	// ensure only the quota-terminating was updated
   835  	if usage.Name != resourceQuotaTerminating.Name {
   836  		t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaTerminating.Name, usage.Name)
   837  	}
   838  
   839  	expectedUsage := corev1.ResourceQuota{
   840  		Status: corev1.ResourceQuotaStatus{
   841  			Hard: corev1.ResourceList{
   842  				corev1.ResourceCPU:    resource.MustParse("3"),
   843  				corev1.ResourceMemory: resource.MustParse("100Gi"),
   844  				corev1.ResourcePods:   resource.MustParse("5"),
   845  			},
   846  			Used: corev1.ResourceList{
   847  				corev1.ResourceCPU:    resource.MustParse("1100m"),
   848  				corev1.ResourceMemory: resource.MustParse("52Gi"),
   849  				corev1.ResourcePods:   resource.MustParse("4"),
   850  			},
   851  		},
   852  	}
   853  	for k, v := range expectedUsage.Status.Used {
   854  		actual := usage.Status.Used[k]
   855  		actualValue := actual.String()
   856  		expectedValue := v.String()
   857  		if expectedValue != actualValue {
   858  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   859  		}
   860  	}
   861  }
   862  
   863  // TestAdmitBelowBestEffortQuotaLimit creates a best effort and non-best effort quota.
   864  // It verifies that best effort pods are properly scoped to the best effort quota document.
   865  func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) {
   866  	resourceQuotaBestEffort := &corev1.ResourceQuota{
   867  		ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
   868  		Spec: corev1.ResourceQuotaSpec{
   869  			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
   870  		},
   871  		Status: corev1.ResourceQuotaStatus{
   872  			Hard: corev1.ResourceList{
   873  				corev1.ResourcePods: resource.MustParse("5"),
   874  			},
   875  			Used: corev1.ResourceList{
   876  				corev1.ResourcePods: resource.MustParse("3"),
   877  			},
   878  		},
   879  	}
   880  	resourceQuotaNotBestEffort := &corev1.ResourceQuota{
   881  		ObjectMeta: metav1.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"},
   882  		Spec: corev1.ResourceQuotaSpec{
   883  			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotBestEffort},
   884  		},
   885  		Status: corev1.ResourceQuotaStatus{
   886  			Hard: corev1.ResourceList{
   887  				corev1.ResourcePods: resource.MustParse("5"),
   888  			},
   889  			Used: corev1.ResourceList{
   890  				corev1.ResourcePods: resource.MustParse("3"),
   891  			},
   892  		},
   893  	}
   894  	stopCh := make(chan struct{})
   895  	defer close(stopCh)
   896  
   897  	kubeClient := fake.NewSimpleClientset(resourceQuotaBestEffort, resourceQuotaNotBestEffort)
   898  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   899  
   900  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   901  	if err != nil {
   902  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   903  	}
   904  
   905  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaBestEffort)
   906  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuotaNotBestEffort)
   907  
   908  	// create a pod that is best effort because it does not make a request for anything
   909  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")))
   910  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   911  	if err != nil {
   912  		t.Errorf("Unexpected error: %v", err)
   913  	}
   914  	expectedActionSet := sets.NewString(
   915  		strings.Join([]string{"update", "resourcequotas", "status"}, "-"),
   916  	)
   917  	actionSet := sets.NewString()
   918  	for _, action := range kubeClient.Actions() {
   919  		actionSet.Insert(strings.Join([]string{action.GetVerb(), action.GetResource().Resource, action.GetSubresource()}, "-"))
   920  	}
   921  	if !actionSet.HasAll(expectedActionSet.List()...) {
   922  		t.Errorf("Expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet, actionSet, expectedActionSet.Difference(actionSet))
   923  	}
   924  	decimatedActions := removeListWatch(kubeClient.Actions())
   925  	lastActionIndex := len(decimatedActions) - 1
   926  	usage := decimatedActions[lastActionIndex].(testcore.UpdateAction).GetObject().(*corev1.ResourceQuota)
   927  
   928  	if usage.Name != resourceQuotaBestEffort.Name {
   929  		t.Errorf("Incremented the wrong quota, expected %v, actual %v", resourceQuotaBestEffort.Name, usage.Name)
   930  	}
   931  
   932  	expectedUsage := corev1.ResourceQuota{
   933  		Status: corev1.ResourceQuotaStatus{
   934  			Hard: corev1.ResourceList{
   935  				corev1.ResourcePods: resource.MustParse("5"),
   936  			},
   937  			Used: corev1.ResourceList{
   938  				corev1.ResourcePods: resource.MustParse("4"),
   939  			},
   940  		},
   941  	}
   942  	for k, v := range expectedUsage.Status.Used {
   943  		actual := usage.Status.Used[k]
   944  		actualValue := actual.String()
   945  		expectedValue := v.String()
   946  		if expectedValue != actualValue {
   947  			t.Errorf("Usage Used: Key: %v, Expected: %v, Actual: %v", k, expectedValue, actualValue)
   948  		}
   949  	}
   950  }
   951  
   952  func removeListWatch(in []testcore.Action) []testcore.Action {
   953  	decimatedActions := []testcore.Action{}
   954  	// list and watch resource quota is done to maintain our cache, so that's expected.  Remove them from results
   955  	for i := range in {
   956  		if in[i].Matches("list", "resourcequotas") || in[i].Matches("watch", "resourcequotas") {
   957  			continue
   958  		}
   959  
   960  		decimatedActions = append(decimatedActions, in[i])
   961  	}
   962  	return decimatedActions
   963  }
   964  
   965  // TestAdmitBestEffortQuotaLimitIgnoresBurstable validates that a besteffort quota does not match a resource
   966  // guaranteed pod.
   967  func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
   968  	resourceQuota := &corev1.ResourceQuota{
   969  		ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
   970  		Spec: corev1.ResourceQuotaSpec{
   971  			Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
   972  		},
   973  		Status: corev1.ResourceQuotaStatus{
   974  			Hard: corev1.ResourceList{
   975  				corev1.ResourcePods: resource.MustParse("5"),
   976  			},
   977  			Used: corev1.ResourceList{
   978  				corev1.ResourcePods: resource.MustParse("3"),
   979  			},
   980  		},
   981  	}
   982  	stopCh := make(chan struct{})
   983  	defer close(stopCh)
   984  
   985  	kubeClient := fake.NewSimpleClientset(resourceQuota)
   986  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
   987  
   988  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
   989  	if err != nil {
   990  		t.Errorf("Error occurred while creating admission plugin: %v", err)
   991  	}
   992  
   993  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
   994  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")))
   995  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
   996  	if err != nil {
   997  		t.Errorf("Unexpected error: %v", err)
   998  	}
   999  
  1000  	decimatedActions := removeListWatch(kubeClient.Actions())
  1001  	if len(decimatedActions) != 0 {
  1002  		t.Errorf("Expected no client actions because the incoming pod did not match best effort quota: %v", kubeClient.Actions())
  1003  	}
  1004  }
  1005  
  1006  // TestAdmissionSetsMissingNamespace verifies that if an object lacks a
  1007  // namespace, it will be set.
  1008  func TestAdmissionSetsMissingNamespace(t *testing.T) {
  1009  	namespace := "test"
  1010  	resourceQuota := &corev1.ResourceQuota{
  1011  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"},
  1012  		Status: corev1.ResourceQuotaStatus{
  1013  			Hard: corev1.ResourceList{
  1014  				corev1.ResourcePods: resource.MustParse("3"),
  1015  			},
  1016  			Used: corev1.ResourceList{
  1017  				corev1.ResourcePods: resource.MustParse("1"),
  1018  			},
  1019  		},
  1020  	}
  1021  
  1022  	stopCh := make(chan struct{})
  1023  	defer close(stopCh)
  1024  
  1025  	kubeClient := fake.NewSimpleClientset(resourceQuota)
  1026  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1027  
  1028  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
  1029  	if err != nil {
  1030  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1031  	}
  1032  
  1033  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
  1034  	newPod := validPod("pod-without-namespace", 1, getResourceRequirements(getResourceList("1", "2Gi"), getResourceList("", "")))
  1035  
  1036  	// unset the namespace
  1037  	newPod.ObjectMeta.Namespace = ""
  1038  
  1039  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1040  	if err != nil {
  1041  		t.Errorf("Got unexpected error: %v", err)
  1042  	}
  1043  	if newPod.Namespace != namespace {
  1044  		t.Errorf("Got unexpected pod namespace: %q != %q", newPod.Namespace, namespace)
  1045  	}
  1046  }
  1047  
  1048  // TestAdmitRejectsNegativeUsage verifies that usage for any measured resource cannot be negative.
  1049  func TestAdmitRejectsNegativeUsage(t *testing.T) {
  1050  	resourceQuota := &corev1.ResourceQuota{
  1051  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1052  		Status: corev1.ResourceQuotaStatus{
  1053  			Hard: corev1.ResourceList{
  1054  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("3"),
  1055  				corev1.ResourceRequestsStorage:        resource.MustParse("100Gi"),
  1056  			},
  1057  			Used: corev1.ResourceList{
  1058  				corev1.ResourcePersistentVolumeClaims: resource.MustParse("1"),
  1059  				corev1.ResourceRequestsStorage:        resource.MustParse("10Gi"),
  1060  			},
  1061  		},
  1062  	}
  1063  	stopCh := make(chan struct{})
  1064  	defer close(stopCh)
  1065  
  1066  	kubeClient := fake.NewSimpleClientset(resourceQuota)
  1067  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1068  
  1069  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
  1070  	if err != nil {
  1071  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1072  	}
  1073  
  1074  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
  1075  	// verify quota rejects negative pvc storage requests
  1076  	newPvc := validPersistentVolumeClaim("not-allowed-pvc", getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("-1Gi")}, api.ResourceList{}))
  1077  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1078  	if err == nil {
  1079  		t.Errorf("Expected an error because the pvc has negative storage usage")
  1080  	}
  1081  
  1082  	// verify quota accepts non-negative pvc storage requests
  1083  	newPvc = validPersistentVolumeClaim("not-allowed-pvc", getVolumeResourceRequirements(api.ResourceList{api.ResourceStorage: resource.MustParse("1Gi")}, api.ResourceList{}))
  1084  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPvc, nil, api.Kind("PersistentVolumeClaim").WithVersion("version"), newPvc.Namespace, newPvc.Name, corev1.Resource("persistentvolumeclaims").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1085  	if err != nil {
  1086  		t.Errorf("Unexpected error: %v", err)
  1087  	}
  1088  }
  1089  
  1090  // TestAdmitWhenUnrelatedResourceExceedsQuota verifies that if resource X exceeds quota, it does not prohibit resource Y from admission.
  1091  func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) {
  1092  	resourceQuota := &corev1.ResourceQuota{
  1093  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1094  		Status: corev1.ResourceQuotaStatus{
  1095  			Hard: corev1.ResourceList{
  1096  				corev1.ResourceServices: resource.MustParse("3"),
  1097  				corev1.ResourcePods:     resource.MustParse("4"),
  1098  			},
  1099  			Used: corev1.ResourceList{
  1100  				corev1.ResourceServices: resource.MustParse("4"),
  1101  				corev1.ResourcePods:     resource.MustParse("1"),
  1102  			},
  1103  		},
  1104  	}
  1105  	stopCh := make(chan struct{})
  1106  	defer close(stopCh)
  1107  
  1108  	kubeClient := fake.NewSimpleClientset(resourceQuota)
  1109  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1110  
  1111  	handler, err := createHandler(kubeClient, informerFactory, stopCh)
  1112  	if err != nil {
  1113  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1114  	}
  1115  
  1116  	informerFactory.Core().V1().ResourceQuotas().Informer().GetIndexer().Add(resourceQuota)
  1117  
  1118  	// create a pod that should pass existing quota
  1119  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")))
  1120  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1121  	if err != nil {
  1122  		t.Errorf("Unexpected error: %v", err)
  1123  	}
  1124  }
  1125  
  1126  // TestAdmitLimitedResourceNoQuota verifies if a limited resource is configured with no quota, it cannot be consumed.
  1127  func TestAdmitLimitedResourceNoQuota(t *testing.T) {
  1128  	kubeClient := fake.NewSimpleClientset()
  1129  	stopCh := make(chan struct{})
  1130  	defer close(stopCh)
  1131  
  1132  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1133  
  1134  	// disable consumption of cpu unless there is a covering quota.
  1135  	config := &resourcequotaapi.Configuration{
  1136  		LimitedResources: []resourcequotaapi.LimitedResource{
  1137  			{
  1138  				Resource:      "pods",
  1139  				MatchContains: []string{"cpu"},
  1140  			},
  1141  		},
  1142  	}
  1143  
  1144  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1145  	if err != nil {
  1146  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1147  	}
  1148  
  1149  	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
  1150  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1151  	if err == nil {
  1152  		t.Errorf("Expected an error for consuming a limited resource without quota.")
  1153  	}
  1154  }
  1155  
  1156  // TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources shows it ignores non matching resources in config.
  1157  func TestAdmitLimitedResourceNoQuotaIgnoresNonMatchingResources(t *testing.T) {
  1158  	kubeClient := fake.NewSimpleClientset()
  1159  	stopCh := make(chan struct{})
  1160  	defer close(stopCh)
  1161  
  1162  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1163  
  1164  	// disable consumption of cpu unless there is a covering quota.
  1165  	config := &resourcequotaapi.Configuration{
  1166  		LimitedResources: []resourcequotaapi.LimitedResource{
  1167  			{
  1168  				Resource:      "services",
  1169  				MatchContains: []string{"services"},
  1170  			},
  1171  		},
  1172  	}
  1173  
  1174  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1175  	if err != nil {
  1176  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1177  	}
  1178  
  1179  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
  1180  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1181  	if err != nil {
  1182  		t.Fatalf("Unexpected error: %v", err)
  1183  	}
  1184  }
  1185  
  1186  // TestAdmitLimitedResourceWithQuota verifies if a limited resource is configured with quota, it can be consumed.
  1187  func TestAdmitLimitedResourceWithQuota(t *testing.T) {
  1188  	resourceQuota := &corev1.ResourceQuota{
  1189  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1190  		Status: corev1.ResourceQuotaStatus{
  1191  			Hard: corev1.ResourceList{
  1192  				corev1.ResourceRequestsCPU: resource.MustParse("10"),
  1193  			},
  1194  			Used: corev1.ResourceList{
  1195  				corev1.ResourceRequestsCPU: resource.MustParse("1"),
  1196  			},
  1197  		},
  1198  	}
  1199  	kubeClient := fake.NewSimpleClientset(resourceQuota)
  1200  	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
  1201  	stopCh := make(chan struct{})
  1202  	defer close(stopCh)
  1203  
  1204  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1205  
  1206  	// disable consumption of cpu unless there is a covering quota.
  1207  	// disable consumption of cpu unless there is a covering quota.
  1208  	config := &resourcequotaapi.Configuration{
  1209  		LimitedResources: []resourcequotaapi.LimitedResource{
  1210  			{
  1211  				Resource:      "pods",
  1212  				MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only
  1213  			},
  1214  		},
  1215  	}
  1216  
  1217  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1218  	if err != nil {
  1219  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1220  	}
  1221  
  1222  	indexer.Add(resourceQuota)
  1223  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
  1224  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1225  	if err != nil {
  1226  		t.Errorf("unexpected error: %v", err)
  1227  	}
  1228  }
  1229  
  1230  // TestAdmitLimitedResourceWithMultipleQuota verifies if a limited resource is configured with quota, it can be consumed if one matches.
  1231  func TestAdmitLimitedResourceWithMultipleQuota(t *testing.T) {
  1232  	resourceQuota1 := &corev1.ResourceQuota{
  1233  		ObjectMeta: metav1.ObjectMeta{Name: "quota1", Namespace: "test", ResourceVersion: "124"},
  1234  		Status: corev1.ResourceQuotaStatus{
  1235  			Hard: corev1.ResourceList{
  1236  				corev1.ResourceRequestsCPU: resource.MustParse("10"),
  1237  			},
  1238  			Used: corev1.ResourceList{
  1239  				corev1.ResourceRequestsCPU: resource.MustParse("1"),
  1240  			},
  1241  		},
  1242  	}
  1243  	resourceQuota2 := &corev1.ResourceQuota{
  1244  		ObjectMeta: metav1.ObjectMeta{Name: "quota2", Namespace: "test", ResourceVersion: "124"},
  1245  		Status: corev1.ResourceQuotaStatus{
  1246  			Hard: corev1.ResourceList{
  1247  				corev1.ResourceMemory: resource.MustParse("10Gi"),
  1248  			},
  1249  			Used: corev1.ResourceList{
  1250  				corev1.ResourceMemory: resource.MustParse("1Gi"),
  1251  			},
  1252  		},
  1253  	}
  1254  	kubeClient := fake.NewSimpleClientset(resourceQuota1, resourceQuota2)
  1255  	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
  1256  	stopCh := make(chan struct{})
  1257  	defer close(stopCh)
  1258  
  1259  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1260  
  1261  	// disable consumption of cpu unless there is a covering quota.
  1262  	// disable consumption of cpu unless there is a covering quota.
  1263  	config := &resourcequotaapi.Configuration{
  1264  		LimitedResources: []resourcequotaapi.LimitedResource{
  1265  			{
  1266  				Resource:      "pods",
  1267  				MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only
  1268  			},
  1269  		},
  1270  	}
  1271  
  1272  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1273  	if err != nil {
  1274  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1275  	}
  1276  
  1277  	indexer.Add(resourceQuota1)
  1278  	indexer.Add(resourceQuota2)
  1279  	newPod := validPod("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
  1280  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1281  	if err != nil {
  1282  		t.Errorf("unexpected error: %v", err)
  1283  	}
  1284  }
  1285  
  1286  // TestAdmitLimitedResourceWithQuotaThatDoesNotCover verifies if a limited resource is configured the quota must cover the resource.
  1287  func TestAdmitLimitedResourceWithQuotaThatDoesNotCover(t *testing.T) {
  1288  	resourceQuota := &corev1.ResourceQuota{
  1289  		ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1290  		Status: corev1.ResourceQuotaStatus{
  1291  			Hard: corev1.ResourceList{
  1292  				corev1.ResourceMemory: resource.MustParse("10Gi"),
  1293  			},
  1294  			Used: corev1.ResourceList{
  1295  				corev1.ResourceMemory: resource.MustParse("1Gi"),
  1296  			},
  1297  		},
  1298  	}
  1299  	kubeClient := fake.NewSimpleClientset(resourceQuota)
  1300  	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
  1301  	stopCh := make(chan struct{})
  1302  	defer close(stopCh)
  1303  
  1304  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1305  
  1306  	// disable consumption of cpu unless there is a covering quota.
  1307  	// disable consumption of cpu unless there is a covering quota.
  1308  	config := &resourcequotaapi.Configuration{
  1309  		LimitedResources: []resourcequotaapi.LimitedResource{
  1310  			{
  1311  				Resource:      "pods",
  1312  				MatchContains: []string{"cpu"}, // match on "cpu" only
  1313  			},
  1314  		},
  1315  	}
  1316  
  1317  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1318  	if err != nil {
  1319  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  1320  	}
  1321  
  1322  	indexer.Add(resourceQuota)
  1323  	newPod := validPod("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")))
  1324  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1325  	if err == nil {
  1326  		t.Fatalf("Expected an error since the quota did not cover cpu")
  1327  	}
  1328  }
  1329  
  1330  // TestAdmitLimitedScopeWithQuota verifies if a limited scope is configured the quota must cover the resource.
  1331  func TestAdmitLimitedScopeWithCoverQuota(t *testing.T) {
  1332  	testCases := []struct {
  1333  		description  string
  1334  		testPod      *api.Pod
  1335  		quota        *corev1.ResourceQuota
  1336  		anotherQuota *corev1.ResourceQuota
  1337  		config       *resourcequotaapi.Configuration
  1338  		expErr       string
  1339  	}{
  1340  		{
  1341  			description: "Covering quota exists for configured limited scope PriorityClassNameExists.",
  1342  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
  1343  			quota: &corev1.ResourceQuota{
  1344  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1345  				Spec: corev1.ResourceQuotaSpec{
  1346  					ScopeSelector: &corev1.ScopeSelector{
  1347  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1348  							{
  1349  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1350  								Operator:  corev1.ScopeSelectorOpExists},
  1351  						},
  1352  					},
  1353  				},
  1354  			},
  1355  			config: &resourcequotaapi.Configuration{
  1356  				LimitedResources: []resourcequotaapi.LimitedResource{
  1357  					{
  1358  						Resource: "pods",
  1359  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1360  							{
  1361  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1362  								Operator:  corev1.ScopeSelectorOpExists,
  1363  							},
  1364  						},
  1365  					},
  1366  				},
  1367  			},
  1368  			expErr: "",
  1369  		},
  1370  		{
  1371  			description: "configured limited scope PriorityClassNameExists and limited cpu resource. No covering quota for cpu and pod admit fails.",
  1372  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
  1373  			quota: &corev1.ResourceQuota{
  1374  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1375  				Spec: corev1.ResourceQuotaSpec{
  1376  					ScopeSelector: &corev1.ScopeSelector{
  1377  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1378  							{
  1379  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1380  								Operator:  corev1.ScopeSelectorOpExists},
  1381  						},
  1382  					},
  1383  				},
  1384  			},
  1385  			config: &resourcequotaapi.Configuration{
  1386  				LimitedResources: []resourcequotaapi.LimitedResource{
  1387  					{
  1388  						Resource: "pods",
  1389  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1390  							{
  1391  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1392  								Operator:  corev1.ScopeSelectorOpExists,
  1393  							},
  1394  						},
  1395  						MatchContains: []string{"requests.cpu"}, // match on "requests.cpu" only
  1396  					},
  1397  				},
  1398  			},
  1399  			expErr: "insufficient quota to consume: requests.cpu",
  1400  		},
  1401  		{
  1402  			description: "Covering quota does not exist for configured limited scope PriorityClassNameExists.",
  1403  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
  1404  			quota:       &corev1.ResourceQuota{},
  1405  			config: &resourcequotaapi.Configuration{
  1406  				LimitedResources: []resourcequotaapi.LimitedResource{
  1407  					{
  1408  						Resource: "pods",
  1409  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1410  							{
  1411  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1412  								Operator:  corev1.ScopeSelectorOpExists,
  1413  							},
  1414  						},
  1415  					},
  1416  				},
  1417  			},
  1418  			expErr: "insufficient quota to match these scopes: [{PriorityClass Exists []}]",
  1419  		},
  1420  		{
  1421  			description: "Covering quota does not exist for configured limited scope resourceQuotaBestEffort",
  1422  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
  1423  			quota:       &corev1.ResourceQuota{},
  1424  			config: &resourcequotaapi.Configuration{
  1425  				LimitedResources: []resourcequotaapi.LimitedResource{
  1426  					{
  1427  						Resource: "pods",
  1428  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1429  							{
  1430  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1431  								Operator:  corev1.ScopeSelectorOpExists,
  1432  							},
  1433  						},
  1434  					},
  1435  				},
  1436  			},
  1437  			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
  1438  		},
  1439  		{
  1440  			description: "Covering quota exist for configured limited scope resourceQuotaBestEffort",
  1441  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
  1442  			quota: &corev1.ResourceQuota{
  1443  				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
  1444  				Spec: corev1.ResourceQuotaSpec{
  1445  					Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
  1446  				},
  1447  				Status: corev1.ResourceQuotaStatus{
  1448  					Hard: corev1.ResourceList{
  1449  						corev1.ResourcePods: resource.MustParse("5"),
  1450  					},
  1451  					Used: corev1.ResourceList{
  1452  						corev1.ResourcePods: resource.MustParse("3"),
  1453  					},
  1454  				},
  1455  			},
  1456  			config: &resourcequotaapi.Configuration{
  1457  				LimitedResources: []resourcequotaapi.LimitedResource{
  1458  					{
  1459  						Resource: "pods",
  1460  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1461  							{
  1462  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1463  								Operator:  corev1.ScopeSelectorOpExists,
  1464  							},
  1465  						},
  1466  					},
  1467  				},
  1468  			},
  1469  			expErr: "",
  1470  		},
  1471  		{
  1472  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Neither matches pod. Pod allowed",
  1473  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "fake-priority"),
  1474  			quota:       &corev1.ResourceQuota{},
  1475  			config: &resourcequotaapi.Configuration{
  1476  				LimitedResources: []resourcequotaapi.LimitedResource{
  1477  					{
  1478  						Resource: "pods",
  1479  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1480  							{
  1481  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1482  								Operator:  corev1.ScopeSelectorOpExists,
  1483  							},
  1484  						},
  1485  					},
  1486  					{
  1487  						Resource: "pods",
  1488  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1489  							{
  1490  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1491  								Operator:  corev1.ScopeSelectorOpIn,
  1492  								Values:    []string{"cluster-services"},
  1493  							},
  1494  						},
  1495  					},
  1496  				},
  1497  			},
  1498  			expErr: "",
  1499  		},
  1500  		{
  1501  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only BestEffort scope matches pod. Pod admit fails because covering quota is missing for BestEffort scope",
  1502  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "fake-priority"),
  1503  			quota:       &corev1.ResourceQuota{},
  1504  			config: &resourcequotaapi.Configuration{
  1505  				LimitedResources: []resourcequotaapi.LimitedResource{
  1506  					{
  1507  						Resource: "pods",
  1508  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1509  							{
  1510  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1511  								Operator:  corev1.ScopeSelectorOpExists,
  1512  							},
  1513  						},
  1514  					},
  1515  					{
  1516  						Resource: "pods",
  1517  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1518  							{
  1519  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1520  								Operator:  corev1.ScopeSelectorOpIn,
  1521  								Values:    []string{"cluster-services"},
  1522  							},
  1523  						},
  1524  					},
  1525  				},
  1526  			},
  1527  			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
  1528  		},
  1529  		{
  1530  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Only PriorityClass scope matches pod. Pod admit fails because covering quota is missing for PriorityClass scope",
  1531  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", "")), "cluster-services"),
  1532  			quota:       &corev1.ResourceQuota{},
  1533  			config: &resourcequotaapi.Configuration{
  1534  				LimitedResources: []resourcequotaapi.LimitedResource{
  1535  					{
  1536  						Resource: "pods",
  1537  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1538  							{
  1539  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1540  								Operator:  corev1.ScopeSelectorOpExists,
  1541  							},
  1542  						},
  1543  					},
  1544  					{
  1545  						Resource: "pods",
  1546  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1547  							{
  1548  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1549  								Operator:  corev1.ScopeSelectorOpIn,
  1550  								Values:    []string{"cluster-services"},
  1551  							},
  1552  						},
  1553  					},
  1554  				},
  1555  			},
  1556  			expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
  1557  		},
  1558  		{
  1559  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Pod admit fails because covering quota is missing for PriorityClass scope and BestEffort scope",
  1560  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
  1561  			quota:       &corev1.ResourceQuota{},
  1562  			config: &resourcequotaapi.Configuration{
  1563  				LimitedResources: []resourcequotaapi.LimitedResource{
  1564  					{
  1565  						Resource: "pods",
  1566  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1567  							{
  1568  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1569  								Operator:  corev1.ScopeSelectorOpExists,
  1570  							},
  1571  						},
  1572  					},
  1573  					{
  1574  						Resource: "pods",
  1575  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1576  							{
  1577  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1578  								Operator:  corev1.ScopeSelectorOpIn,
  1579  								Values:    []string{"cluster-services"},
  1580  							},
  1581  						},
  1582  					},
  1583  				},
  1584  			},
  1585  			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []} {PriorityClass In [cluster-services]}]",
  1586  		},
  1587  		{
  1588  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for BestEffort scope. Pod admit fails because covering quota is missing for PriorityClass scope",
  1589  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
  1590  			quota: &corev1.ResourceQuota{
  1591  				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
  1592  				Spec: corev1.ResourceQuotaSpec{
  1593  					Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
  1594  				},
  1595  				Status: corev1.ResourceQuotaStatus{
  1596  					Hard: corev1.ResourceList{
  1597  						corev1.ResourcePods: resource.MustParse("5"),
  1598  					},
  1599  					Used: corev1.ResourceList{
  1600  						corev1.ResourcePods: resource.MustParse("3"),
  1601  					},
  1602  				},
  1603  			},
  1604  			config: &resourcequotaapi.Configuration{
  1605  				LimitedResources: []resourcequotaapi.LimitedResource{
  1606  					{
  1607  						Resource: "pods",
  1608  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1609  							{
  1610  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1611  								Operator:  corev1.ScopeSelectorOpExists,
  1612  							},
  1613  						},
  1614  					},
  1615  					{
  1616  						Resource: "pods",
  1617  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1618  							{
  1619  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1620  								Operator:  corev1.ScopeSelectorOpIn,
  1621  								Values:    []string{"cluster-services"},
  1622  							},
  1623  						},
  1624  					},
  1625  				},
  1626  			},
  1627  			expErr: "insufficient quota to match these scopes: [{PriorityClass In [cluster-services]}]",
  1628  		},
  1629  		{
  1630  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for PriorityClass scope. Pod admit fails because covering quota is missing for BestEffort scope",
  1631  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
  1632  			quota: &corev1.ResourceQuota{
  1633  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1634  				Spec: corev1.ResourceQuotaSpec{
  1635  					ScopeSelector: &corev1.ScopeSelector{
  1636  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1637  							{
  1638  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1639  								Operator:  corev1.ScopeSelectorOpIn,
  1640  								Values:    []string{"cluster-services"},
  1641  							},
  1642  						},
  1643  					},
  1644  				},
  1645  			},
  1646  			config: &resourcequotaapi.Configuration{
  1647  				LimitedResources: []resourcequotaapi.LimitedResource{
  1648  					{
  1649  						Resource: "pods",
  1650  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1651  							{
  1652  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1653  								Operator:  corev1.ScopeSelectorOpExists,
  1654  							},
  1655  						},
  1656  					},
  1657  					{
  1658  						Resource: "pods",
  1659  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1660  							{
  1661  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1662  								Operator:  corev1.ScopeSelectorOpIn,
  1663  								Values:    []string{"cluster-services"},
  1664  							},
  1665  						},
  1666  					},
  1667  				},
  1668  			},
  1669  			expErr: "insufficient quota to match these scopes: [{BestEffort Exists []}]",
  1670  		},
  1671  		{
  1672  			description: "Two scopes,BestEffort and PriorityClassIN, in two LimitedResources. Both the scopes matches pod. Quota available only for both the scopes. Pod admit success. No Error",
  1673  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("", ""), getResourceList("", "")), "cluster-services"),
  1674  			quota: &corev1.ResourceQuota{
  1675  				ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
  1676  				Spec: corev1.ResourceQuotaSpec{
  1677  					Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeBestEffort},
  1678  				},
  1679  				Status: corev1.ResourceQuotaStatus{
  1680  					Hard: corev1.ResourceList{
  1681  						corev1.ResourcePods: resource.MustParse("5"),
  1682  					},
  1683  					Used: corev1.ResourceList{
  1684  						corev1.ResourcePods: resource.MustParse("3"),
  1685  					},
  1686  				},
  1687  			},
  1688  			anotherQuota: &corev1.ResourceQuota{
  1689  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1690  				Spec: corev1.ResourceQuotaSpec{
  1691  					ScopeSelector: &corev1.ScopeSelector{
  1692  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1693  							{
  1694  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1695  								Operator:  corev1.ScopeSelectorOpIn,
  1696  								Values:    []string{"cluster-services"},
  1697  							},
  1698  						},
  1699  					},
  1700  				},
  1701  			},
  1702  			config: &resourcequotaapi.Configuration{
  1703  				LimitedResources: []resourcequotaapi.LimitedResource{
  1704  					{
  1705  						Resource: "pods",
  1706  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1707  							{
  1708  								ScopeName: corev1.ResourceQuotaScopeBestEffort,
  1709  								Operator:  corev1.ScopeSelectorOpExists,
  1710  							},
  1711  						},
  1712  					},
  1713  					{
  1714  						Resource: "pods",
  1715  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1716  							{
  1717  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1718  								Operator:  corev1.ScopeSelectorOpIn,
  1719  								Values:    []string{"cluster-services"},
  1720  							},
  1721  						},
  1722  					},
  1723  				},
  1724  			},
  1725  			expErr: "",
  1726  		},
  1727  		{
  1728  			description: "Pod allowed with priorityclass if limited scope PriorityClassNameExists not configured.",
  1729  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
  1730  			quota:       &corev1.ResourceQuota{},
  1731  			config:      &resourcequotaapi.Configuration{},
  1732  			expErr:      "",
  1733  		},
  1734  		{
  1735  			description: "quota fails, though covering quota for configured limited scope PriorityClassNameExists exists.",
  1736  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "20Gi"), getResourceList("", "")), "fake-priority"),
  1737  			quota: &corev1.ResourceQuota{
  1738  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1739  				Spec: corev1.ResourceQuotaSpec{
  1740  					ScopeSelector: &corev1.ScopeSelector{
  1741  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1742  							{
  1743  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1744  								Operator:  corev1.ScopeSelectorOpExists},
  1745  						},
  1746  					},
  1747  				},
  1748  				Status: corev1.ResourceQuotaStatus{
  1749  					Hard: corev1.ResourceList{
  1750  						corev1.ResourceMemory: resource.MustParse("10Gi"),
  1751  					},
  1752  					Used: corev1.ResourceList{
  1753  						corev1.ResourceMemory: resource.MustParse("1Gi"),
  1754  					},
  1755  				},
  1756  			},
  1757  			config: &resourcequotaapi.Configuration{
  1758  				LimitedResources: []resourcequotaapi.LimitedResource{
  1759  					{
  1760  						Resource: "pods",
  1761  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1762  							{
  1763  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1764  								Operator:  corev1.ScopeSelectorOpExists,
  1765  							},
  1766  						},
  1767  					},
  1768  				},
  1769  			},
  1770  			expErr: "forbidden: exceeded quota: quota, requested: memory=20Gi, used: memory=1Gi, limited: memory=10Gi",
  1771  		},
  1772  		{
  1773  			description: "Pod has different priorityclass than configured limited. Covering quota exists for configured limited scope PriorityClassIn.",
  1774  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "fake-priority"),
  1775  			quota: &corev1.ResourceQuota{
  1776  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1777  				Spec: corev1.ResourceQuotaSpec{
  1778  					ScopeSelector: &corev1.ScopeSelector{
  1779  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1780  							{
  1781  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1782  								Operator:  corev1.ScopeSelectorOpIn,
  1783  								Values:    []string{"cluster-services"},
  1784  							},
  1785  						},
  1786  					},
  1787  				},
  1788  			},
  1789  			config: &resourcequotaapi.Configuration{
  1790  				LimitedResources: []resourcequotaapi.LimitedResource{
  1791  					{
  1792  						Resource: "pods",
  1793  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1794  							{
  1795  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1796  								Operator:  corev1.ScopeSelectorOpIn,
  1797  								Values:    []string{"cluster-services"},
  1798  							},
  1799  						},
  1800  					},
  1801  				},
  1802  			},
  1803  			expErr: "",
  1804  		},
  1805  		{
  1806  			description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn.",
  1807  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
  1808  			quota: &corev1.ResourceQuota{
  1809  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1810  				Spec: corev1.ResourceQuotaSpec{
  1811  					ScopeSelector: &corev1.ScopeSelector{
  1812  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1813  							{
  1814  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1815  								Operator:  corev1.ScopeSelectorOpIn,
  1816  								Values:    []string{"cluster-services"},
  1817  							},
  1818  						},
  1819  					},
  1820  				},
  1821  			},
  1822  			config: &resourcequotaapi.Configuration{
  1823  				LimitedResources: []resourcequotaapi.LimitedResource{
  1824  					{
  1825  						Resource: "pods",
  1826  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1827  							{
  1828  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1829  								Operator:  corev1.ScopeSelectorOpIn,
  1830  								Values:    []string{"another-priorityclass-name", "cluster-services"},
  1831  							},
  1832  						},
  1833  					},
  1834  				},
  1835  			},
  1836  			expErr: "",
  1837  		},
  1838  		{
  1839  			description: "Pod has limited priorityclass. Covering quota  does not exist for configured limited scope PriorityClassIn.",
  1840  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
  1841  			quota: &corev1.ResourceQuota{
  1842  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1843  				Spec: corev1.ResourceQuotaSpec{
  1844  					ScopeSelector: &corev1.ScopeSelector{
  1845  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1846  							{
  1847  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1848  								Operator:  corev1.ScopeSelectorOpIn,
  1849  								Values:    []string{"another-priorityclass-name"},
  1850  							},
  1851  						},
  1852  					},
  1853  				},
  1854  			},
  1855  			config: &resourcequotaapi.Configuration{
  1856  				LimitedResources: []resourcequotaapi.LimitedResource{
  1857  					{
  1858  						Resource: "pods",
  1859  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1860  							{
  1861  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1862  								Operator:  corev1.ScopeSelectorOpIn,
  1863  								Values:    []string{"another-priorityclass-name", "cluster-services"},
  1864  							},
  1865  						},
  1866  					},
  1867  				},
  1868  			},
  1869  			expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
  1870  		},
  1871  		{
  1872  			description: "From the above test case, just changing pod priority from cluster-services to another-priorityclass-name. expecting no error",
  1873  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "another-priorityclass-name"),
  1874  			quota: &corev1.ResourceQuota{
  1875  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1876  				Spec: corev1.ResourceQuotaSpec{
  1877  					ScopeSelector: &corev1.ScopeSelector{
  1878  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1879  							{
  1880  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1881  								Operator:  corev1.ScopeSelectorOpIn,
  1882  								Values:    []string{"another-priorityclass-name"},
  1883  							},
  1884  						},
  1885  					},
  1886  				},
  1887  			},
  1888  			config: &resourcequotaapi.Configuration{
  1889  				LimitedResources: []resourcequotaapi.LimitedResource{
  1890  					{
  1891  						Resource: "pods",
  1892  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1893  							{
  1894  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1895  								Operator:  corev1.ScopeSelectorOpIn,
  1896  								Values:    []string{"another-priorityclass-name", "cluster-services"},
  1897  							},
  1898  						},
  1899  					},
  1900  				},
  1901  			},
  1902  			expErr: "",
  1903  		},
  1904  		{
  1905  			description: "Pod has limited priorityclass. Covering quota does NOT exists for configured limited scope PriorityClassIn.",
  1906  			testPod:     validPodWithPriority("not-allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
  1907  			quota:       &corev1.ResourceQuota{},
  1908  			config: &resourcequotaapi.Configuration{
  1909  				LimitedResources: []resourcequotaapi.LimitedResource{
  1910  					{
  1911  						Resource: "pods",
  1912  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1913  							{
  1914  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1915  								Operator:  corev1.ScopeSelectorOpIn,
  1916  								Values:    []string{"another-priorityclass-name", "cluster-services"},
  1917  							},
  1918  						},
  1919  					},
  1920  				},
  1921  			},
  1922  			expErr: "insufficient quota to match these scopes: [{PriorityClass In [another-priorityclass-name cluster-services]}]",
  1923  		},
  1924  		{
  1925  			description: "Pod has limited priorityclass. Covering quota exists for configured limited scope PriorityClassIn through PriorityClassNameExists",
  1926  			testPod:     validPodWithPriority("allowed-pod", 1, getResourceRequirements(getResourceList("3", "2Gi"), getResourceList("", "")), "cluster-services"),
  1927  			quota: &corev1.ResourceQuota{
  1928  				ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
  1929  				Spec: corev1.ResourceQuotaSpec{
  1930  					ScopeSelector: &corev1.ScopeSelector{
  1931  						MatchExpressions: []corev1.ScopedResourceSelectorRequirement{
  1932  							{
  1933  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1934  								Operator:  corev1.ScopeSelectorOpExists},
  1935  						},
  1936  					},
  1937  				},
  1938  			},
  1939  			config: &resourcequotaapi.Configuration{
  1940  				LimitedResources: []resourcequotaapi.LimitedResource{
  1941  					{
  1942  						Resource: "pods",
  1943  						MatchScopes: []corev1.ScopedResourceSelectorRequirement{
  1944  							{
  1945  								ScopeName: corev1.ResourceQuotaScopePriorityClass,
  1946  								Operator:  corev1.ScopeSelectorOpIn,
  1947  								Values:    []string{"another-priorityclass-name", "cluster-services"},
  1948  							},
  1949  						},
  1950  					},
  1951  				},
  1952  			},
  1953  			expErr: "",
  1954  		},
  1955  	}
  1956  
  1957  	for _, testCase := range testCases {
  1958  		newPod := testCase.testPod
  1959  		config := testCase.config
  1960  		resourceQuota := testCase.quota
  1961  		kubeClient := fake.NewSimpleClientset(resourceQuota)
  1962  		if testCase.anotherQuota != nil {
  1963  			kubeClient = fake.NewSimpleClientset(resourceQuota, testCase.anotherQuota)
  1964  		}
  1965  		indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{"namespace": cache.MetaNamespaceIndexFunc})
  1966  		stopCh := make(chan struct{})
  1967  		defer close(stopCh)
  1968  
  1969  		informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  1970  
  1971  		handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  1972  		if err != nil {
  1973  			t.Errorf("Error occurred while creating admission plugin: %v", err)
  1974  		}
  1975  
  1976  		indexer.Add(resourceQuota)
  1977  		if testCase.anotherQuota != nil {
  1978  			indexer.Add(testCase.anotherQuota)
  1979  		}
  1980  		err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newPod, nil, api.Kind("Pod").WithVersion("version"), newPod.Namespace, newPod.Name, corev1.Resource("pods").WithVersion("version"), "", admission.Create, &metav1.CreateOptions{}, false, nil), nil)
  1981  		if testCase.expErr == "" {
  1982  			if err != nil {
  1983  				t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
  1984  			}
  1985  		} else {
  1986  			if !strings.Contains(fmt.Sprintf("%v", err), testCase.expErr) {
  1987  				t.Fatalf("Testcase, %v, failed with unexpected error: %v. ExpErr: %v", testCase.description, err, testCase.expErr)
  1988  			}
  1989  		}
  1990  
  1991  	}
  1992  }
  1993  
  1994  // TestAdmitZeroDeltaUsageWithoutCoveringQuota verifies that resource quota is not required for zero delta requests.
  1995  func TestAdmitZeroDeltaUsageWithoutCoveringQuota(t *testing.T) {
  1996  
  1997  	kubeClient := fake.NewSimpleClientset()
  1998  	stopCh := make(chan struct{})
  1999  	defer close(stopCh)
  2000  
  2001  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  2002  
  2003  	// disable services unless there is a covering quota.
  2004  	config := &resourcequotaapi.Configuration{
  2005  		LimitedResources: []resourcequotaapi.LimitedResource{
  2006  			{
  2007  				Resource:      "services",
  2008  				MatchContains: []string{"services.loadbalancers"},
  2009  			},
  2010  		},
  2011  	}
  2012  
  2013  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  2014  	if err != nil {
  2015  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  2016  	}
  2017  
  2018  	existingService := &api.Service{
  2019  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
  2020  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
  2021  	}
  2022  	newService := &api.Service{
  2023  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
  2024  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
  2025  	}
  2026  
  2027  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.CreateOptions{}, false, nil), nil)
  2028  	if err != nil {
  2029  		t.Errorf("unexpected error: %v", err)
  2030  	}
  2031  }
  2032  
  2033  // TestAdmitRejectIncreaseUsageWithoutCoveringQuota verifies that resource quota is required for delta requests that increase usage.
  2034  func TestAdmitRejectIncreaseUsageWithoutCoveringQuota(t *testing.T) {
  2035  	kubeClient := fake.NewSimpleClientset()
  2036  	stopCh := make(chan struct{})
  2037  	defer close(stopCh)
  2038  
  2039  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  2040  
  2041  	// disable services unless there is a covering quota.
  2042  	config := &resourcequotaapi.Configuration{
  2043  		LimitedResources: []resourcequotaapi.LimitedResource{
  2044  			{
  2045  				Resource:      "services",
  2046  				MatchContains: []string{"services.loadbalancers"},
  2047  			},
  2048  		},
  2049  	}
  2050  
  2051  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  2052  	if err != nil {
  2053  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  2054  	}
  2055  
  2056  	existingService := &api.Service{
  2057  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
  2058  		Spec: api.ServiceSpec{
  2059  			Type:  api.ServiceTypeNodePort,
  2060  			Ports: []api.ServicePort{{Port: 1234}},
  2061  		},
  2062  	}
  2063  	newService := &api.Service{
  2064  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
  2065  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
  2066  	}
  2067  
  2068  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
  2069  	if err == nil {
  2070  		t.Errorf("Expected an error for consuming a limited resource without quota.")
  2071  	}
  2072  }
  2073  
  2074  // TestAdmitAllowDecreaseUsageWithoutCoveringQuota verifies that resource quota is not required for delta requests that decrease usage.
  2075  func TestAdmitAllowDecreaseUsageWithoutCoveringQuota(t *testing.T) {
  2076  	kubeClient := fake.NewSimpleClientset()
  2077  	stopCh := make(chan struct{})
  2078  	defer close(stopCh)
  2079  
  2080  	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
  2081  
  2082  	// disable services unless there is a covering quota.
  2083  	config := &resourcequotaapi.Configuration{
  2084  		LimitedResources: []resourcequotaapi.LimitedResource{
  2085  			{
  2086  				Resource:      "services",
  2087  				MatchContains: []string{"services.loadbalancers"},
  2088  			},
  2089  		},
  2090  	}
  2091  
  2092  	handler, err := createHandlerWithConfig(kubeClient, informerFactory, config, stopCh)
  2093  	if err != nil {
  2094  		t.Errorf("Error occurred while creating admission plugin: %v", err)
  2095  	}
  2096  
  2097  	existingService := &api.Service{
  2098  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
  2099  		Spec:       api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
  2100  	}
  2101  	newService := &api.Service{
  2102  		ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
  2103  		Spec: api.ServiceSpec{
  2104  			Type:  api.ServiceTypeNodePort,
  2105  			Ports: []api.ServicePort{{Port: 1234}},
  2106  		},
  2107  	}
  2108  
  2109  	err = handler.Validate(context.TODO(), admission.NewAttributesRecord(newService, existingService, api.Kind("Service").WithVersion("version"), newService.Namespace, newService.Name, corev1.Resource("services").WithVersion("version"), "", admission.Update, &metav1.UpdateOptions{}, false, nil), nil)
  2110  
  2111  	if err != nil {
  2112  		t.Errorf("Expected no error for decreasing a limited resource without quota, got %v", err)
  2113  	}
  2114  }
  2115  
