...

Source file src/k8s.io/kubernetes/pkg/controller/podautoscaler/replica_calculator_test.go

Documentation: k8s.io/kubernetes/pkg/controller/podautoscaler

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package podautoscaler
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math"
    23  	"testing"
    24  	"time"
    25  
    26  	autoscalingv2 "k8s.io/api/autoscaling/v2"
    27  	v1 "k8s.io/api/core/v1"
    28  	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
    29  	"k8s.io/apimachinery/pkg/api/resource"
    30  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    31  	"k8s.io/apimachinery/pkg/runtime"
    32  	"k8s.io/apimachinery/pkg/runtime/schema"
    33  	"k8s.io/apimachinery/pkg/util/sets"
    34  	"k8s.io/client-go/informers"
    35  	"k8s.io/client-go/kubernetes/fake"
    36  	core "k8s.io/client-go/testing"
    37  	"k8s.io/client-go/tools/cache"
    38  	"k8s.io/kubernetes/pkg/api/legacyscheme"
    39  	"k8s.io/kubernetes/pkg/controller"
    40  	metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    41  	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
    42  	emapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
    43  	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
    44  	metricsfake "k8s.io/metrics/pkg/client/clientset/versioned/fake"
    45  	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
    46  	emfake "k8s.io/metrics/pkg/client/external_metrics/fake"
    47  
    48  	"github.com/stretchr/testify/assert"
    49  	"github.com/stretchr/testify/require"
    50  )
    51  
// resourceInfo describes a resource-metric (CPU/memory) scenario: the
// per-pod requests and the usage levels served by the fake metrics client,
// plus the utilization/raw value the calculator is expected to report.
type resourceInfo struct {
	name     v1.ResourceName     // resource the test scales on (e.g. CPU)
	requests []resource.Quantity // per-container request, one entry per pod
	levels   [][]int64           // reported usage in milli-units: levels[pod][container]
	// only applies to pod names returned from "heapster"
	podNames []string

	targetUtilization   int32 // target utilization percentage passed to the calculator
	expectedUtilization int32 // utilization percentage the calculator should compute
	expectedValue       int64 // raw usage value the calculator should compute
}
    63  
// metricType selects which ReplicaCalculator entry point runTest exercises
// for a metricInfo-based test case.
type metricType int

const (
	objectMetric metricType = iota // GetObjectMetricReplicas
	objectPerPodMetric             // GetObjectPerPodMetricReplicas
	externalMetric                 // GetExternalMetricReplicas
	externalPerPodMetric           // GetExternalPerPodMetricReplicas
	podMetric                      // GetMetricReplicas
)
    73  
// metricInfo describes a custom/object/external metric scenario; which of
// the target fields applies depends on metricType (see runTest).
type metricInfo struct {
	name         string                                     // metric name the fake clients expect to be asked for
	levels       []int64                                    // metric samples served, in milli-units
	singleObject *autoscalingv2.CrossVersionObjectReference // required for object / object-per-pod metrics
	selector     *metav1.LabelSelector                      // required for external metrics
	metricType   metricType                                 // which calculator entry point to exercise

	targetUsage       int64 // target for the non-per-pod entry points
	perPodTargetUsage int64 // target for the per-pod entry points
	expectedUsage     int64 // usage the calculator should report
}
    85  
// replicaCalcTestCase is a declarative test case for ReplicaCalculator.
// Exactly one of resource or metric should be set; runTest picks the
// calculator entry point accordingly and asserts the outputs (or the
// expected error).
type replicaCalcTestCase struct {
	currentReplicas  int32 // replicas the scale target currently has
	expectedReplicas int32 // replicas the calculator should recommend
	expectedError    error // when set, runTest asserts this error instead of the outputs

	timestamp time.Time // timestamp stamped onto every fake metric sample

	resource  *resourceInfo // resource-metric scenario (CPU/memory), or nil
	metric    *metricInfo   // custom/object/external metric scenario, or nil
	container string        // when non-empty, scale on this single container's metrics

	// Optional per-pod state for the fake pod list; entry i applies to pod
	// "test-pod-i" (see prepareTestClientSet for the defaults).
	podReadiness         []v1.ConditionStatus
	podStartTime         []metav1.Time
	podPhase             []v1.PodPhase
	podDeletionTimestamp []bool
}
   102  
const (
	testNamespace       = "test-namespace" // namespace all fake pods and metrics live in
	podNamePrefix       = "test-pod"       // fake pods are named <prefix>-<index>
	numContainersPerPod = 2                // every fake pod has container1 and container2
)
   108  
   109  func (tc *replicaCalcTestCase) prepareTestClientSet() *fake.Clientset {
   110  	fakeClient := &fake.Clientset{}
   111  	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   112  		obj := &v1.PodList{}
   113  		podsCount := int(tc.currentReplicas)
   114  		// Failed pods are not included in tc.currentReplicas
   115  		if tc.podPhase != nil && len(tc.podPhase) > podsCount {
   116  			podsCount = len(tc.podPhase)
   117  		}
   118  		for i := 0; i < podsCount; i++ {
   119  			podReadiness := v1.ConditionTrue
   120  			if tc.podReadiness != nil && i < len(tc.podReadiness) {
   121  				podReadiness = tc.podReadiness[i]
   122  			}
   123  			var podStartTime metav1.Time
   124  			if tc.podStartTime != nil {
   125  				podStartTime = tc.podStartTime[i]
   126  			}
   127  			podPhase := v1.PodRunning
   128  			if tc.podPhase != nil {
   129  				podPhase = tc.podPhase[i]
   130  			}
   131  			podDeletionTimestamp := false
   132  			if tc.podDeletionTimestamp != nil {
   133  				podDeletionTimestamp = tc.podDeletionTimestamp[i]
   134  			}
   135  			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
   136  			pod := v1.Pod{
   137  				Status: v1.PodStatus{
   138  					Phase:     podPhase,
   139  					StartTime: &podStartTime,
   140  					Conditions: []v1.PodCondition{
   141  						{
   142  							Type:   v1.PodReady,
   143  							Status: podReadiness,
   144  						},
   145  					},
   146  				},
   147  				ObjectMeta: metav1.ObjectMeta{
   148  					Name:      podName,
   149  					Namespace: testNamespace,
   150  					Labels: map[string]string{
   151  						"name": podNamePrefix,
   152  					},
   153  				},
   154  				Spec: v1.PodSpec{
   155  					Containers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
   156  				},
   157  			}
   158  			if podDeletionTimestamp {
   159  				pod.DeletionTimestamp = &metav1.Time{Time: time.Now()}
   160  			}
   161  
   162  			if tc.resource != nil && i < len(tc.resource.requests) {
   163  				pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
   164  					Requests: v1.ResourceList{
   165  						tc.resource.name: tc.resource.requests[i],
   166  					},
   167  				}
   168  				pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
   169  					Requests: v1.ResourceList{
   170  						tc.resource.name: tc.resource.requests[i],
   171  					},
   172  				}
   173  			}
   174  			obj.Items = append(obj.Items, pod)
   175  		}
   176  		return true, obj, nil
   177  	})
   178  	return fakeClient
   179  }
   180  
   181  func (tc *replicaCalcTestCase) prepareTestMetricsClient() *metricsfake.Clientset {
   182  	fakeMetricsClient := &metricsfake.Clientset{}
   183  	// NB: we have to sound like Gollum due to gengo's inability to handle already-plural resource names
   184  	fakeMetricsClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   185  		if tc.resource != nil {
   186  			metrics := &metricsapi.PodMetricsList{}
   187  			for i, resValue := range tc.resource.levels {
   188  				podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
   189  				if len(tc.resource.podNames) > i {
   190  					podName = tc.resource.podNames[i]
   191  				}
   192  				// NB: the list reactor actually does label selector filtering for us,
   193  				// so we have to make sure our results match the label selector
   194  				podMetric := metricsapi.PodMetrics{
   195  					ObjectMeta: metav1.ObjectMeta{
   196  						Name:      podName,
   197  						Namespace: testNamespace,
   198  						Labels:    map[string]string{"name": podNamePrefix},
   199  					},
   200  					Timestamp:  metav1.Time{Time: tc.timestamp},
   201  					Window:     metav1.Duration{Duration: time.Minute},
   202  					Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
   203  				}
   204  
   205  				for i, m := range resValue {
   206  					podMetric.Containers[i] = metricsapi.ContainerMetrics{
   207  						Name: fmt.Sprintf("container%v", i+1),
   208  						Usage: v1.ResourceList{
   209  							tc.resource.name: *resource.NewMilliQuantity(m, resource.DecimalSI),
   210  						},
   211  					}
   212  				}
   213  				metrics.Items = append(metrics.Items, podMetric)
   214  			}
   215  			return true, metrics, nil
   216  		}
   217  
   218  		return true, nil, fmt.Errorf("no pod resource metrics specified in test client")
   219  	})
   220  	return fakeMetricsClient
   221  }
   222  
   223  func (tc *replicaCalcTestCase) prepareTestCMClient(t *testing.T) *cmfake.FakeCustomMetricsClient {
   224  	fakeCMClient := &cmfake.FakeCustomMetricsClient{}
   225  	fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   226  		getForAction, wasGetFor := action.(cmfake.GetForAction)
   227  		if !wasGetFor {
   228  			return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
   229  		}
   230  
   231  		if tc.metric == nil {
   232  			return true, nil, fmt.Errorf("no custom metrics specified in test client")
   233  		}
   234  
   235  		assert.Equal(t, tc.metric.name, getForAction.GetMetricName(), "the metric requested should have matched the one specified")
   236  
   237  		if getForAction.GetName() == "*" {
   238  			metrics := cmapi.MetricValueList{}
   239  
   240  			// multiple objects
   241  			assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
   242  
   243  			for i, level := range tc.metric.levels {
   244  				podMetric := cmapi.MetricValue{
   245  					DescribedObject: v1.ObjectReference{
   246  						Kind:      "Pod",
   247  						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
   248  						Namespace: testNamespace,
   249  					},
   250  					Timestamp: metav1.Time{Time: tc.timestamp},
   251  					Metric: cmapi.MetricIdentifier{
   252  						Name: tc.metric.name,
   253  					},
   254  					Value: *resource.NewMilliQuantity(level, resource.DecimalSI),
   255  				}
   256  				metrics.Items = append(metrics.Items, podMetric)
   257  			}
   258  
   259  			return true, &metrics, nil
   260  		}
   261  		name := getForAction.GetName()
   262  		mapper := testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)
   263  		metrics := &cmapi.MetricValueList{}
   264  		assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
   265  		gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()
   266  		mapping, err := mapper.RESTMapping(gk)
   267  		if err != nil {
   268  			return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
   269  		}
   270  		groupResource := mapping.Resource.GroupResource()
   271  
   272  		assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
   273  		assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
   274  
   275  		metrics.Items = []cmapi.MetricValue{
   276  			{
   277  				DescribedObject: v1.ObjectReference{
   278  					Kind:       tc.metric.singleObject.Kind,
   279  					APIVersion: tc.metric.singleObject.APIVersion,
   280  					Name:       name,
   281  				},
   282  				Timestamp: metav1.Time{Time: tc.timestamp},
   283  				Metric: cmapi.MetricIdentifier{
   284  					Name: tc.metric.name,
   285  				},
   286  				Value: *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
   287  			},
   288  		}
   289  
   290  		return true, metrics, nil
   291  	})
   292  	return fakeCMClient
   293  }
   294  
   295  func (tc *replicaCalcTestCase) prepareTestEMClient(t *testing.T) *emfake.FakeExternalMetricsClient {
   296  	fakeEMClient := &emfake.FakeExternalMetricsClient{}
   297  	fakeEMClient.AddReactor("list", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
   298  		listAction, wasList := action.(core.ListAction)
   299  		if !wasList {
   300  			return true, nil, fmt.Errorf("expected a list-for action, got %v instead", action)
   301  		}
   302  
   303  		if tc.metric == nil {
   304  			return true, nil, fmt.Errorf("no external metrics specified in test client")
   305  		}
   306  
   307  		assert.Equal(t, tc.metric.name, listAction.GetResource().Resource, "the metric requested should have matched the one specified")
   308  
   309  		selector, err := metav1.LabelSelectorAsSelector(tc.metric.selector)
   310  		if err != nil {
   311  			return true, nil, fmt.Errorf("failed to convert label selector specified in test client")
   312  		}
   313  		assert.Equal(t, selector, listAction.GetListRestrictions().Labels, "the metric selector should have matched the one specified")
   314  
   315  		metrics := emapi.ExternalMetricValueList{}
   316  
   317  		for _, level := range tc.metric.levels {
   318  			metric := emapi.ExternalMetricValue{
   319  				Timestamp:  metav1.Time{Time: tc.timestamp},
   320  				MetricName: tc.metric.name,
   321  				Value:      *resource.NewMilliQuantity(level, resource.DecimalSI),
   322  			}
   323  			metrics.Items = append(metrics.Items, metric)
   324  		}
   325  
   326  		return true, &metrics, nil
   327  	})
   328  	return fakeEMClient
   329  }
   330  
   331  func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *emfake.FakeExternalMetricsClient) {
   332  	fakeClient := tc.prepareTestClientSet()
   333  	fakeMetricsClient := tc.prepareTestMetricsClient()
   334  	fakeCMClient := tc.prepareTestCMClient(t)
   335  	fakeEMClient := tc.prepareTestEMClient(t)
   336  	return fakeClient, fakeMetricsClient, fakeCMClient, fakeEMClient
   337  }
   338  
   339  func (tc *replicaCalcTestCase) runTest(t *testing.T) {
   340  	testClient, testMetricsClient, testCMClient, testEMClient := tc.prepareTestClient(t)
   341  	metricsClient := metricsclient.NewRESTMetricsClient(testMetricsClient.MetricsV1beta1(), testCMClient, testEMClient)
   342  
   343  	informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
   344  	informer := informerFactory.Core().V1().Pods()
   345  
   346  	replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
   347  
   348  	stop := make(chan struct{})
   349  	defer close(stop)
   350  	informerFactory.Start(stop)
   351  	if !cache.WaitForNamedCacheSync("HPA", stop, informer.Informer().HasSynced) {
   352  		return
   353  	}
   354  
   355  	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
   356  		MatchLabels: map[string]string{"name": podNamePrefix},
   357  	})
   358  	if err != nil {
   359  		require.Nil(t, err, "something went horribly wrong...")
   360  	}
   361  
   362  	if tc.resource != nil {
   363  		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(context.TODO(), tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, tc.container)
   364  
   365  		if tc.expectedError != nil {
   366  			require.Error(t, err, "there should be an error calculating the replica count")
   367  			assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
   368  			return
   369  		}
   370  		require.NoError(t, err, "there should not have been an error calculating the replica count")
   371  		assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
   372  		assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
   373  		assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
   374  		assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
   375  		return
   376  	}
   377  
   378  	var outReplicas int32
   379  	var outUsage int64
   380  	var outTimestamp time.Time
   381  	switch tc.metric.metricType {
   382  	case objectMetric:
   383  		if tc.metric.singleObject == nil {
   384  			t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
   385  		}
   386  		outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, tc.metric.singleObject, selector, nil)
   387  	case objectPerPodMetric:
   388  		if tc.metric.singleObject == nil {
   389  			t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
   390  		}
   391  		outReplicas, outUsage, outTimestamp, err = replicaCalc.GetObjectPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, testNamespace, tc.metric.singleObject, nil)
   392  	case externalMetric:
   393  		if tc.metric.selector == nil {
   394  			t.Fatal("Metric specified as externalMetric but metric.selector is nil.")
   395  		}
   396  		if tc.metric.targetUsage <= 0 {
   397  			t.Fatalf("Metric specified as externalMetric but metric.targetUsage is %d which is <=0.", tc.metric.targetUsage)
   398  		}
   399  		outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, tc.metric.selector, selector)
   400  	case externalPerPodMetric:
   401  		if tc.metric.selector == nil {
   402  			t.Fatal("Metric specified as externalPerPodMetric but metric.selector is nil.")
   403  		}
   404  		if tc.metric.perPodTargetUsage <= 0 {
   405  			t.Fatalf("Metric specified as externalPerPodMetric but metric.perPodTargetUsage is %d which is <=0.", tc.metric.perPodTargetUsage)
   406  		}
   407  
   408  		outReplicas, outUsage, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUsage, tc.metric.name, testNamespace, tc.metric.selector)
   409  	case podMetric:
   410  		outReplicas, outUsage, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUsage, tc.metric.name, testNamespace, selector, nil)
   411  	default:
   412  		t.Fatalf("Unknown metric type: %d", tc.metric.metricType)
   413  	}
   414  
   415  	if tc.expectedError != nil {
   416  		require.Error(t, err, "there should be an error calculating the replica count")
   417  		assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
   418  		return
   419  	}
   420  	require.NoError(t, err, "there should not have been an error calculating the replica count")
   421  	assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
   422  	assert.Equal(t, tc.metric.expectedUsage, outUsage, "usage should be as expected")
   423  	assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
   424  }
   425  func makePodMetricLevels(containerMetric ...int64) [][]int64 {
   426  	metrics := make([][]int64, len(containerMetric))
   427  	for i := 0; i < len(containerMetric); i++ {
   428  		metrics[i] = make([]int64, numContainersPerPod)
   429  		for j := 0; j < numContainersPerPod; j++ {
   430  			metrics[i][j] = containerMetric[i]
   431  		}
   432  	}
   433  	return metrics
   434  }
// TestReplicaCalcDisjointResourcesMetrics serves metrics under a pod name
// ("an-older-pod-name") that matches none of the listed pods, so the
// calculator must fail with "no metrics returned matched known pods".
func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 1,
		expectedError:   fmt.Errorf("no metrics returned matched known pods"),
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100),
			podNames: []string{"an-older-pod-name"},

			targetUtilization: 100,
		},
	}
	tc.runTest(t)
}
   450  
// TestReplicaCalcMissingContainerMetricError targets container2 while the
// fake metrics only include a single container entry per pod, so the
// calculator must report the missing container.
func TestReplicaCalcMissingContainerMetricError(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 1,
		expectedError:   fmt.Errorf("container container2 not present in metrics for pod test-namespace/test-pod-0"),
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0")},
			levels:   [][]int64{{0}}, // one pod, metrics for one container only
		},
		container: "container2",
	}
	tc.runTest(t)
}
   464  
   465  func TestReplicaCalcScaleUp(t *testing.T) {
   466  	tc := replicaCalcTestCase{
   467  		currentReplicas:  3,
   468  		expectedReplicas: 5,
   469  		resource: &resourceInfo{
   470  			name:     v1.ResourceCPU,
   471  			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
   472  			levels:   makePodMetricLevels(300, 500, 700),
   473  
   474  			targetUtilization:   30,
   475  			expectedUtilization: 50,
   476  			expectedValue:       numContainersPerPod * 500,
   477  		},
   478  	}
   479  	tc.runTest(t)
   480  }
   481  
// TestReplicaCalcContainerScaleUp scales on container2 only: its levels
// (300/500/700) average 50% against a 30% target, while container1's
// constant 1000m must be ignored.
func TestReplicaCalcContainerScaleUp(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 5,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 300}, {1000, 500}, {1000, 700}}, // {container1, container2} per pod

			targetUtilization:   30,
			expectedUtilization: 50,
			expectedValue:       500,
		},
		container: "container2",
	}
	tc.runTest(t)
}
   499  
// TestReplicaCalcScaleUpUnreadyLessScale marks the first pod unready; its
// metrics are excluded, so utilization is computed from the two ready pods
// (500/700 → 60%) and the scale-up is damped to 4 instead of 5.
func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(300, 500, 700),

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       numContainersPerPod * 600,
		},
	}
	tc.runTest(t)
}
   517  
// TestReplicaCalcScaleUpContainerHotCpuLessScale starts the first pod within
// the CPU initialization window (hotCPUCreationTime), so only the other two
// pods' container2 levels (500/700 → 60%) drive the scale-up to 4.
func TestReplicaCalcScaleUpContainerHotCpuLessScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		podStartTime:     []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{0, 300}, {0, 500}, {0, 700}}, // {container1, container2} per pod

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       600,
		},
		container: "container2",
	}
	tc.runTest(t)
}
   536  
// TestReplicaCalcScaleUpHotCpuLessScale starts the first pod within the CPU
// initialization window, so utilization is computed from the remaining two
// pods (500/700 → 60%) and the scale-up is damped to 4.
func TestReplicaCalcScaleUpHotCpuLessScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		podStartTime:     []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(300, 500, 700),

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       numContainersPerPod * 600,
		},
	}
	tc.runTest(t)
}
   554  
// TestReplicaCalcScaleUpUnreadyNoScale has only the first pod ready (40%
// utilization); with the two unready pods excluded the calculator should
// keep the replica count at 3 despite the 30% target being exceeded.
func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(400, 500, 700),

			targetUtilization:   30,
			expectedUtilization: 40,
			expectedValue:       numContainersPerPod * 400,
		},
	}
	tc.runTest(t)
}
   572  
// TestReplicaCalcScaleHotCpuNoScale combines unready pods with hot-CPU start
// times: only the first pod (ready, cool CPU, 40%) counts, and the replica
// count should stay at 3.
func TestReplicaCalcScaleHotCpuNoScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podStartTime:     []metav1.Time{coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(400, 500, 700),

			targetUtilization:   30,
			expectedUtilization: 40,
			expectedValue:       numContainersPerPod * 400,
		},
	}
	tc.runTest(t)
}
   591  
// TestReplicaCalcScaleUpIgnoresFailedPods lists four pods of which two are
// failed (and have no metrics); the calculation uses only the two running
// pods (500/700 → 60% vs 30%), scaling 2 → 4.
func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 4,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(500, 700), // metrics only for the two running pods

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       numContainersPerPod * 600,
		},
	}
	tc.runTest(t)
}
   610  
// TestReplicaCalcScaleUpContainerIgnoresFailedPods is the per-container
// variant of the failed-pods test: container2 of the two running pods
// (500/700 → 60% vs 30%) drives the 2 → 4 scale-up.
func TestReplicaCalcScaleUpContainerIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 4,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 500}, {9000, 700}}, // {container1, container2}; container1 must be ignored

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       600,
		},
		container: "container2",
	}
	tc.runTest(t)
}
   630  
// TestReplicaCalcScaleUpIgnoresDeletionPods lists four running pods of which
// two carry a deletion timestamp; only the two live pods (500/700 → 60% vs
// 30%) are counted, scaling 2 → 4.
func TestReplicaCalcScaleUpIgnoresDeletionPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:      2,
		expectedReplicas:     4,
		podReadiness:         []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:             []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
		podDeletionTimestamp: []bool{false, false, true, true},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(500, 700), // metrics only for the two live pods

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       numContainersPerPod * 600,
		},
	}
	tc.runTest(t)
}
   650  
// TestReplicaCalcScaleUpContainerIgnoresDeletionPods is the per-container
// variant of the deletion-timestamp test, scaling on container1 of the two
// live pods (500/700 → 60% vs 30%), 2 → 4.
func TestReplicaCalcScaleUpContainerIgnoresDeletionPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:      2,
		expectedReplicas:     4,
		podReadiness:         []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:             []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
		podDeletionTimestamp: []bool{false, false, true, true},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(500, 700), // TODO: This test is broken and works only because of missing metrics

			targetUtilization:   30,
			expectedUtilization: 60,
			expectedValue:       600,
		},
		container: "container1",
	}
	tc.runTest(t)
}
   671  
   672  func TestReplicaCalcScaleUpCM(t *testing.T) {
   673  	tc := replicaCalcTestCase{
   674  		currentReplicas:  3,
   675  		expectedReplicas: 4,
   676  		metric: &metricInfo{
   677  			name:          "qps",
   678  			levels:        []int64{20000, 10000, 30000},
   679  			targetUsage:   15000,
   680  			expectedUsage: 20000,
   681  			metricType:    podMetric,
   682  		},
   683  	}
   684  	tc.runTest(t)
   685  }
   686  
// TestReplicaCalcScaleUpCMUnreadyHotCpuNoLessScale: for custom metrics the
// unready/hot-CPU third pod's sample is still part of the calculation, so
// the average (30000m vs 15000m target) doubles the replica count to 6.
func TestReplicaCalcScaleUpCMUnreadyHotCpuNoLessScale(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 6,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
		podStartTime:     []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{50000, 10000, 30000},
			targetUsage:   15000,
			expectedUsage: 30000,
			metricType:    podMetric,
		},
	}
	tc.runTest(t)
}
   703  
// TestReplicaCalcScaleUpCMUnreadyHotCpuScaleWouldScaleDown: two of three pods
// are unready with hot-CPU start times, yet the custom-metric average
// (31666m vs 15000m target) still produces a scale-up to 7.
func TestReplicaCalcScaleUpCMUnreadyHotCpuScaleWouldScaleDown(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 7,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
		podStartTime:     []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime()},
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{50000, 15000, 30000},
			targetUsage:   15000,
			expectedUsage: 31666,
			metricType:    podMetric,
		},
	}
	tc.runTest(t)
}
   720  
// TestReplicaCalcScaleUpCMObject: a single-object metric (metricType is left
// at its zero value, objectMetric) of 20000m against a 15000m target should
// scale 3 replicas up to 4.
func TestReplicaCalcScaleUpCMObject(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{20000},
			targetUsage:   15000,
			expectedUsage: 20000,
			singleObject: &autoscalingv2.CrossVersionObjectReference{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
				Name:       "some-deployment",
			},
		},
	}
	tc.runTest(t)
}
   739  
// TestReplicaCalcScaleUpCMPerPodObject: an object metric of 20000m divided
// over 3 pods gives ~6667m per pod against a 5000m per-pod target, scaling
// up to 4 replicas.
func TestReplicaCalcScaleUpCMPerPodObject(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		metric: &metricInfo{
			metricType:        objectPerPodMetric,
			name:              "qps",
			levels:            []int64{20000},
			perPodTargetUsage: 5000,
			expectedUsage:     6667,
			singleObject: &autoscalingv2.CrossVersionObjectReference{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
				Name:       "some-deployment",
			},
		},
	}
	tc.runTest(t)
}
   759  
// TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods verifies that, for an object
// metric, only ready pods count toward the new replica total: with one ready
// pod and usage 5x over target, 3 replicas grow to 5 (5x * 1 ready pod), not 15.
func TestReplicaCalcScaleUpCMObjectIgnoresUnreadyPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 5, // If we did not ignore unready pods, we'd expect 15 replicas.
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{50000},
			targetUsage:   10000,
			expectedUsage: 50000,
			singleObject: &autoscalingv2.CrossVersionObjectReference{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
				Name:       "some-deployment",
			},
		},
	}
	tc.runTest(t)
}
   779  
   780  func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
   781  	tc := replicaCalcTestCase{
   782  		currentReplicas:  1,
   783  		expectedReplicas: 2,
   784  		metric: &metricInfo{
   785  			name:          "qps",
   786  			levels:        []int64{8600},
   787  			targetUsage:   4400,
   788  			expectedUsage: 8600,
   789  			selector:      &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
   790  			metricType:    podMetric,
   791  		},
   792  	}
   793  	tc.runTest(t)
   794  }
   795  
// TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods verifies that the external
// metric path scales against the ready-pod count only: with one ready pod and
// usage ~2x over target, 3 replicas become 2 (ceil(8600/4400 * 1)), not 6.
func TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 2, // Would expect 6 if we didn't ignore unready pods
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{8600},
			targetUsage:   4400,
			expectedUsage: 8600,
			selector:      &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
			metricType:    externalMetric,
		},
	}
	tc.runTest(t)
}
   812  
// TestReplicaCalcScaleUpCMExternalNoLabels checks scale-up (1 -> 2) on a metric
// fixture that sets no label selector.
// NOTE(review): despite the "CMExternal" name this fixture uses podMetric and
// therefore exercises the pod-metrics path; confirm whether externalMetric was
// intended (runTest's external path may require a non-nil selector).
func TestReplicaCalcScaleUpCMExternalNoLabels(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  1,
		expectedReplicas: 2,
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{8600},
			targetUsage:   4400,
			expectedUsage: 8600,
			metricType:    podMetric,
		},
	}
	tc.runTest(t)
}
   827  
   828  func TestReplicaCalcScaleUpPerPodCMExternal(t *testing.T) {
   829  	tc := replicaCalcTestCase{
   830  		currentReplicas:  3,
   831  		expectedReplicas: 4,
   832  		metric: &metricInfo{
   833  			name:              "qps",
   834  			levels:            []int64{8600},
   835  			perPodTargetUsage: 2150,
   836  			expectedUsage:     2867,
   837  			selector:          &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
   838  			metricType:        externalPerPodMetric,
   839  		},
   840  	}
   841  	tc.runTest(t)
   842  }
   843  
// TestReplicaCalcScaleDown verifies CPU-based scale-down: five pods averaging
// 280m of a 1-core request (28% vs a 50% target) shrink from 5 to 3 replicas.
func TestReplicaCalcScaleDown(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       numContainersPerPod * 280,
		},
	}
	tc.runTest(t)
}
   860  
// TestReplicaCalcContainerScaleDown verifies container-scoped scale-down: only
// "container2" levels (avg 280m, 28% vs a 50% target) are considered, shrinking
// 5 replicas to 3; the first container's constant 1000m is ignored.
func TestReplicaCalcContainerScaleDown(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       280,
		},
		container: "container2",
	}
	tc.runTest(t)
}
   878  
   879  func TestReplicaCalcScaleDownCM(t *testing.T) {
   880  	tc := replicaCalcTestCase{
   881  		currentReplicas:  5,
   882  		expectedReplicas: 3,
   883  		metric: &metricInfo{
   884  			name:          "qps",
   885  			levels:        []int64{12000, 12000, 12000, 12000, 12000},
   886  			targetUsage:   20000,
   887  			expectedUsage: 12000,
   888  			metricType:    podMetric,
   889  		},
   890  	}
   891  	tc.runTest(t)
   892  }
   893  
// TestReplicaCalcScaleDownPerPodCMObject verifies scale-down on a per-pod
// object metric: an object reading of 6000 over 5 pods (1200 per pod) against a
// per-pod target of 2000 shrinks 5 replicas to 3.
func TestReplicaCalcScaleDownPerPodCMObject(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		metric: &metricInfo{
			name:              "qps",
			levels:            []int64{6000},
			perPodTargetUsage: 2000,
			expectedUsage:     1200,
			singleObject: &autoscalingv2.CrossVersionObjectReference{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
				Name:       "some-deployment",
			},
			metricType: objectPerPodMetric,
		},
	}
	tc.runTest(t)
}
   913  
   914  func TestReplicaCalcScaleDownCMObject(t *testing.T) {
   915  	tc := replicaCalcTestCase{
   916  		currentReplicas:  5,
   917  		expectedReplicas: 3,
   918  		metric: &metricInfo{
   919  			name:          "qps",
   920  			levels:        []int64{12000},
   921  			targetUsage:   20000,
   922  			expectedUsage: 12000,
   923  			singleObject: &autoscalingv2.CrossVersionObjectReference{
   924  				Kind:       "Deployment",
   925  				APIVersion: "apps/v1",
   926  				Name:       "some-deployment",
   927  			},
   928  		},
   929  	}
   930  	tc.runTest(t)
   931  }
   932  
// TestReplicaCalcScaleDownCMExternal verifies scale-down on an external metric
// with a label selector: a level of 8600 against a target of 14334 shrinks 5
// replicas to 3.
func TestReplicaCalcScaleDownCMExternal(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{8600},
			targetUsage:   14334,
			expectedUsage: 8600,
			selector:      &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
			metricType:    externalMetric,
		},
	}
	tc.runTest(t)
}
   948  
// TestReplicaCalcScaleDownPerPodCMExternal verifies scale-down on a per-pod
// external metric: 8600 over 5 pods (1720 per pod) against a per-pod target of
// 2867 shrinks 5 replicas to 3.
func TestReplicaCalcScaleDownPerPodCMExternal(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		metric: &metricInfo{
			name:              "qps",
			levels:            []int64{8600},
			perPodTargetUsage: 2867,
			expectedUsage:     1720,
			selector:          &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
			metricType:        externalPerPodMetric,
		},
	}
	tc.runTest(t)
}
   964  
// TestReplicaCalcScaleDownExcludeUnreadyPods verifies that unready pods (the
// last two) are excluded from a CPU scale-down calculation: the three ready
// pods average 300m (30% vs a 50% target), shrinking 5 replicas to 2.
func TestReplicaCalcScaleDownExcludeUnreadyPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 2,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),

			targetUtilization:   50,
			expectedUtilization: 30,
			expectedValue:       numContainersPerPod * 300,
		},
	}
	tc.runTest(t)
}
   982  
// TestReplicaCalcScaleDownContainerExcludeUnreadyPods is the container-scoped
// variant of the unready-pod exclusion test: only "container2" levels of the
// three ready pods (avg 300m, 30%) count, shrinking 5 replicas to 2.
func TestReplicaCalcScaleDownContainerExcludeUnreadyPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 2,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},

			targetUtilization:   50,
			expectedUtilization: 30,
			expectedValue:       300,
		},
		container: "container2",
	}
	tc.runTest(t)
}
  1001  
// TestReplicaCalcScaleDownExcludeUnscheduledPods verifies that pending
// (unscheduled) pods are excluded entirely: only the single running pod (100m,
// 10% vs a 50% target) counts, shrinking 5 replicas to 1.
func TestReplicaCalcScaleDownExcludeUnscheduledPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 1,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodPending, v1.PodPending, v1.PodPending, v1.PodPending},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100),

			targetUtilization:   50,
			expectedUtilization: 10,
			expectedValue:       numContainersPerPod * 100,
		},
	}
	tc.runTest(t)
}
  1020  
// TestReplicaCalcScaleDownContainerExcludeUnscheduledPods is the
// container-scoped variant of the unscheduled-pod exclusion test: only the
// running pod's "container2" level (100m, 10%) counts, shrinking 5 replicas to 1.
func TestReplicaCalcScaleDownContainerExcludeUnscheduledPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 1,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodPending, v1.PodPending, v1.PodPending, v1.PodPending},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},

			targetUtilization:   50,
			expectedUtilization: 10,
			expectedValue:       100,
		},
		container: "container2",
	}
	tc.runTest(t)
}
  1040  
// TestReplicaCalcScaleDownIgnoreHotCpuPods verifies that recently-started
// ("hot CPU") pods are ignored during scale-down: the three cool pods average
// 300m (30% vs a 50% target), shrinking 5 replicas to 2.
func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 2,
		podStartTime:     []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),

			targetUtilization:   50,
			expectedUtilization: 30,
			expectedValue:       numContainersPerPod * 300,
		},
	}
	tc.runTest(t)
}
  1058  
// TestReplicaCalcScaleDownContainerIgnoreHotCpuPods is the container-scoped
// hot-CPU test: the two hot pods report a saturated 1000m in "container2" but
// are ignored, so the cool pods' average of 300m (30%) shrinks 5 replicas to 2.
func TestReplicaCalcScaleDownContainerIgnoreHotCpuPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 2,
		podStartTime:     []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 1000}, {1000, 1000}},

			targetUtilization:   50,
			expectedUtilization: 30,
			expectedValue:       300,
		},
		container: "container2",
	}
	tc.runTest(t)
}
  1077  
// TestReplicaCalcScaleDownIgnoresFailedPods verifies that failed pods (the last
// two of seven) are dropped from the calculation: the five running pods average
// 280m (28% vs a 50% target), shrinking 5 replicas to 3.
func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       numContainersPerPod * 280,
		},
	}
	tc.runTest(t)
}
  1096  
// TestReplicaCalcScaleDownContainerIgnoresFailedPods is the container-scoped
// variant of the failed-pod exclusion test: "container2" levels of the running
// pods average 280m (28%), shrinking 5 replicas to 3.
func TestReplicaCalcScaleDownContainerIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}}, //TODO: Test is broken
			// NOTE(review): the TODO above refers to the levels covering only 5 of the
			// 7 declared pods -- confirm the intended fixture before relying on it.

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       280,
		},
		container: "container2",
	}
	tc.runTest(t)
}
  1116  
// TestReplicaCalcScaleDownIgnoresDeletionPods verifies that pods with a
// deletion timestamp (the last two of seven) are excluded: the remaining five
// pods average 280m (28% vs a 50% target), shrinking 5 replicas to 3.
func TestReplicaCalcScaleDownIgnoresDeletionPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:      5,
		expectedReplicas:     3,
		podReadiness:         []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:             []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
		podDeletionTimestamp: []bool{false, false, false, false, false, true, true},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       numContainersPerPod * 280,
		},
	}
	tc.runTest(t)
}
  1136  
// Regression test for https://github.com/kubernetes/kubernetes/issues/83561
//
// TestReplicaCalcScaleDownIgnoresDeletionPods_StillRunning checks that pods
// marked for deletion are excluded even while their phase is still Running,
// using container-scoped ("container2") metrics: the five surviving pods
// average 280m (28%), shrinking 5 replicas to 3.
func TestReplicaCalcScaleDownIgnoresDeletionPods_StillRunning(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:      5,
		expectedReplicas:     3,
		podReadiness:         []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:             []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
		podDeletionTimestamp: []bool{false, false, false, false, false, true, true},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},

			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       280,
		},
		container: "container2",
	}
	tc.runTest(t)
}
  1158  
// TestReplicaCalcTolerance verifies that a CPU utilization of 102% against a
// 100% target is within the default tolerance, so the replica count stays at 3.
func TestReplicaCalcTolerance(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
			levels:   makePodMetricLevels(1010, 1030, 1020),

			targetUtilization:   100,
			expectedUtilization: 102,
			expectedValue:       numContainersPerPod * 1020,
		},
	}
	tc.runTest(t)
}
  1175  
  1176  func TestReplicaCalcToleranceCM(t *testing.T) {
  1177  	tc := replicaCalcTestCase{
  1178  		currentReplicas:  3,
  1179  		expectedReplicas: 3,
  1180  		metric: &metricInfo{
  1181  			name:          "qps",
  1182  			levels:        []int64{20000, 21000, 21000},
  1183  			targetUsage:   20000,
  1184  			expectedUsage: 20666,
  1185  			metricType:    podMetric,
  1186  		},
  1187  	}
  1188  	tc.runTest(t)
  1189  }
  1190  
  1191  func TestReplicaCalcToleranceCMObject(t *testing.T) {
  1192  	tc := replicaCalcTestCase{
  1193  		currentReplicas:  3,
  1194  		expectedReplicas: 3,
  1195  		metric: &metricInfo{
  1196  			name:          "qps",
  1197  			levels:        []int64{20666},
  1198  			targetUsage:   20000,
  1199  			expectedUsage: 20666,
  1200  			singleObject: &autoscalingv2.CrossVersionObjectReference{
  1201  				Kind:       "Deployment",
  1202  				APIVersion: "apps/v1",
  1203  				Name:       "some-deployment",
  1204  			},
  1205  		},
  1206  	}
  1207  	tc.runTest(t)
  1208  }
  1209  
// TestReplicaCalcTolerancePerPodCMObject verifies tolerance on a per-pod object
// metric: 20166 over 4 pods is 5042 per pod (rounded), close enough to the
// 5000 target that the replica count stays at 4.
func TestReplicaCalcTolerancePerPodCMObject(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  4,
		expectedReplicas: 4,
		metric: &metricInfo{
			metricType:        objectPerPodMetric,
			name:              "qps",
			levels:            []int64{20166},
			perPodTargetUsage: 5000,
			expectedUsage:     5042,
			singleObject: &autoscalingv2.CrossVersionObjectReference{
				Kind:       "Deployment",
				APIVersion: "apps/v1",
				Name:       "some-deployment",
			},
		},
	}
	tc.runTest(t)
}
  1229  
// TestReplicaCalcToleranceCMExternal verifies tolerance on an external metric:
// a level of 8600 against a target of 8888 is within tolerance, so the replica
// count stays at 3.
func TestReplicaCalcToleranceCMExternal(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{8600},
			targetUsage:   8888,
			expectedUsage: 8600,
			selector:      &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
			metricType:    externalMetric,
		},
	}
	tc.runTest(t)
}
  1245  
// TestReplicaCalcTolerancePerPodCMExternal verifies tolerance on a per-pod
// external metric: 8600 over 3 pods is 2867 per pod, close enough to the 2900
// target that the replica count stays at 3.
func TestReplicaCalcTolerancePerPodCMExternal(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		metric: &metricInfo{
			name:              "qps",
			levels:            []int64{8600},
			perPodTargetUsage: 2900,
			expectedUsage:     2867,
			selector:          &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
			metricType:        externalPerPodMetric,
		},
	}
	tc.runTest(t)
}
  1261  
// TestReplicaCalcSuperfluousMetrics verifies that metrics reported for more
// pods than exist (6 levels, 4 pods) are handled: expectedUtilization 587
// matches averaging only the first four levels ((4000+9500+3000+7000)/4 =
// 5875m = 587%), scaling 4 replicas up to 24.
func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  4,
		expectedReplicas: 24,
		resource: &resourceInfo{
			name:                v1.ResourceCPU,
			requests:            []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:              makePodMetricLevels(4000, 9500, 3000, 7000, 3200, 2000),
			targetUtilization:   100,
			expectedUtilization: 587,
			expectedValue:       numContainersPerPod * 5875,
		},
	}
	tc.runTest(t)
}
  1277  
// TestReplicaCalcMissingMetrics verifies scale-down with partial metrics: only
// 2 of 4 pods report (400m, 95m; mean 247.5m -> 24%). The result is a
// conservative shrink from 4 to 3 rather than to 1 -- presumably the pods with
// missing metrics are assumed at target; see the calculator for exact handling.
func TestReplicaCalcMissingMetrics(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  4,
		expectedReplicas: 3,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(400, 95),

			targetUtilization:   100,
			expectedUtilization: 24,
			expectedValue:       495, // numContainersPerPod * 247, for sufficiently large values of 247
		},
	}
	tc.runTest(t)
}
  1294  
// TestReplicaCalcEmptyMetrics verifies the error path when the resource metrics
// API returns no samples at all: the calculator must fail with a descriptive
// error rather than produce a replica count.
func TestReplicaCalcEmptyMetrics(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 4,
		expectedError:   fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from resource metrics API"),
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(),

			targetUtilization: 100,
		},
	}
	tc.runTest(t)
}
  1309  
// TestReplicaCalcEmptyCPURequest verifies the error path when the pod declares
// no CPU request: utilization cannot be computed, so the calculator must fail
// with a "missing request for" error.
func TestReplicaCalcEmptyCPURequest(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 1,
		expectedError:   fmt.Errorf("missing request for"),
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{},
			levels:   makePodMetricLevels(200),

			targetUtilization: 100,
		},
	}
	tc.runTest(t)
}
  1324  
// TestPlainMetricReplicaCalcMissingMetricsNoChangeEq verifies that a per-pod
// custom metric with samples for only 3 of 5 pods, averaging exactly the target
// (20000), produces no replica change.
func TestPlainMetricReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 5,
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{20000, 19000, 21000},
			targetUsage:   20000,
			expectedUsage: 20000,
			metricType:    podMetric,
		},
	}
	tc.runTest(t)
}
  1339  
// TestReplicaCalcMissingMetricsNoChangeEq verifies no replica change when the
// single reporting pod (of 2) sits exactly at the 100% CPU target.
func TestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 2,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(1000),

			targetUtilization:   100,
			expectedUtilization: 100,
			expectedValue:       numContainersPerPod * 1000,
		},
	}
	tc.runTest(t)
}
  1356  
// TestReplicaCalcMissingMetricsNoChangeGt verifies no replica change when the
// single reporting pod (of 2) is above target (190%) but the missing-metrics
// pod pulls the recomputed ratio back inside tolerance -- presumably missing
// pods are assumed at 0% for a prospective scale-up; see the calculator.
func TestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 2,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(1900),

			targetUtilization:   100,
			expectedUtilization: 190,
			expectedValue:       numContainersPerPod * 1900,
		},
	}
	tc.runTest(t)
}
  1373  
// TestReplicaCalcMissingMetricsNoChangeLt verifies no replica change when the
// single reporting pod (of 2) is below target (60%) -- presumably the
// missing-metrics pod is assumed at target for a prospective scale-down,
// keeping the recomputed ratio inside tolerance; see the calculator.
func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 2,
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(600),

			targetUtilization:   100,
			expectedUtilization: 60,
			expectedValue:       numContainersPerPod * 600,
		},
	}
	tc.runTest(t)
}
  1390  
// TestReplicaCalcMissingMetricsUnreadyChange: pod 0 is unready, pod 2 reports
// no metrics, and the one ready reporting pod sits at 45% vs a 50% target --
// no replica change is expected.
func TestReplicaCalcMissingMetricsUnreadyChange(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 450),

			targetUtilization:   50,
			expectedUtilization: 45,
			expectedValue:       numContainersPerPod * 450,
		},
	}
	tc.runTest(t)
}
  1408  
// TestReplicaCalcMissingMetricsHotCpuNoChange is the hot-CPU analogue of the
// unready case above: pod 0 recently started, pod 2 reports no metrics, and the
// remaining pod sits at 45% vs a 50% target -- no replica change.
func TestReplicaCalcMissingMetricsHotCpuNoChange(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 3,
		podStartTime:     []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 450),

			targetUtilization:   50,
			expectedUtilization: 45,
			expectedValue:       numContainersPerPod * 450,
		},
	}
	tc.runTest(t)
}
  1426  
// TestReplicaCalcMissingMetricsUnreadyScaleUp verifies that a genuinely hot
// ready pod (200% vs a 50% target) still triggers scale-up (3 -> 4) even with
// one unready pod and one pod missing metrics.
func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 2000),

			targetUtilization:   50,
			expectedUtilization: 200,
			expectedValue:       numContainersPerPod * 2000,
		},
	}
	tc.runTest(t)
}
  1444  
// TestReplicaCalcMissingMetricsHotCpuScaleUp mirrors the unready scale-up case
// with the first pod also marked hot-CPU (recent start): the remaining ready
// pod at 200% vs a 50% target still scales 3 -> 4.
func TestReplicaCalcMissingMetricsHotCpuScaleUp(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  3,
		expectedReplicas: 4,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
		podStartTime:     []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 2000),

			targetUtilization:   50,
			expectedUtilization: 200,
			expectedValue:       numContainersPerPod * 2000,
		},
	}
	tc.runTest(t)
}
  1463  
// TestReplicaCalcMissingMetricsUnreadyScaleDown verifies scale-down with one
// unready pod and one pod missing metrics: the ready reporting pods average
// 100m (10% vs a 50% target), shrinking 4 replicas to 3.
func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  4,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 100, 100),

			targetUtilization:   50,
			expectedUtilization: 10,
			expectedValue:       numContainersPerPod * 100,
		},
	}
	tc.runTest(t)
}
  1481  
// TestReplicaCalcMissingMetricsScaleDownTargetOver100 verifies scale-down with
// a target utilization above 100% (300%), mixed requests (1 and 2 cores), one
// unready pod, and one pod missing metrics: 4 replicas shrink to 2.
// expectedUtilization 6 is consistent with the two ready reporting pods
// ((100+100)/(1000+2000) ~= 6.7%) -- confirm against the calculator.
func TestReplicaCalcMissingMetricsScaleDownTargetOver100(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  4,
		expectedReplicas: 2,
		podReadiness:     []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("2.0"), resource.MustParse("2.0")},
			levels:   makePodMetricLevels(200, 100, 100),

			targetUtilization:   300,
			expectedUtilization: 6,
			expectedValue:       numContainersPerPod * 100,
		},
	}
	tc.runTest(t)
}
  1499  
// TestReplicaCalcDuringRollingUpdateWithMaxSurge simulates a rolling update
// with maxSurge: 3 pods are running while the spec asks for 2. The two
// reporting pods average 100m (10% vs a 50% target) and the replica count
// should remain at the spec's 2.
func TestReplicaCalcDuringRollingUpdateWithMaxSurge(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 2,
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   makePodMetricLevels(100, 100),

			targetUtilization:   50,
			expectedUtilization: 10,
			expectedValue:       numContainersPerPod * 100,
		},
	}
	tc.runTest(t)
}
  1517  
// TestReplicaCalcDuringRollingUpdateWithMaxSurgeCM is the custom-metric variant
// of the maxSurge rolling-update test: 3 pods running against a spec of 2, the
// two reporting pods average 10000 vs a target of 17000, and the replica count
// should remain at 2.
func TestReplicaCalcDuringRollingUpdateWithMaxSurgeCM(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  2,
		expectedReplicas: 2,
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning},
		metric: &metricInfo{
			name:          "qps",
			levels:        []int64{10000, 10000},
			targetUsage:   17000,
			expectedUsage: 10000,
			metricType:    podMetric,
		},
	}
	tc.runTest(t)
}
  1533  
  1534  // TestComputedToleranceAlgImplementation is a regression test which
  1535  // back-calculates a minimal percentage for downscaling based on a small percentage
  1536  // increase in pod utilization which is calibrated against the tolerance value.
  1537  func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
  1538  
  1539  	startPods := int32(10)
  1540  	// 150 mCPU per pod.
  1541  	totalUsedCPUOfAllPods := int64(startPods * 150)
  1542  	// Each pod starts out asking for 2X what is really needed.
  1543  	// This means we will have a 50% ratio of used/requested
  1544  	totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
  1545  	requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
  1546  	// Spread the amount we ask over 10 pods.  We can add some jitter later in reportedLevels.
  1547  	perPodRequested := totalRequestedCPUOfAllPods / startPods
  1548  
  1549  	// Force a minimal scaling event by satisfying  (tolerance < 1 - resourcesUsedRatio).
  1550  	target := math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .01
  1551  	finalCPUPercentTarget := int32(target * 100)
  1552  	resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
  1553  
  1554  	// i.e. .60 * 20 -> scaled down expectation.
  1555  	finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
  1556  
  1557  	// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue)
  1558  	tc := replicaCalcTestCase{
  1559  		currentReplicas:  startPods,
  1560  		expectedReplicas: finalPods,
  1561  		resource: &resourceInfo{
  1562  			name: v1.ResourceCPU,
  1563  			levels: makePodMetricLevels(
  1564  				totalUsedCPUOfAllPods/10,
  1565  				totalUsedCPUOfAllPods/10,
  1566  				totalUsedCPUOfAllPods/10,
  1567  				totalUsedCPUOfAllPods/10,
  1568  				totalUsedCPUOfAllPods/10,
  1569  				totalUsedCPUOfAllPods/10,
  1570  				totalUsedCPUOfAllPods/10,
  1571  				totalUsedCPUOfAllPods/10,
  1572  				totalUsedCPUOfAllPods/10,
  1573  				totalUsedCPUOfAllPods/10,
  1574  			),
  1575  			requests: []resource.Quantity{
  1576  				resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
  1577  				resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
  1578  				resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
  1579  				resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
  1580  				resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
  1581  				resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
  1582  				resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
  1583  				resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
  1584  				resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
  1585  				resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
  1586  			},
  1587  
  1588  			targetUtilization:   finalCPUPercentTarget,
  1589  			expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
  1590  			expectedValue:       numContainersPerPod * totalUsedCPUOfAllPods / 10,
  1591  		},
  1592  	}
  1593  
  1594  	tc.runTest(t)
  1595  
  1596  	// Reuse the data structure above, now testing "unscaling".
  1597  	// Now, we test that no scaling happens if we are in a very close margin to the tolerance
  1598  	target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
  1599  	finalCPUPercentTarget = int32(target * 100)
  1600  	tc.resource.targetUtilization = finalCPUPercentTarget
  1601  	tc.currentReplicas = startPods
  1602  	tc.expectedReplicas = startPods
  1603  	tc.runTest(t)
  1604  }
  1605  
// TestGroupPods verifies how groupPods classifies pods into four buckets:
// ready (counted), unready, missing (no metrics reported), and ignored
// (deleted or failed). Classification depends on pod phase, deletion
// timestamp, the PodReady condition, metric freshness, and the CPU
// initialization grace periods supplied by the test defaults.
func TestGroupPods(t *testing.T) {
	tests := []struct {
		name                string
		pods                []*v1.Pod                   // input pods to classify
		metrics             metricsclient.PodMetricsInfo // metrics keyed by pod name
		resource            v1.ResourceName              // CPU triggers the readiness grace-period logic
		expectReadyPodCount int
		expectUnreadyPods   sets.String
		expectMissingPods   sets.String
		expectIgnoredPods   sets.String
	}{
		{
			name:                "void",
			pods:                []*v1.Pod{},
			metrics:             metricsclient.PodMetricsInfo{},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// Non-CPU resources skip the readiness/freshness checks entirely.
			name: "count in a ready pod - memory",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "bentham",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: time.Minute},
			},
			resource:            v1.ResourceMemory,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// A pod with no PodReady condition at all is treated as unready for CPU.
			name: "unready a pod without ready condition - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "lucretius",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now(),
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"lucretius": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString("lucretius"),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// Within the CPU initialization period a ready pod counts only if
			// its metric window ends after the ready transition (fresh metrics).
			name: "count in a ready pod with fresh metrics during initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "bentham",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-1 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-30 * time.Second)},
								Status:             v1.ConditionTrue,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: 30 * time.Second},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// Same pod as above but with a 60s metric window that overlaps the
			// pre-ready period, so the metrics are considered stale.
			name: "unready a ready pod without fresh metrics during initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "bentham",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-1 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-30 * time.Second)},
								Status:             v1.ConditionTrue,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now(), Window: 60 * time.Second},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString("bentham"),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			name: "unready an unready pod during initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "lucretius",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-10 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-9*time.Minute - 54*time.Second)},
								Status:             v1.ConditionFalse,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"lucretius": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString("lucretius"),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// After the initialization period metric freshness no longer matters.
			name: "count in a ready pod without fresh metrics after initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "bentham",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-3 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
								Status:             v1.ConditionTrue,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"bentham": metricsclient.PodMetric{Value: 1, Timestamp: time.Now().Add(-2 * time.Minute), Window: time.Minute},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			name: "count in an unready pod that was ready after initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "lucretius",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-10 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-9 * time.Minute)},
								Status:             v1.ConditionFalse,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"lucretius": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			name: "unready pod that has never been ready after initialization period - CPU",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "lucretius",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-10 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-9*time.Minute - 50*time.Second)},
								Status:             v1.ConditionFalse,
							},
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"lucretius": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// A pod present in the pod list but absent from the metrics map.
			name: "a missing pod",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "epicurus",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-3 * time.Minute),
						},
					},
				},
			},
			metrics:             metricsclient.PodMetricsInfo{},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString("epicurus"),
			expectIgnoredPods:   sets.NewString(),
		}, {
			// Mixed buckets in one call: unready, ready, and missing.
			name: "several pods",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "lucretius",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now(),
						},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "niccolo",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-3 * time.Minute),
						},
						Conditions: []v1.PodCondition{
							{
								Type:               v1.PodReady,
								LastTransitionTime: metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
								Status:             v1.ConditionTrue,
							},
						},
					},
				},
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "epicurus",
					},
					Status: v1.PodStatus{
						Phase: v1.PodSucceeded,
						StartTime: &metav1.Time{
							Time: time.Now().Add(-3 * time.Minute),
						},
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"lucretius": metricsclient.PodMetric{Value: 1},
				"niccolo":   metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 1,
			expectUnreadyPods:   sets.NewString("lucretius"),
			expectMissingPods:   sets.NewString("epicurus"),
			expectIgnoredPods:   sets.NewString(),
		}, {
			name: "pending pods are unreadied",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "unscheduled",
					},
					Status: v1.PodStatus{
						Phase: v1.PodPending,
					},
				},
			},
			metrics:             metricsclient.PodMetricsInfo{},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString("unscheduled"),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString(),
		}, {
			name: "ignore pods with deletion timestamps",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name:              "deleted",
						DeletionTimestamp: &metav1.Time{Time: time.Unix(1, 0)},
					},
					Status: v1.PodStatus{
						Phase: v1.PodPending,
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"deleted": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString("deleted"),
		}, {
			name: "ignore pods in a failed state",
			pods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "failed",
					},
					Status: v1.PodStatus{
						Phase: v1.PodFailed,
					},
				},
			},
			metrics: metricsclient.PodMetricsInfo{
				"failed": metricsclient.PodMetric{Value: 1},
			},
			resource:            v1.ResourceCPU,
			expectReadyPodCount: 0,
			expectUnreadyPods:   sets.NewString(),
			expectMissingPods:   sets.NewString(),
			expectIgnoredPods:   sets.NewString("failed"),
		},
	}
	// Run each case as a subtest; all four buckets must match exactly.
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
			if readyPodCount != tc.expectReadyPodCount {
				t.Errorf("%s got readyPodCount %d, expected %d", tc.name, readyPodCount, tc.expectReadyPodCount)
			}
			if !unreadyPods.Equal(tc.expectUnreadyPods) {
				t.Errorf("%s got unreadyPods %v, expected %v", tc.name, unreadyPods, tc.expectUnreadyPods)
			}
			if !missingPods.Equal(tc.expectMissingPods) {
				t.Errorf("%s got missingPods %v, expected %v", tc.name, missingPods, tc.expectMissingPods)
			}
			if !ignoredPods.Equal(tc.expectIgnoredPods) {
				t.Errorf("%s got ignoredPods %v, expected %v", tc.name, ignoredPods, tc.expectIgnoredPods)
			}
		})
	}
}
  2002  
// TestCalculatePodRequests verifies calculatePodRequests' per-pod request
// totals: summing across all containers (plus restartable init containers)
// when no container name is given, restricting to a single named container,
// and returning an error when a container omits the requested resource.
func TestCalculatePodRequests(t *testing.T) {
	containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
	testPod := "test-pod"

	tests := []struct {
		name             string
		pods             []*v1.Pod
		container        string           // empty means "sum all containers"
		resource         v1.ResourceName
		expectedRequests map[string]int64 // pod name -> total request in milli-units
		expectedError    error
	}{
		{
			name:             "void",
			pods:             []*v1.Pod{},
			container:        "",
			resource:         v1.ResourceCPU,
			expectedRequests: map[string]int64{},
			expectedError:    nil,
		},
		{
			// 100m + 50m across both regular containers.
			name: "pod with regular containers",
			pods: []*v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      testPod,
					Namespace: testNamespace,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
						{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
					},
				},
			}},
			container:        "",
			resource:         v1.ResourceCPU,
			expectedRequests: map[string]int64{testPod: 150},
			expectedError:    nil,
		},
		{
			// Naming a container restricts the total to that container alone.
			name: "calculate requests with special container",
			pods: []*v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      testPod,
					Namespace: testNamespace,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
						{Name: "container2", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
					},
				},
			}},
			container:        "container1",
			resource:         v1.ResourceCPU,
			expectedRequests: map[string]int64{testPod: 100},
			expectedError:    nil,
		},
		{
			// A container without a CPU request makes the whole calculation fail.
			name: "container missing requests",
			pods: []*v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      testPod,
					Namespace: testNamespace,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "container1"},
					},
				},
			}},
			container:        "",
			resource:         v1.ResourceCPU,
			expectedRequests: nil,
			expectedError:    fmt.Errorf("missing request for %s in container %s of Pod %s", v1.ResourceCPU, "container1", testPod),
		},
		{
			// Restartable (sidecar) init containers count toward the total;
			// ordinary init containers do not: 100m + 50m, not +20m.
			name: "pod with restartable init containers",
			pods: []*v1.Pod{{
				ObjectMeta: metav1.ObjectMeta{
					Name:      testPod,
					Namespace: testNamespace,
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{Name: "container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}}},
					},
					InitContainers: []v1.Container{
						{Name: "init-container1", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI)}}},
						{Name: "restartable-container1", RestartPolicy: &containerRestartPolicyAlways, Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(50, resource.DecimalSI)}}},
					},
				},
			}},
			container:        "",
			resource:         v1.ResourceCPU,
			expectedRequests: map[string]int64{testPod: 150},
			expectedError:    nil,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			requests, err := calculatePodRequests(tc.pods, tc.container, tc.resource)
			assert.Equal(t, tc.expectedRequests, requests, "requests should be as expected")
			assert.Equal(t, tc.expectedError, err, "error should be as expected")
		})
	}
}
  2111  

View as plain text