package autoscaling

import (
	"context"
	"time"

	autoscalingv2 "k8s.io/api/autoscaling/v2"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = SIGDescribe(feature.HPA, framework.WithSerial(), framework.WithSlow(), "Horizontal pod autoscaling (non-default behavior)", func() {
	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	hpaName := "consumer"

	podCPURequest := 500
	targetCPUUtilizationPercent := 25

	// usageForReplicas returns usage for (n - 0.5) replicas as if they would consume all CPU
	// under the target. The 0.5 replica reduction is to accommodate for the deviation between
	// the actual consumed cpu and requested usage by the ResourceConsumer.
	// HPA rounds up the recommendations. So, if the usage is e.g. for 3.5 replicas,
	// the recommended replica number will be 4.
	usageForReplicas := func(replicas int) int {
		usagePerReplica := podCPURequest * targetCPUUtilizationPercent / 100
		return replicas*usagePerReplica - usagePerReplica/2
	}

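	// Estimated upper bound on how long fresh usage takes to show up in the
	// metrics API: one full metrics window of new usage, one more window for
	// old samples to age out, plus the delay before a brand-new pod is scraped
	// at all. These are estimates for the e2e environment, not guarantees of
	// the metrics pipeline.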
	fullWindowOfNewUsage := 30 * time.Second
	windowWithOldUsagePasses := 30 * time.Second
	newPodMetricsDelay := 15 * time.Second
	metricsAvailableDelay := fullWindowOfNewUsage + windowWithOldUsagePasses + newPodMetricsDelay

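	// Once metrics are available, the HPA controller may still need up to one
	// reconciliation loop (15s by default via --horizontal-pod-autoscaler-sync-period
	// on kube-controller-manager) plus some time to actuate the scale update.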
	hpaReconciliationInterval := 15 * time.Second
	actuationDelay := 10 * time.Second
	maxHPAReactionTime := metricsAvailableDelay + hpaReconciliationInterval + actuationDelay

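	// The ResourceConsumer adjusts its CPU usage gradually, and replica counts
	// are only observed once per poll interval, so both add to the wait budget.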
	maxConsumeCPUDelay := 30 * time.Second
	waitForReplicasPollInterval := 20 * time.Second
	maxResourceConsumerDelay := maxConsumeCPUDelay + waitForReplicasPollInterval

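	// Extra slack on top of the computed worst case, to reduce flakiness.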
	waitBuffer := 1 * time.Minute

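	// A stabilization window makes the HPA act on the most conservative
	// recommendation recorded within the window: the lowest one when scaling up
	// and the highest one when scaling down. A scale event is therefore delayed
	// by at most the window length.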
	ginkgo.Describe("with short downscale stabilization window", func() {
		ginkgo.It("should scale down soon after the stabilization period", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 1
			initCPUUsageTotal := usageForReplicas(initPods)
			upScaleStabilization := 0 * time.Minute
			downScaleStabilization := 1 * time.Minute

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 5,
				e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			// making sure HPA is ready, doing its job and already has a recommendation recorded
			// for stabilization logic before lowering the consumption
			ginkgo.By("triggering scale up to record a recommendation")
			rc.ConsumeCPU(usageForReplicas(3))
			rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(usageForReplicas(2))
			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 2, downScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
			timeWaited := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale down")
			framework.Logf("time waited for scale down: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically(">", downScaleStabilization), "waited %s, wanted more than %s", timeWaited, downScaleStabilization)
			deadline := downScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaited, deadline)
		})
	})

	ginkgo.Describe("with long upscale stabilization window", func() {
		ginkgo.It("should scale up only after the stabilization period", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 2
			initCPUUsageTotal := usageForReplicas(initPods)
			upScaleStabilization := 3 * time.Minute
			downScaleStabilization := 0 * time.Minute

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10,
				e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			// making sure HPA is ready, doing its job and already has a recommendation recorded
			// for stabilization logic before increasing the consumption
			ginkgo.By("triggering scale down to record a recommendation")
			rc.ConsumeCPU(usageForReplicas(1))
			rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)

			ginkgo.By("triggering scale up by increasing consumption")
			rc.ConsumeCPU(usageForReplicas(3))
			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 3, upScaleStabilization+maxHPAReactionTime+maxResourceConsumerDelay+waitBuffer)
			timeWaited := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale up")
			framework.Logf("time waited for scale up: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically(">", upScaleStabilization), "waited %s, wanted more than %s", timeWaited, upScaleStabilization)
			deadline := upScaleStabilization + maxHPAReactionTime + maxResourceConsumerDelay
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaited, deadline)
		})
	})

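	// A scaling rule with selectPolicy: Disabled blocks scaling in that
	// direction entirely; the HPA keeps reconciling but never changes the
	// replica count that way.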
	ginkgo.Describe("with autoscaling disabled", func() {
		ginkgo.It("shouldn't scale up", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 1
			initCPUUsageTotal := usageForReplicas(initPods)

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleUpDirection),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer

			ginkgo.By("trying to trigger scale up")
			rc.ConsumeCPU(usageForReplicas(8))
			waitStart := time.Now()

			rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
			timeWaited := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale up")
			framework.Logf("time waited for scale up: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically(">", waitDeadline), "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)

			ginkgo.By("verifying number of replicas")
			replicas, err := rc.GetReplicas(ctx)
			framework.ExpectNoError(err)
			gomega.Expect(replicas).To(gomega.BeNumerically("==", initPods), "expected to still have %d replicas, but have %d after the deadline", initPods, replicas)
		})

		ginkgo.It("shouldn't scale down", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 3
			initCPUUsageTotal := usageForReplicas(initPods)

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10, e2eautoscaling.HPABehaviorWithScaleDisabled(e2eautoscaling.ScaleDownDirection),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			defaultDownscaleStabilization := 5 * time.Minute
			waitDeadline := maxHPAReactionTime + maxResourceConsumerDelay + defaultDownscaleStabilization

			ginkgo.By("trying to trigger scale down")
			rc.ConsumeCPU(usageForReplicas(1))
			waitStart := time.Now()

			rc.EnsureDesiredReplicasInRange(ctx, initPods, initPods, waitDeadline, hpa.Name)
			timeWaited := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale down")
			framework.Logf("time waited for scale down: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically(">", waitDeadline), "waited %s, wanted to wait more than %s", timeWaited, waitDeadline)

			ginkgo.By("verifying number of replicas")
			replicas, err := rc.GetReplicas(ctx)
			framework.ExpectNoError(err)
			gomega.Expect(replicas).To(gomega.BeNumerically("==", initPods), "expected to still have %d replicas, but have %d after the deadline", initPods, replicas)
		})
	})

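	// A Pods scaling policy caps how many replicas may be added (or removed)
	// within one policy period, here 1 pod per minute, so each further scale
	// step has to wait for the next window.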
	ginkgo.Describe("with scale limited by number of Pods rate", func() {
		ginkgo.It("should scale up no more than given number of Pods per minute", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 1
			initCPUUsageTotal := usageForReplicas(initPods)
			limitWindowLength := 1 * time.Minute
			podsLimitPerMinute := 1

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10,
				e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleUpDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale up by increasing consumption")
			rc.ConsumeCPU(usageForReplicas(3))

			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor2 := time.Since(waitStart)

			waitStart = time.Now()
			rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor3 := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale up to 2 replicas")
			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
			// First scale event can happen right away, as there were no scale events in the past.
			gomega.Expect(timeWaitedFor2).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor2, deadline)

			ginkgo.By("verifying time waited for a scale up to 3 replicas")
			// Second scale event needs to respect limit window.
			gomega.Expect(timeWaitedFor3).To(gomega.BeNumerically(">", limitWindowLength), "waited %s, wanted to wait more than %s", timeWaitedFor3, limitWindowLength)
			gomega.Expect(timeWaitedFor3).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor3, deadline)
		})

		ginkgo.It("should scale down no more than given number of Pods per minute", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 3
			initCPUUsageTotal := usageForReplicas(initPods)
			limitWindowLength := 1 * time.Minute
			podsLimitPerMinute := 1

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10,
				e2eautoscaling.HPABehaviorWithScaleLimitedByNumberOfPods(e2eautoscaling.ScaleDownDirection, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds())),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(usageForReplicas(1))

			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 2, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor2 := time.Since(waitStart)

			waitStart = time.Now()
			rc.WaitForReplicas(ctx, 1, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor1 := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale down to 2 replicas")
			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
			// First scale event can happen right away, as there were no scale events in the past.
			gomega.Expect(timeWaitedFor2).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor2, deadline)

			ginkgo.By("verifying time waited for a scale down to 1 replica")
			// Second scale event needs to respect limit window.
			gomega.Expect(timeWaitedFor1).To(gomega.BeNumerically(">", limitWindowLength), "waited %s, wanted more than %s", timeWaitedFor1, limitWindowLength)
			gomega.Expect(timeWaitedFor1).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor1, deadline)
		})
	})

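	// A Percent scaling policy caps each step relative to the current replica
	// count. With 50% per minute, 2 replicas can grow to at most 3 in the first
	// window and then to at most 5 (4.5 rounded up) in the next.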
	ginkgo.Describe("with scale limited by percentage", func() {
		ginkgo.It("should scale up no more than given percentage of current Pods per minute", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 2
			initCPUUsageTotal := usageForReplicas(initPods)
			limitWindowLength := 1 * time.Minute
			percentageLimitPerMinute := 50

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10,
				e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleUpDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale up by increasing consumption")
			rc.ConsumeCPU(usageForReplicas(8))

			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor3 := time.Since(waitStart)

			waitStart = time.Now()
			// The second step is rate limited, so it has to wait out another limit window before reaching 5 replicas.
			rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor5 := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale up to 3 replicas")
			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
			// First scale event can happen right away, as there were no scale events in the past.
			gomega.Expect(timeWaitedFor3).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor3, deadline)

			ginkgo.By("verifying time waited for a scale up to 5 replicas")
			// Second scale event needs to respect limit window.
			gomega.Expect(timeWaitedFor5).To(gomega.BeNumerically(">", limitWindowLength), "waited %s, wanted to wait more than %s", timeWaitedFor5, limitWindowLength)
			gomega.Expect(timeWaitedFor5).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor5, deadline)
		})

		ginkgo.It("should scale down no more than given percentage of current Pods per minute", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 7
			initCPUUsageTotal := usageForReplicas(initPods)
			limitWindowLength := 1 * time.Minute
			percentageLimitPerMinute := 25

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 10,
				e2eautoscaling.HPABehaviorWithScaleLimitedByPercentage(e2eautoscaling.ScaleDownDirection, int32(percentageLimitPerMinute), int32(limitWindowLength.Seconds())),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(usageForReplicas(1))

			waitStart := time.Now()
			rc.WaitForReplicas(ctx, 5, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor5 := time.Since(waitStart)

			waitStart = time.Now()
			// The second step is rate limited as well and has to wait out another limit window before reaching 3 replicas.
			rc.WaitForReplicas(ctx, 3, maxHPAReactionTime+maxResourceConsumerDelay+limitWindowLength)
			timeWaitedFor3 := time.Since(waitStart)

			ginkgo.By("verifying time waited for a scale down to 5 replicas")
			deadline := limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay
			// First scale event can happen right away, as there were no scale events in the past.
			gomega.Expect(timeWaitedFor5).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor5, deadline)

			ginkgo.By("verifying time waited for a scale down to 3 replicas")
			// Second scale event needs to respect limit window.
			gomega.Expect(timeWaitedFor3).To(gomega.BeNumerically(">", limitWindowLength), "waited %s, wanted more than %s", timeWaitedFor3, limitWindowLength)
			gomega.Expect(timeWaitedFor3).To(gomega.BeNumerically("<", deadline), "waited %s, wanted less than %s", timeWaitedFor3, deadline)
		})
	})

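	// These tests combine multiple behavior controls and verify that the most
	// restrictive one governs each scaling direction.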
	ginkgo.Describe("with both scale up and down controls configured", func() {
		waitBuffer := 2 * time.Minute

		ginkgo.It("should keep recommendation within the range over two stabilization windows", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 1
			initCPUUsageTotal := usageForReplicas(initPods)
			upScaleStabilization := 3 * time.Minute
			downScaleStabilization := 3 * time.Minute

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 1, 5,
				e2eautoscaling.HPABehaviorWithStabilizationWindows(upScaleStabilization, downScaleStabilization),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale up by increasing consumption")
			rc.ConsumeCPU(usageForReplicas(3))
			waitDeadline := upScaleStabilization

			ginkgo.By("verifying number of replicas stays in desired range within stabilization window")
			rc.EnsureDesiredReplicasInRange(ctx, 1, 1, waitDeadline, hpa.Name)

			ginkgo.By("waiting for replicas to scale up after stabilization window passed")
			waitStart := time.Now()
			waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
			rc.WaitForReplicas(ctx, 3, waitDeadline)
			timeWaited := time.Since(waitStart)
			framework.Logf("time waited for scale up: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", waitDeadline), "waited %s, wanted less than %s", timeWaited, waitDeadline)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(usageForReplicas(2))
			waitDeadline = downScaleStabilization

			ginkgo.By("verifying number of replicas stays in desired range within stabilization window")
			rc.EnsureDesiredReplicasInRange(ctx, 3, 3, waitDeadline, hpa.Name)

			ginkgo.By("waiting for replicas to scale down after stabilization window passed")
			waitStart = time.Now()
			waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
			rc.WaitForReplicas(ctx, 2, waitDeadline)
			timeWaited = time.Since(waitStart)
			framework.Logf("time waited for scale down: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", waitDeadline), "waited %s, wanted less than %s", timeWaited, waitDeadline)
		})

		ginkgo.It("should keep recommendation within the range with stabilization window and pod limit rate", func(ctx context.Context) {
			ginkgo.By("setting up resource consumer and HPA")
			initPods := 2
			initCPUUsageTotal := usageForReplicas(initPods)
			downScaleStabilization := 3 * time.Minute
			limitWindowLength := 2 * time.Minute
			podsLimitPerMinute := 1

			rc := e2eautoscaling.NewDynamicResourceConsumer(ctx,
				hpaName, f.Namespace.Name, e2eautoscaling.KindDeployment, initPods,
				initCPUUsageTotal, 0, 0, int64(podCPURequest), 200,
				f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle,
			)
			ginkgo.DeferCleanup(rc.CleanUp)

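			// Scale-up is rate limited to 1 pod per 2-minute window, while
			// scale-down must additionally wait out a 3-minute stabilization
			// window before acting.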
			scaleUpRule := e2eautoscaling.HPAScalingRuleWithScalingPolicy(autoscalingv2.PodsScalingPolicy, int32(podsLimitPerMinute), int32(limitWindowLength.Seconds()))
			scaleDownRule := e2eautoscaling.HPAScalingRuleWithStabilizationWindow(int32(downScaleStabilization.Seconds()))
			hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscalerWithBehavior(ctx,
				rc, int32(targetCPUUtilizationPercent), 2, 5,
				e2eautoscaling.HPABehaviorWithScaleUpAndDownRules(scaleUpRule, scaleDownRule),
			)
			ginkgo.DeferCleanup(e2eautoscaling.DeleteHPAWithBehavior, rc, hpa.Name)

			ginkgo.By("triggering scale up by increasing consumption")
			rc.ConsumeCPU(usageForReplicas(4))
			waitDeadline := limitWindowLength

			ginkgo.By("verifying number of replicas stays in desired range with pod limit rate")
			rc.EnsureDesiredReplicasInRange(ctx, 2, 3, waitDeadline, hpa.Name)

			ginkgo.By("waiting for replicas to scale up")
			waitStart := time.Now()
			waitDeadline = limitWindowLength + maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
			rc.WaitForReplicas(ctx, 4, waitDeadline)
			timeWaited := time.Since(waitStart)
			framework.Logf("time waited for scale up: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", waitDeadline), "waited %s, wanted less than %s", timeWaited, waitDeadline)

			ginkgo.By("triggering scale down by lowering consumption")
			rc.ConsumeCPU(usageForReplicas(2))

			ginkgo.By("verifying number of replicas stays in desired range within stabilization window")
			waitDeadline = downScaleStabilization
			rc.EnsureDesiredReplicasInRange(ctx, 4, 4, waitDeadline, hpa.Name)

			ginkgo.By("waiting for replicas to scale down after stabilization window passed")
			waitStart = time.Now()
			waitDeadline = maxHPAReactionTime + maxResourceConsumerDelay + waitBuffer
			rc.WaitForReplicas(ctx, 2, waitDeadline)
			timeWaited = time.Since(waitStart)
			framework.Logf("time waited for scale down: %s", timeWaited)
			gomega.Expect(timeWaited).To(gomega.BeNumerically("<", waitDeadline), "waited %s, wanted less than %s", timeWaited, waitDeadline)
		})
	})
})