package nodevolumelimits

import (
	"errors"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	csitrans "k8s.io/csi-translation-lib"
	csilibplugins "k8s.io/csi-translation-lib/plugins"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/test/utils/ktesting"
	"k8s.io/utils/ptr"
)

const (
	ebsCSIDriverName = csilibplugins.AWSEBSDriverName
	gceCSIDriverName = csilibplugins.GCEPDDriverName

	hostpathInTreePluginName = "kubernetes.io/hostpath"
)

var (
	scName = "csi-sc"
)

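// getVolumeLimitKey returns the node allocatable resource name under which a
// plugin reports its attachable-volume limit: the well-known keys for the
// in-tree EBS, GCE PD, Azure Disk, and Cinder filters, and the generated CSI
// attach-limit key for any other (CSI driver) name.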
func getVolumeLimitKey(filterType string) v1.ResourceName {
	switch filterType {
	case ebsVolumeFilterType:
		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
	case gcePDVolumeFilterType:
		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
	case azureDiskVolumeFilterType:
		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
	case cinderVolumeFilterType:
		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
	default:
		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
	}
}

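// TestCSILimits verifies the PreFilter and Filter behavior of the CSILimits
// plugin for CSI, migrated in-tree, inline, and generic ephemeral volumes
// under different volume-limit sources.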
func TestCSILimits(t *testing.T) {
	runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
	pendingVolumePod := st.MakePod().PVC("csi-4").Obj()

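	// unboundPVCPod2 references the same unbound PVC ("csi-4") as pendingVolumePod.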
	unboundPVCPod2 := st.MakePod().PVC("csi-4").Obj()

	missingPVPod := st.MakePod().PVC("csi-6").Obj()
	noSCPVCPod := st.MakePod().PVC("csi-5").Obj()

	gceTwoVolPod := st.MakePod().PVC("csi-pd.csi.storage.gke.io-1").PVC("csi-pd.csi.storage.gke.io-2").Obj()

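	// Pods using in-tree (kubernetes.io/aws-ebs) persistent volumes.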
	inTreeOneVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-0").Obj()
	inTreeTwoVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-1").PVC("csi-kubernetes.io/aws-ebs-2").Obj()

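	// Pods using CSI (ebs.csi.aws.com) persistent volumes.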
	csiEBSOneVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-0").Obj()
	csiEBSTwoVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-1").PVC("csi-ebs.csi.aws.com-2").Obj()

	inTreeNonMigratableOneVolPod := st.MakePod().PVC("csi-kubernetes.io/hostpath-0").Obj()

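	// Pods and claims used for the generic ephemeral volume cases.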
	ephemeralVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(
		v1.Volume{
			Name: "xyz",
			VolumeSource: v1.VolumeSource{
				Ephemeral: &v1.EphemeralVolumeSource{},
			},
		}).Obj()

	controller := true
	ephemeralClaim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralVolumePod.Namespace,
			Name: ephemeralVolumePod.Name + "-" + ephemeralVolumePod.Spec.Volumes[0].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind: "Pod",
					Name: ephemeralVolumePod.Name,
					UID: ephemeralVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	conflictingClaim := ephemeralClaim.DeepCopy()
	conflictingClaim.OwnerReferences = nil

	ephemeralTwoVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345II").Volume(v1.Volume{
		Name: "x",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{},
		},
	}).Volume(v1.Volume{
		Name: "y",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{},
		},
	}).Obj()

	ephemeralClaimX := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralTwoVolumePod.Namespace,
			Name: ephemeralTwoVolumePod.Name + "-" + ephemeralTwoVolumePod.Spec.Volumes[0].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind: "Pod",
					Name: ephemeralTwoVolumePod.Name,
					UID: ephemeralTwoVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	ephemeralClaimY := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralTwoVolumePod.Namespace,
			Name: ephemeralTwoVolumePod.Name + "-" + ephemeralTwoVolumePod.Spec.Volumes[1].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind: "Pod",
					Name: ephemeralTwoVolumePod.Name,
					UID: ephemeralTwoVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	inTreeInlineVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "aws-inline1",
						},
					},
				},
			},
		},
	}
	inTreeInlineVolPodWithSameCSIVolumeID := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "csi-ebs.csi.aws.com-1",
						},
					},
				},
			},
		},
	}
	onlyConfigmapAndSecretVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
			},
		},
	}
	pvcPodWithConfigmapAndSecret := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "csi-ebs.csi.aws.com-0"},
					},
				},
			},
		},
	}
	ephemeralPodWithConfigmapAndSecret := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralVolumePod.Namespace,
			Name: ephemeralVolumePod.Name,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					Name: "xyz",
					VolumeSource: v1.VolumeSource{
						Ephemeral: &v1.EphemeralVolumeSource{},
					},
				},
			},
		},
	}
	inlineMigratablePodWithConfigmapAndSecret := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "aws-inline1",
						},
					},
				},
			},
		},
	}
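	// Each case drives one PreFilter/Filter run. limitSource selects where the
	// attachable-volume limit comes from: "node" (node.status.allocatable),
	// "csinode" (CSINode driver allocatable), "csinode-with-no-limit",
	// "no-csi-driver", "both", or empty for no limit at all.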
	tests := []struct {
		newPod              *v1.Pod
		existingPods        []*v1.Pod
		extraClaims         []v1.PersistentVolumeClaim
		filterName          string
		maxVols             int32
		driverNames         []string
		test                string
		migrationEnabled    bool
		ephemeralEnabled    bool
		limitSource         string
		wantStatus          *framework.Status
		wantPreFilterStatus *framework.Status
	}{
		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 4,
			driverNames: []string{ebsCSIDriverName},
			test: "fits when node volume limit >= new pods CSI volume",
			limitSource: "node",
		},
		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "doesn't when node volume limit <= pods CSI volume",
			limitSource: "node",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "should when driver does not support volume limits",
			limitSource: "csinode-with-no-limit",
		},

		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "count pending PVCs towards volume limit <= pods CSI volume",
			limitSource: "node",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},

		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 4,
			driverNames: []string{ebsCSIDriverName},
			test: "count multiple pending pvcs towards volume limit >= pods CSI volume",
			limitSource: "node",
		},

		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "should count PVCs with invalid PV name but valid SC",
			limitSource: "node",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},

		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "don't count pvcs with missing SC towards volume limit",
			limitSource: "node",
		},

		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
			test: "count pvcs with the same type towards volume limit",
			limitSource: "node",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: gceTwoVolPod,
			existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
			test: "don't count pvcs with different type towards volume limit",
			limitSource: "node",
		},

		{
			newPod: inTreeOneVolPod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count in-tree volumes if migration is enabled",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: inTreeInlineVolPod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "node",
			test: "nil csi node",
		},
		{
			newPod: pendingVolumePod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count unbound in-tree volumes if migration is enabled",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: inTreeOneVolPod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode-with-no-limit",
			test: "should not limit pod if volume used does not report limits",
		},
		{
			newPod: inTreeNonMigratableOneVolPod,
			existingPods: []*v1.Pod{csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{hostpathInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should not count non-migratable in-tree volumes",
		},
		{
			newPod: inTreeInlineVolPod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count in-tree inline volumes if migration is enabled",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},

		{
			newPod: inTreeOneVolPod,
			existingPods: []*v1.Pod{csiEBSTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: inTreeInlineVolPod,
			existingPods: []*v1.Pod{csiEBSTwoVolPod, inTreeOneVolPod},
			filterName: "csi",
			maxVols: 3,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count in-tree, inline and csi volumes if migration is enabled (when scheduling in-tree volumes)",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: inTreeInlineVolPodWithSameCSIVolumeID,
			existingPods: []*v1.Pod{csiEBSTwoVolPod, inTreeOneVolPod},
			filterName: "csi",
			maxVols: 3,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should not count in-tree, inline and csi volumes if migration is enabled (when scheduling in-tree volumes)",
		},
		{
			newPod: csiEBSOneVolPod,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},

		{
			newPod: ephemeralVolumePod,
			filterName: "csi",
			ephemeralEnabled: true,
			driverNames: []string{ebsCSIDriverName},
			test: "ephemeral volume missing",
			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
		},
		{
			newPod: ephemeralVolumePod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim},
			driverNames: []string{ebsCSIDriverName},
			test: "ephemeral volume not owned",
			wantStatus: framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")),
		},
		{
			newPod: ephemeralVolumePod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames: []string{ebsCSIDriverName},
			test: "ephemeral volume unbound",
		},
		{
			newPod: ephemeralVolumePod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames: []string{ebsCSIDriverName},
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			maxVols: 2,
			limitSource: "node",
			test: "ephemeral doesn't when node volume limit <= pods CSI volume",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: csiEBSOneVolPod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaimX, *ephemeralClaimY},
			driverNames: []string{ebsCSIDriverName},
			existingPods: []*v1.Pod{runningPod, ephemeralTwoVolumePod},
			maxVols: 2,
			limitSource: "node",
			test: "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: csiEBSOneVolPod,
			filterName: "csi",
			ephemeralEnabled: false,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames: []string{ebsCSIDriverName},
			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols: 3,
			limitSource: "node",
			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: csiEBSOneVolPod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames: []string{ebsCSIDriverName},
			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols: 3,
			limitSource: "node",
			test: "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod: csiEBSOneVolPod,
			filterName: "csi",
			ephemeralEnabled: true,
			extraClaims: []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames: []string{ebsCSIDriverName},
			existingPods: []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols: 4,
			test: "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
		},
		{
			newPod: onlyConfigmapAndSecretVolPod,
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "skip Filter when the pod only uses secrets and configmaps",
			limitSource: "node",
			wantPreFilterStatus: framework.NewStatus(framework.Skip),
		},
		{
			newPod: pvcPodWithConfigmapAndSecret,
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{ebsCSIDriverName},
			test: "don't skip Filter when the pod has pvcs",
			limitSource: "node",
		},
		{
			newPod: ephemeralPodWithConfigmapAndSecret,
			filterName: "csi",
			ephemeralEnabled: true,
			driverNames: []string{ebsCSIDriverName},
			test: "don't skip Filter when the pod has ephemeral volumes",
			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
		},
		{
			newPod: inlineMigratablePodWithConfigmapAndSecret,
			existingPods: []*v1.Pod{inTreeTwoVolPod},
			filterName: "csi",
			maxVols: 2,
			driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource: "csinode",
			test: "don't skip Filter when the pod has inline migratable volumes",
			wantStatus: framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
	}

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, test.maxVols, test.driverNames...)
			if csiNode != nil {
				enableMigrationOnNode(csiNode, csilibplugins.AWSEBSInTreePluginName)
			}
			csiTranslator := csitrans.New()
			p := &CSILimits{
				csiNodeLister: getFakeCSINodeLister(csiNode),
				pvLister: getFakeCSIPVLister(test.filterName, test.driverNames...),
				pvcLister: append(getFakeCSIPVCLister(test.filterName, scName, test.driverNames...), test.extraClaims...),
				scLister: getFakeCSIStorageClassLister(scName, test.driverNames[0]),
				randomVolumeIDPrefix: rand.String(32),
				translator: csiTranslator,
			}
			_, ctx := ktesting.NewTestContext(t)
			_, gotPreFilterStatus := p.PreFilter(ctx, nil, test.newPod)
			if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
				t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
			}
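			// Filter is only exercised when PreFilter did not ask to skip the plugin.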
			if gotPreFilterStatus.Code() != framework.Skip {
				gotStatus := p.Filter(ctx, nil, test.newPod, node)
				if !reflect.DeepEqual(gotStatus, test.wantStatus) {
					t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
				}
			}
		})
	}
}

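// getFakeCSIPVLister returns four PVs per driver, named
// "<volumeName>-<driver>-<j>"; for the in-tree EBS and hostpath plugin names
// the PV gets the matching in-tree volume source instead of a CSI source.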
func getFakeCSIPVLister(volumeName string, driverNames ...string) tf.PersistentVolumeLister {
	pvLister := tf.PersistentVolumeLister{}
	for _, driver := range driverNames {
		for j := 0; j < 4; j++ {
			volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
			pv := v1.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{Name: volumeHandle},
				Spec: v1.PersistentVolumeSpec{
					PersistentVolumeSource: v1.PersistentVolumeSource{
						CSI: &v1.CSIPersistentVolumeSource{
							Driver: driver,
							VolumeHandle: volumeHandle,
						},
					},
				},
			}

			switch driver {
			case csilibplugins.AWSEBSInTreePluginName:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: volumeHandle,
					},
				}
			case hostpathInTreePluginName:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/tmp",
					},
				}
			default:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					CSI: &v1.CSIPersistentVolumeSource{
						Driver: driver,
						VolumeHandle: volumeHandle,
					},
				}
			}
			pvLister = append(pvLister, pv)
		}
	}

	return pvLister
}

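// getFakeCSIPVCLister returns four bound PVCs per driver (named after the
// corresponding PVs) plus three special claims: "<volumeName>-4" unbound with
// a storage class, "<volumeName>-5" without a storage class, and
// "<volumeName>-6" referencing a missing PV.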
func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) tf.PersistentVolumeClaimLister {
	pvcLister := tf.PersistentVolumeClaimLister{}
	for _, driver := range driverNames {
		for j := 0; j < 4; j++ {
			v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
			pvc := v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{Name: v},
				Spec: v1.PersistentVolumeClaimSpec{VolumeName: v},
			}
			pvcLister = append(pvcLister, pvc)
		}
	}

	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-4"},
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
	})
	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-5"},
		Spec: v1.PersistentVolumeClaimSpec{},
	})

	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-6"},
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &scName, VolumeName: "missing-in-action"},
	})
	return pvcLister
}

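// enableMigrationOnNode marks pluginName as migrated on the CSINode by adding
// it to the node's migrated-plugins annotation.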
func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
	nodeInfoAnnotations := csiNode.GetAnnotations()
	if nodeInfoAnnotations == nil {
		nodeInfoAnnotations = map[string]string{}
	}

	newAnnotationSet := sets.New[string]()
	newAnnotationSet.Insert(pluginName)
	nas := strings.Join(sets.List(newAnnotationSet), ",")
	nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas

	csiNode.Annotations = nodeInfoAnnotations
}

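// getFakeCSIStorageClassLister returns a single storage class wired to the
// given provisioner.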
func getFakeCSIStorageClassLister(scName, provisionerName string) tf.StorageClassLister {
	return tf.StorageClassLister{
		{
			ObjectMeta: metav1.ObjectMeta{Name: scName},
			Provisioner: provisionerName,
		},
	}
}

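// getFakeCSINodeLister returns a lister containing a copy of csiNode, or an
// empty lister when csiNode is nil.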
func getFakeCSINodeLister(csiNode *storagev1.CSINode) tf.CSINodeLister {
	csiNodeLister := tf.CSINodeLister{}
	if csiNode != nil {
		csiNodeLister = append(csiNodeLister, *csiNode.DeepCopy())
	}
	return csiNodeLister
}

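// getNodeWithPodAndVolumeLimits builds a NodeInfo holding the given pods and,
// depending on limitSource, exposes the attachable-volume limit through the
// node's allocatable resources, a CSINode object, both, or neither.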
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int32, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) {
	nodeInfo := framework.NewNodeInfo(pods...)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{},
		},
	}
	var csiNode *storagev1.CSINode

	addLimitToNode := func() {
		for _, driver := range driverNames {
			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI)
		}
	}

	initCSINode := func() {
		csiNode = &storagev1.CSINode{
			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
			Spec: storagev1.CSINodeSpec{
				Drivers: []storagev1.CSINodeDriver{},
			},
		}
	}

	addDriversCSINode := func(addLimits bool) {
		initCSINode()
		for _, driver := range driverNames {
			driver := storagev1.CSINodeDriver{
				Name: driver,
				NodeID: "node-for-max-pd-test-1",
			}
			if addLimits {
				driver.Allocatable = &storagev1.VolumeNodeResources{
					Count: ptr.To(limit),
				}
			}
			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
		}
	}

	switch limitSource {
	case "node":
		addLimitToNode()
	case "csinode":
		addDriversCSINode(true)
	case "both":
		addLimitToNode()
		addDriversCSINode(true)
	case "csinode-with-no-limit":
		addDriversCSINode(false)
	case "no-csi-driver":
		initCSINode()
	default:
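		// No limit source: leave both the node allocatable and the CSINode unset.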
	}

	nodeInfo.SetNode(node)
	return nodeInfo, csiNode
}