1
16
17 package volumebinding
18
19 import (
20 "context"
21 "testing"
22
23 "github.com/google/go-cmp/cmp"
24 "github.com/google/go-cmp/cmp/cmpopts"
25 "github.com/stretchr/testify/assert"
26 v1 "k8s.io/api/core/v1"
27 storagev1 "k8s.io/api/storage/v1"
28 "k8s.io/apimachinery/pkg/api/resource"
29 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
30 "k8s.io/apimachinery/pkg/util/sets"
31 "k8s.io/client-go/informers"
32 "k8s.io/client-go/kubernetes/fake"
33 "k8s.io/klog/v2/ktesting"
34 "k8s.io/kubernetes/pkg/scheduler/apis/config"
35 "k8s.io/kubernetes/pkg/scheduler/framework"
36 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
37 "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
38 tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
39 )
40
var (
	// Volume binding modes referenced (by pointer) from the test
	// StorageClasses below.
	immediate            = storagev1.VolumeBindingImmediate
	waitForFirstConsumer = storagev1.VolumeBindingWaitForFirstConsumer

	// immediateSC binds volumes as soon as a PVC is created; pods using an
	// unbound PVC of this class are unschedulable until binding happens.
	immediateSC = &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "immediate-sc",
		},
		VolumeBindingMode: &immediate,
	}
	// waitSC delays binding until a pod that uses the PVC is scheduled
	// (WaitForFirstConsumer), which is the mode the plugin reasons about.
	waitSC = &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "wait-sc",
		},
		VolumeBindingMode: &waitForFirstConsumer,
	}
	// waitHDDSC is a second WaitForFirstConsumer class, used by cases where
	// a single pod's PVCs span multiple storage classes.
	waitHDDSC = &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "wait-hdd-sc",
		},
		VolumeBindingMode: &waitForFirstConsumer,
	}

	// defaultShapePoint is the default scoring curve used when a test case
	// enables VolumeCapacityPriority without supplying explicit args: score
	// rises linearly from 0 at 0% utilization to MaxCustomPriorityScore at
	// 100% utilization.
	defaultShapePoint = []config.UtilizationShapePoint{
		{
			Utilization: 0,
			Score:       0,
		},
		{
			Utilization: 100,
			Score:       int32(config.MaxCustomPriorityScore),
		},
	}
)
74
// TestVolumeBinding drives the VolumeBinding plugin end to end through its
// PreFilter, Filter, PreScore and Score extension points against a fake
// clientset and informers. Each table entry seeds cluster objects (nodes,
// PVCs, PVs), optionally overrides plugin args/features, and declares the
// expected outcome at every phase; verification of later phases stops as
// soon as an earlier phase returns a non-success status.
func TestVolumeBinding(t *testing.T) {
	table := []struct {
		name  string
		pod   *v1.Pod
		nodes []*v1.Node
		pvcs  []*v1.PersistentVolumeClaim
		pvs   []*v1.PersistentVolume
		// fts toggles optional plugin features (e.g. VolumeCapacityPriority).
		fts feature.Features
		// args overrides the plugin configuration; nil means defaults
		// (BindTimeoutSeconds=300, plus defaultShapePoint when the
		// capacity-priority feature is enabled).
		args                *config.VolumeBindingArgs
		wantPreFilterResult *framework.PreFilterResult
		wantPreFilterStatus *framework.Status
		// wantStateAfterPreFilter is the cycle-state snapshot expected after
		// a successful PreFilter; only compared when PreFilter succeeds.
		wantStateAfterPreFilter *stateData
		// wantFilterStatus / wantScores are indexed parallel to nodes.
		wantFilterStatus   []*framework.Status
		wantScores         []int64
		wantPreScoreStatus *framework.Status
	}{
		{
			// A pod with no PVC volumes: the plugin has nothing to do and
			// skips both filtering and scoring.
			name: "pod has not pvcs",
			pod:  makePod("pod-a").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			wantPreFilterStatus: framework.NewStatus(framework.Skip),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// All claims already bound (PV has no node affinity), so any
			// node passes Filter and scoring is skipped.
			name: "all bound",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a", waitSC.Name).withPhase(v1.VolumeAvailable).PersistentVolume,
			},
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
					},
					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// Bound local PVs pin the pod to "node-a": PreFilter can
			// pre-compute the eligible node set from the PVs' node affinity.
			name: "all bound with local volumes",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "volume-a").withPVCVolume("pvc-b", "volume-b").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
				makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
					v1.LabelHostname: {"node-a"},
				}).PersistentVolume,
				makePV("pv-b", waitSC.Name).withPhase(v1.VolumeBound).withNodeAffinity(map[string][]string{
					v1.LabelHostname: {"node-a"},
				}).PersistentVolume,
			},
			wantPreFilterResult: &framework.PreFilterResult{
				NodeNames: sets.New("node-a"),
			},
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
						makePVC("pvc-b", waitSC.Name).withBoundPV("pv-b").PersistentVolumeClaim,
					},
					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// Referencing a PVC that was never created fails PreFilter as
			// UnschedulableAndUnresolvable.
			name: "PVC does not exist",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs:                []*v1.PersistentVolumeClaim{},
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantScores: []int64{
				0,
			},
		},
		{
			// One existing and one missing PVC: the missing one still fails
			// PreFilter.
			name: "Part of PVCs do not exist",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").withPVCVolume("pvc-b", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
			},
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-b" not found`),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantScores: []int64{
				0,
			},
		},
		{
			// An unbound PVC with an Immediate-mode class cannot be resolved
			// by scheduling, so PreFilter rejects the pod outright.
			name: "immediate claims not bound",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", immediateSC.Name).PersistentVolumeClaim,
			},
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod has unbound immediate PersistentVolumeClaims"),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantScores: []int64{
				0,
			},
		},
		{
			// WaitForFirstConsumer claim with no matching PV: PreFilter
			// succeeds but Filter reports a bind conflict on every node.
			name: "unbound claims no matches",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
			},
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
					},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSC.Name: {}},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				framework.NewStatus(framework.UnschedulableAndUnresolvable, string(ErrReasonBindConflict)),
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// Bound PV requires label foo=bar but the node has foo=barbar
			// (node conflict), and the unbound claim has no candidate PV
			// (bind conflict): Filter reports both reasons.
			name: "bound and unbound unsatisfied",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").withPVCVolume("pvc-b", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").withLabel("foo", "barbar").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
				makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
			},
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
					},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
					},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
						waitSC.Name: {
							makePV("pv-a", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
						},
					},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				framework.NewStatus(framework.UnschedulableAndUnresolvable, string(ErrReasonNodeConflict), string(ErrReasonBindConflict)),
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// Same as "PVC does not exist" but with no pvcs slice at all.
			name: "pvc not found",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantScores: []int64{
				0,
			},
		},
		{
			// PVC claims to be bound to a PV that does not exist: PreFilter
			// succeeds but Filter fails on every node.
			name: "pv not found",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
			},
			wantPreFilterStatus: nil,
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
					},
					unboundClaimsDelayBinding:  []*v1.PersistentVolumeClaim{},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) unavailable due to one or more pvc(s) bound to non-existent pv(s)`),
			},
			wantPreScoreStatus: framework.NewStatus(framework.Skip),
		},
		{
			// A Lost claim bound to a missing PV is rejected already in
			// PreFilter with a claim-specific message.
			name: "pv not found claim lost",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").withPhase(v1.ClaimLost).PersistentVolumeClaim,
			},
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" bound to non-existent persistentvolume "pv-a"`),
			wantFilterStatus: []*framework.Status{
				nil,
			},
			wantScores: []int64{
				0,
			},
		},
		{
			// Capacity scoring: a 50Gi request fits node-b's 100Gi PVs more
			// tightly (50% -> score 50) than node-a's 200Gi PVs (25% -> 25);
			// node-c has no PVs and is filtered out.
			name: "local volumes with close capacity are preferred",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
				makeNode("node-b").Node,
				makeNode("node-c").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-a-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-b-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
				makePV("pv-b-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
			},
			fts: feature.Features{
				EnableVolumeCapacityPriority: true,
			},
			wantPreFilterStatus: nil,
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
					},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
						waitSC.Name: {
							makePV("pv-a-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-a-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-b-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
							makePV("pv-b-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
						},
					},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
				nil,
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
			},
			wantScores: []int64{
				25,
				50,
				0,
			},
		},
		{
			// Capacity scoring with two PVCs in two storage classes; node
			// scores average the per-class utilization (node-a: (25+50)/2=38
			// rounded, node-b: (50+100)/2=75).
			name: "local volumes with close capacity are preferred (multiple pvcs)",
			pod:  makePod("pod-a").withPVCVolume("pvc-0", "").withPVCVolume("pvc-1", "").Pod,
			nodes: []*v1.Node{
				makeNode("node-a").Node,
				makeNode("node-b").Node,
				makeNode("node-c").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
				makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-a-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-a-2", waitHDDSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-a-3", waitHDDSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
				makePV("pv-b-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
				makePV("pv-b-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
				makePV("pv-b-2", waitHDDSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
				makePV("pv-b-3", waitHDDSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
			},
			fts: feature.Features{
				EnableVolumeCapacityPriority: true,
			},
			wantPreFilterStatus: nil,
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
						makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
					},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
						waitHDDSC.Name: {
							makePV("pv-a-2", waitHDDSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-a-3", waitHDDSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-b-2", waitHDDSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
							makePV("pv-b-3", waitHDDSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
						},
						waitSC.Name: {
							makePV("pv-a-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-a-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
							makePV("pv-b-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
							makePV("pv-b-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
						},
					},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
				nil,
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
			},
			wantScores: []int64{
				38,
				75,
				0,
			},
		},
		{
			// Zonal (non-hostname) topology: both nodes in a zone share that
			// zone's PVs and therefore its score; zone-c has no PVs at all.
			name: "zonal volumes with close capacity are preferred",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("zone-a-node-a").
					withLabel("topology.kubernetes.io/region", "region-a").
					withLabel("topology.kubernetes.io/zone", "zone-a").Node,
				makeNode("zone-a-node-b").
					withLabel("topology.kubernetes.io/region", "region-a").
					withLabel("topology.kubernetes.io/zone", "zone-a").Node,
				makeNode("zone-b-node-a").
					withLabel("topology.kubernetes.io/region", "region-b").
					withLabel("topology.kubernetes.io/zone", "zone-b").Node,
				makeNode("zone-b-node-b").
					withLabel("topology.kubernetes.io/region", "region-b").
					withLabel("topology.kubernetes.io/zone", "zone-b").Node,
				makeNode("zone-c-node-a").
					withLabel("topology.kubernetes.io/region", "region-c").
					withLabel("topology.kubernetes.io/zone", "zone-c").Node,
				makeNode("zone-c-node-b").
					withLabel("topology.kubernetes.io/region", "region-c").
					withLabel("topology.kubernetes.io/zone", "zone-c").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-a"},
						"topology.kubernetes.io/zone":   {"zone-a"},
					}).PersistentVolume,
				makePV("pv-a-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-a"},
						"topology.kubernetes.io/zone":   {"zone-a"},
					}).PersistentVolume,
				makePV("pv-b-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-b"},
						"topology.kubernetes.io/zone":   {"zone-b"},
					}).PersistentVolume,
				makePV("pv-b-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-b"},
						"topology.kubernetes.io/zone":   {"zone-b"},
					}).PersistentVolume,
			},
			fts: feature.Features{
				EnableVolumeCapacityPriority: true,
			},
			wantPreFilterStatus: nil,
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
					},
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
						waitSC.Name: {
							makePV("pv-a-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-a"},
									"topology.kubernetes.io/zone":   {"zone-a"},
								}).PersistentVolume,
							makePV("pv-a-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-a"},
									"topology.kubernetes.io/zone":   {"zone-a"},
								}).PersistentVolume,
							makePV("pv-b-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-b"},
									"topology.kubernetes.io/zone":   {"zone-b"},
								}).PersistentVolume,
							makePV("pv-b-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-b"},
									"topology.kubernetes.io/zone":   {"zone-b"},
								}).PersistentVolume,
						},
					},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
				nil,
				nil,
				nil,
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
			},
			wantScores: []int64{
				25,
				25,
				50,
				50,
				0,
				0,
			},
		},
		{
			// Same zonal scenario but with an explicit three-point scoring
			// shape (0->0, 50->3, 100->5), so scores scale to that curve
			// after normalization to the framework's score range.
			name: "zonal volumes with close capacity are preferred (custom shape)",
			pod:  makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
			nodes: []*v1.Node{
				makeNode("zone-a-node-a").
					withLabel("topology.kubernetes.io/region", "region-a").
					withLabel("topology.kubernetes.io/zone", "zone-a").Node,
				makeNode("zone-a-node-b").
					withLabel("topology.kubernetes.io/region", "region-a").
					withLabel("topology.kubernetes.io/zone", "zone-a").Node,
				makeNode("zone-b-node-a").
					withLabel("topology.kubernetes.io/region", "region-b").
					withLabel("topology.kubernetes.io/zone", "zone-b").Node,
				makeNode("zone-b-node-b").
					withLabel("topology.kubernetes.io/region", "region-b").
					withLabel("topology.kubernetes.io/zone", "zone-b").Node,
				makeNode("zone-c-node-a").
					withLabel("topology.kubernetes.io/region", "region-c").
					withLabel("topology.kubernetes.io/zone", "zone-c").Node,
				makeNode("zone-c-node-b").
					withLabel("topology.kubernetes.io/region", "region-c").
					withLabel("topology.kubernetes.io/zone", "zone-c").Node,
			},
			pvcs: []*v1.PersistentVolumeClaim{
				makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
			},
			pvs: []*v1.PersistentVolume{
				makePV("pv-a-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-a"},
						"topology.kubernetes.io/zone":   {"zone-a"},
					}).PersistentVolume,
				makePV("pv-a-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("200Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-a"},
						"topology.kubernetes.io/zone":   {"zone-a"},
					}).PersistentVolume,
				makePV("pv-b-0", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-b"},
						"topology.kubernetes.io/zone":   {"zone-b"},
					}).PersistentVolume,
				makePV("pv-b-1", waitSC.Name).
					withPhase(v1.VolumeAvailable).
					withCapacity(resource.MustParse("100Gi")).
					withNodeAffinity(map[string][]string{
						"topology.kubernetes.io/region": {"region-b"},
						"topology.kubernetes.io/zone":   {"zone-b"},
					}).PersistentVolume,
			},
			fts: feature.Features{
				EnableVolumeCapacityPriority: true,
			},
			args: &config.VolumeBindingArgs{
				BindTimeoutSeconds: 300,
				Shape: []config.UtilizationShapePoint{
					{
						Utilization: 0,
						Score:       0,
					},
					{
						Utilization: 50,
						Score:       3,
					},
					{
						Utilization: 100,
						Score:       5,
					},
				},
			},
			wantPreFilterStatus: nil,
			wantStateAfterPreFilter: &stateData{
				podVolumeClaims: &PodVolumeClaims{
					boundClaims: []*v1.PersistentVolumeClaim{},
					unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
						makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
					},
					unboundClaimsImmediate: nil,
					unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
						waitSC.Name: {
							makePV("pv-a-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-a"},
									"topology.kubernetes.io/zone":   {"zone-a"},
								}).PersistentVolume,
							makePV("pv-a-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("200Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-a"},
									"topology.kubernetes.io/zone":   {"zone-a"},
								}).PersistentVolume,
							makePV("pv-b-0", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-b"},
									"topology.kubernetes.io/zone":   {"zone-b"},
								}).PersistentVolume,
							makePV("pv-b-1", waitSC.Name).
								withPhase(v1.VolumeAvailable).
								withCapacity(resource.MustParse("100Gi")).
								withNodeAffinity(map[string][]string{
									"topology.kubernetes.io/region": {"region-b"},
									"topology.kubernetes.io/zone":   {"zone-b"},
								}).PersistentVolume,
						},
					},
				},
				podVolumesByNode: map[string]*PodVolumes{},
			},
			wantFilterStatus: []*framework.Status{
				nil,
				nil,
				nil,
				nil,
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
				framework.NewStatus(framework.UnschedulableAndUnresolvable, `node(s) didn't find available persistent volumes to bind`),
			},
			wantScores: []int64{
				15,
				15,
				30,
				30,
				0,
				0,
			},
		},
	}

	for _, item := range table {
		t.Run(item.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			// Build a minimal scheduler framework handle backed by a fake
			// clientset so the plugin's informer-based listers work.
			client := fake.NewSimpleClientset()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			opts := []runtime.Option{
				runtime.WithClientSet(client),
				runtime.WithInformerFactory(informerFactory),
			}
			fh, err := runtime.NewFramework(ctx, nil, nil, opts...)
			if err != nil {
				t.Fatal(err)
			}

			args := item.args
			if args == nil {
				// Use default configuration when the case doesn't supply
				// explicit plugin args.
				args = &config.VolumeBindingArgs{
					BindTimeoutSeconds: 300,
				}
				if item.fts.EnableVolumeCapacityPriority {
					args.Shape = defaultShapePoint
				}
			}

			pl, err := New(ctx, args, fh, item.fts)
			if err != nil {
				t.Fatal(err)
			}

			t.Log("Feed testing data and wait for them to be synced")
			client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
			client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
			client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{})
			for _, node := range item.nodes {
				client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
			}
			for _, pvc := range item.pvcs {
				client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
			}
			for _, pv := range item.pvs {
				client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
			}

			t.Log("Start informer factory after initialization")
			informerFactory.Start(ctx.Done())

			t.Log("Wait for all started informers' cache were synced")
			informerFactory.WaitForCacheSync(ctx.Done())

			t.Log("Verify")

			p := pl.(*VolumeBinding)
			nodeInfos := make([]*framework.NodeInfo, 0)
			for _, node := range item.nodes {
				nodeInfo := framework.NewNodeInfo()
				nodeInfo.SetNode(node)
				nodeInfos = append(nodeInfos, nodeInfo)
			}
			state := framework.NewCycleState()

			t.Logf("Verify: call PreFilter and check status")
			gotPreFilterResult, gotPreFilterStatus := p.PreFilter(ctx, state, item.pod)
			assert.Equal(t, item.wantPreFilterStatus, gotPreFilterStatus)
			assert.Equal(t, item.wantPreFilterResult, gotPreFilterResult)

			if !gotPreFilterStatus.IsSuccess() {
				// PreFilter rejected (or skipped) the pod; later phases
				// would not run in a real scheduling cycle.
				return
			}

			t.Logf("Verify: check state after prefilter phase")
			got, err := getStateData(state)
			if err != nil {
				t.Fatal(err)
			}
			// Compare unexported state fields, ignoring the embedded mutex
			// and normalizing slice order where it is not significant.
			stateCmpOpts := []cmp.Option{
				cmp.AllowUnexported(stateData{}),
				cmp.AllowUnexported(PodVolumeClaims{}),
				cmpopts.IgnoreFields(stateData{}, "Mutex"),
				cmpopts.SortSlices(func(a *v1.PersistentVolume, b *v1.PersistentVolume) bool {
					return a.Name < b.Name
				}),
				cmpopts.SortSlices(func(a v1.NodeSelectorRequirement, b v1.NodeSelectorRequirement) bool {
					return a.Key < b.Key
				}),
			}
			if diff := cmp.Diff(item.wantStateAfterPreFilter, got, stateCmpOpts...); diff != "" {
				t.Errorf("state got after prefilter does not match (-want,+got):\n%s", diff)
			}

			t.Logf("Verify: call Filter and check status")
			for i, nodeInfo := range nodeInfos {
				gotStatus := p.Filter(ctx, state, item.pod, nodeInfo)
				assert.Equal(t, item.wantFilterStatus[i], gotStatus)
			}

			t.Logf("Verify: call PreScore and check status")
			gotPreScoreStatus := p.PreScore(ctx, state, item.pod, tf.BuildNodeInfos(item.nodes))
			if diff := cmp.Diff(item.wantPreScoreStatus, gotPreScoreStatus); diff != "" {
				t.Errorf("state got after prescore does not match (-want,+got):\n%s", diff)
			}
			if !gotPreScoreStatus.IsSuccess() {
				return
			}

			t.Logf("Verify: Score")
			for i, node := range item.nodes {
				score, status := p.Score(ctx, state, item.pod, node.Name)
				if !status.IsSuccess() {
					t.Errorf("Score expects success status, got: %v", status)
				}
				if score != item.wantScores[i] {
					t.Errorf("Score expects score %d for node %q, got: %d", item.wantScores[i], node.Name, score)
				}
			}
		})
	}
}
893
View as plain text