package resourceclaim

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"

	v1 "k8s.io/api/core/v1"
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/cache"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"
	ephemeralvolumemetrics "k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
	"k8s.io/utils/pointer"
)

var (
	testPodName = "test-pod"
	testNamespace = "my-namespace"
	testPodUID = types.UID("uidpod1")
	otherNamespace = "not-my-namespace"
	podResourceClaimName = "acme-resource"
	templateName = "my-template"
	className = "my-resource-class"
	nodeName = "worker"

	testPod = makePod(testPodName, testNamespace, testPodUID)
	testPodWithResource = makePod(testPodName, testNamespace, testPodUID, *makePodResourceClaim(podResourceClaimName, templateName))
	otherTestPod = makePod(testPodName+"-II", testNamespace, testPodUID+"-II")

	testClaim = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, makeOwnerReference(testPodWithResource, true))
	testClaimAllocated = allocateClaim(testClaim)
	testClaimReserved = reserveClaim(testClaimAllocated, testPodWithResource)
	testClaimReservedTwice = reserveClaim(testClaimReserved, otherTestPod)

	generatedTestClaim = makeGeneratedClaim(podResourceClaimName, testPodName+"-"+podResourceClaimName+"-", testNamespace, className, 1, makeOwnerReference(testPodWithResource, true))
	generatedTestClaimAllocated = allocateClaim(generatedTestClaim)
	generatedTestClaimReserved = reserveClaim(generatedTestClaimAllocated, testPodWithResource)

	conflictingClaim = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, nil)
	otherNamespaceClaim = makeClaim(testPodName+"-"+podResourceClaimName, otherNamespace, className, nil)
	template = makeTemplate(templateName, testNamespace, className)

	testPodWithNodeName = func() *v1.Pod {
		pod := testPodWithResource.DeepCopy()
		pod.Spec.NodeName = nodeName
		pod.Status.ResourceClaimStatuses = append(pod.Status.ResourceClaimStatuses, v1.PodResourceClaimStatus{
			Name: pod.Spec.ResourceClaims[0].Name,
			ResourceClaimName: &generatedTestClaim.Name,
		})
		return pod
	}()

	podSchedulingContext = resourcev1alpha2.PodSchedulingContext{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPodName,
			Namespace: testNamespace,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion: "v1",
					Kind: "Pod",
					Name: testPodName,
					UID: testPodUID,
					Controller: pointer.Bool(true),
				},
			},
		},
		Spec: resourcev1alpha2.PodSchedulingContextSpec{
			SelectedNode: nodeName,
		},
	}
)

func init() {
	klog.InitFlags(nil)
}

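// TestSyncHandler exercises the controller's sync function with table-driven scenarios that cover
// claim creation from templates, finding existing claims, reservation handling, and cleanup.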
func TestSyncHandler(t *testing.T) {
	tests := []struct {
		name string
		key string
		claims []*resourcev1alpha2.ResourceClaim
		claimsInCache []*resourcev1alpha2.ResourceClaim
		pods []*v1.Pod
		podsLater []*v1.Pod
		templates []*resourcev1alpha2.ResourceClaimTemplate
		expectedClaims []resourcev1alpha2.ResourceClaim
		expectedPodSchedulingContexts []resourcev1alpha2.PodSchedulingContext
		expectedStatuses map[string][]v1.PodResourceClaimStatus
		expectedError bool
		expectedMetrics expectedMetrics
	}{
		{
			name: "create",
			pods: []*v1.Pod{testPodWithResource},
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			key: podKey(testPodWithResource),
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name: "nop",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			key: podKey(testPodWithResource),
			claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "recreate",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			key: podKey(testPodWithResource),
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name: "missing-template",
			pods: []*v1.Pod{testPodWithResource},
			templates: nil,
			key: podKey(testPodWithResource),
			expectedError: true,
		},
		{
			name: "find-existing-claim-by-label",
			pods: []*v1.Pod{testPodWithResource},
			key: podKey(testPodWithResource),
			claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "find-existing-claim-by-name",
			pods: []*v1.Pod{testPodWithResource},
			key: podKey(testPodWithResource),
			claims: []*resourcev1alpha2.ResourceClaim{testClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &testClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "find-created-claim-in-cache",
			pods: []*v1.Pod{testPodWithResource},
			key: podKey(testPodWithResource),
			claimsInCache: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "no-such-pod",
			key: podKey(testPodWithResource),
		},
		{
			name: "pod-deleted",
			pods: func() []*v1.Pod {
				deleted := metav1.Now()
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].DeletionTimestamp = &deleted
				return pods
			}(),
			key: podKey(testPodWithResource),
		},
		{
			name: "no-volumes",
			pods: []*v1.Pod{testPod},
			key: podKey(testPod),
		},
		{
			name: "create-with-other-claim",
			pods: []*v1.Pod{testPodWithResource},
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			key: podKey(testPodWithResource),
			claims: []*resourcev1alpha2.ResourceClaim{otherNamespaceClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*otherNamespaceClaim, *generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name: "wrong-claim-owner",
			pods: []*v1.Pod{testPodWithResource},
			key: podKey(testPodWithResource),
			claims: []*resourcev1alpha2.ResourceClaim{conflictingClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*conflictingClaim},
			expectedError: true,
		},
		{
			name: "create-conflict",
			pods: []*v1.Pod{testPodWithResource},
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			key: podKey(testPodWithResource),
			expectedMetrics: expectedMetrics{1, 1},
			expectedError: true,
		},
		{
			name: "stay-reserved-seen",
			pods: []*v1.Pod{testPodWithResource},
			key: claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "stay-reserved-not-seen",
			podsLater: []*v1.Pod{testPodWithResource},
			key: claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-delayed-allocation",
			pods: []*v1.Pod{},
			key: claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Status.DeallocationRequested = true
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-immediate-allocation",
			pods: []*v1.Pod{},
			key: claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claim := testClaimReserved.DeepCopy()
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []*resourcev1alpha2.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return []resourcev1alpha2.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-when-done-delayed-allocation",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
				claims[0].OwnerReferences = nil
				return claims
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Status.DeallocationRequested = true
				return claims
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-when-done-immediate-allocation",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: func() []*resourcev1alpha2.ResourceClaim {
				claims := []*resourcev1alpha2.ResourceClaim{testClaimReserved.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return claims
			}(),
			expectedClaims: func() []resourcev1alpha2.ResourceClaim {
				claims := []resourcev1alpha2.ResourceClaim{*testClaimAllocated.DeepCopy()}
				claims[0].OwnerReferences = nil
				claims[0].Spec.AllocationMode = resourcev1alpha2.AllocationModeImmediate
				return claims
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "remove-reserved",
			pods: []*v1.Pod{testPod},
			key: claimKey(testClaimReservedTwice),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReservedTwice},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "delete-claim-when-done",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: []*resourcev1alpha2.ResourceClaim{testClaimReserved},
			expectedClaims: nil,
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "trigger-allocation",
			pods: []*v1.Pod{testPodWithNodeName},
			key: podKey(testPodWithNodeName),
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithNodeName.Name: {
					{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedPodSchedulingContexts: []resourcev1alpha2.PodSchedulingContext{podSchedulingContext},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "add-reserved",
			pods: []*v1.Pod{testPodWithNodeName},
			key: podKey(testPodWithNodeName),
			templates: []*resourcev1alpha2.ResourceClaimTemplate{template},
			claims: []*resourcev1alpha2.ResourceClaim{generatedTestClaimAllocated},
			expectedClaims: []resourcev1alpha2.ResourceClaim{*generatedTestClaimReserved},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithNodeName.Name: {
					{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
	}

	for _, tc := range tests {
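		// The subtests share global metrics state (reset in setupMetrics), so they must not run in parallel.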
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			var objects []runtime.Object
			for _, pod := range tc.pods {
				objects = append(objects, pod)
			}
			for _, claim := range tc.claims {
				objects = append(objects, claim)
			}
			for _, template := range tc.templates {
				objects = append(objects, template)
			}

			fakeKubeClient := createTestClient(objects...)
			if tc.expectedMetrics.numFailures > 0 {
				fakeKubeClient.PrependReactor("create", "resourceclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, nil, apierrors.NewConflict(action.GetResource().GroupResource(), "fake name", errors.New("fake conflict"))
				})
			}
			setupMetrics()
			informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
			podInformer := informerFactory.Core().V1().Pods()
			podSchedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
			claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
			templateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates()

			ec, err := NewController(klog.FromContext(ctx), fakeKubeClient, podInformer, podSchedulingInformer, claimInformer, templateInformer)
			if err != nil {
				t.Fatalf("error creating resource claim controller: %v", err)
			}

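			// Start the informers and wait until their caches are populated with the initial objects.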
			go informerFactory.Start(ctx.Done())
			stopInformers := func() {
				cancel()
				informerFactory.Shutdown()
			}
			defer stopInformers()
			informerFactory.WaitForCacheSync(ctx.Done())
			cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced, claimInformer.Informer().HasSynced, templateInformer.Informer().HasSynced)

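			// Add claims to the controller's mutation cache, simulating claims that it created
			// earlier but which have not (yet) shown up in the informer cache.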
			for _, claim := range tc.claimsInCache {
				ec.claimCache.Mutation(claim)
			}

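			// Pods in podsLater are created only after the informers have been stopped, so they are
			// visible in the apiserver but not in the controller's informer caches.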
			stopInformers()
			for _, pod := range tc.podsLater {
				_, err := fakeKubeClient.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("unexpected error while creating pod: %v", err)
				}
			}

			err = ec.syncHandler(context.TODO(), tc.key)
			if err != nil && !tc.expectedError {
				t.Fatalf("unexpected error while running handler: %v", err)
			}
			if err == nil && tc.expectedError {
				t.Fatalf("unexpected success")
			}

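			// Compare the state in the apiserver against the expectations.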
			claims, err := fakeKubeClient.ResourceV1alpha2().ResourceClaims("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing claims: %v", err)
			}
			assert.Equal(t, normalizeClaims(tc.expectedClaims), normalizeClaims(claims.Items))

			pods, err := fakeKubeClient.CoreV1().Pods("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing pods: %v", err)
			}
			var actualStatuses map[string][]v1.PodResourceClaimStatus
			for _, pod := range pods.Items {
				if len(pod.Status.ResourceClaimStatuses) == 0 {
					continue
				}
				if actualStatuses == nil {
					actualStatuses = make(map[string][]v1.PodResourceClaimStatus)
				}
				actualStatuses[pod.Name] = pod.Status.ResourceClaimStatuses
			}
			assert.Equal(t, tc.expectedStatuses, actualStatuses, "pod resource claim statuses")

			scheduling, err := fakeKubeClient.ResourceV1alpha2().PodSchedulingContexts("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing pod scheduling contexts: %v", err)
			}
			assert.Equal(t, normalizeScheduling(tc.expectedPodSchedulingContexts), normalizeScheduling(scheduling.Items))

			expectMetrics(t, tc.expectedMetrics)
		})
	}
}

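// makeClaim returns a ResourceClaim with the given name, namespace, and class,
// optionally owned by the given owner reference.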
func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
	claim := &resourcev1alpha2.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: resourcev1alpha2.ResourceClaimSpec{
			ResourceClassName: classname,
			AllocationMode: resourcev1alpha2.AllocationModeWaitForFirstConsumer,
		},
	}
	if owner != nil {
		claim.OwnerReferences = []metav1.OwnerReference{*owner}
	}

	return claim
}

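// makeGeneratedClaim returns a ResourceClaim as the controller is expected to generate it from a
// pod claim: the name is derived from generateName and the create counter, and the originating
// pod claim name is recorded in an annotation.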
func makeGeneratedClaim(podClaimName, generateName, namespace, classname string, createCounter int, owner *metav1.OwnerReference) *resourcev1alpha2.ResourceClaim {
	claim := &resourcev1alpha2.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: fmt.Sprintf("%s-%d", generateName, createCounter),
			GenerateName: generateName,
			Namespace: namespace,
			Annotations: map[string]string{"resource.kubernetes.io/pod-claim-name": podClaimName},
		},
		Spec: resourcev1alpha2.ResourceClaimSpec{
			ResourceClassName: classname,
			AllocationMode: resourcev1alpha2.AllocationModeWaitForFirstConsumer,
		},
	}
	if owner != nil {
		claim.OwnerReferences = []metav1.OwnerReference{*owner}
	}

	return claim
}

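// allocateClaim returns a copy of the claim with a shareable allocation result set in its status.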
func allocateClaim(claim *resourcev1alpha2.ResourceClaim) *resourcev1alpha2.ResourceClaim {
	claim = claim.DeepCopy()
	claim.Status.Allocation = &resourcev1alpha2.AllocationResult{
		Shareable: true,
	}
	return claim
}

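// reserveClaim returns a copy of the claim with the given pod appended to its ReservedFor list.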
func reserveClaim(claim *resourcev1alpha2.ResourceClaim, pod *v1.Pod) *resourcev1alpha2.ResourceClaim {
	claim = claim.DeepCopy()
	claim.Status.ReservedFor = append(claim.Status.ReservedFor,
		resourcev1alpha2.ResourceClaimConsumerReference{
			Resource: "pods",
			Name: pod.Name,
			UID: pod.UID,
		},
	)
	return claim
}

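// makePodResourceClaim returns a pod-level resource claim entry that references a ResourceClaimTemplate.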
func makePodResourceClaim(name, templateName string) *v1.PodResourceClaim {
	return &v1.PodResourceClaim{
		Name: name,
		Source: v1.ClaimSource{
			ResourceClaimTemplateName: &templateName,
		},
	}
}

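// makePod returns a pod with the given UID and pod-level resource claims.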
func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceClaim) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, UID: uid},
		Spec: v1.PodSpec{
			ResourceClaims: podClaims,
		},
	}

	return pod
}

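// makeTemplate returns a ResourceClaimTemplate that produces claims of the given resource class.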
func makeTemplate(name, namespace, classname string) *resourcev1alpha2.ResourceClaimTemplate {
	template := &resourcev1alpha2.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
			Spec: resourcev1alpha2.ResourceClaimSpec{
				ResourceClassName: classname,
			},
		},
	}
	return template
}

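// podKey returns the work queue key for a pod.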
func podKey(pod *v1.Pod) string {
	return podKeyPrefix + pod.Namespace + "/" + pod.Name
}

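// claimKey returns the work queue key for a claim.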
func claimKey(claim *resourcev1alpha2.ResourceClaim) string {
	return claimKeyPrefix + claim.Namespace + "/" + claim.Name
}

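// makeOwnerReference returns the owner reference that a claim owned by the pod is expected to carry.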
func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
	isTrue := true
	return &metav1.OwnerReference{
		APIVersion: "v1",
		Kind: "Pod",
		Name: pod.Name,
		UID: pod.UID,
		Controller: &isController,
		BlockOwnerDeletion: &isTrue,
	}
}

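// normalizeClaims sorts the claims by namespace and name and normalizes fields that the fake
// client handles differently from a real apiserver, so that comparisons are deterministic.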
func normalizeClaims(claims []resourcev1alpha2.ResourceClaim) []resourcev1alpha2.ResourceClaim {
	sort.Slice(claims, func(i, j int) bool {
		if claims[i].Namespace < claims[j].Namespace {
			return true
		}
		if claims[i].Namespace > claims[j].Namespace {
			return false
		}
		return claims[i].Name < claims[j].Name
	})
	for i := range claims {
		if len(claims[i].Status.ReservedFor) == 0 {
			claims[i].Status.ReservedFor = nil
		}
		if claims[i].Spec.AllocationMode == "" {
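			// The fake client does not run defaulting, so emulate the default allocation mode here.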
			claims[i].Spec.AllocationMode = resourcev1alpha2.AllocationModeWaitForFirstConsumer
		}
	}
	return claims
}

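// normalizeScheduling sorts the scheduling contexts by namespace and name.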
func normalizeScheduling(scheduling []resourcev1alpha2.PodSchedulingContext) []resourcev1alpha2.PodSchedulingContext {
	sort.Slice(scheduling, func(i, j int) bool {
		if scheduling[i].Namespace != scheduling[j].Namespace {
			return scheduling[i].Namespace < scheduling[j].Namespace
		}
		return scheduling[i].Name < scheduling[j].Name
	})
	return scheduling
}

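// createTestClient returns a fake clientset pre-loaded with the given objects and with a reactor
// that emulates the apiserver's name generation for ResourceClaims.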
func createTestClient(objects ...runtime.Object) *fake.Clientset {
	fakeClient := fake.NewSimpleClientset(objects...)
	fakeClient.PrependReactor("create", "resourceclaims", createResourceClaimReactor())
	return fakeClient
}

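// createResourceClaimReactor returns a reactor which assigns a name to ResourceClaims that are
// created with only GenerateName set, because the fake client does not implement name generation.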
func createResourceClaimReactor() func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
	nameCounter := 1
	var mutex sync.Mutex
	return func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
		mutex.Lock()
		defer mutex.Unlock()
		claim := action.(k8stesting.CreateAction).GetObject().(*resourcev1alpha2.ResourceClaim)
		if claim.Name == "" && claim.GenerateName != "" {
			claim.Name = fmt.Sprintf("%s-%d", claim.GenerateName, nameCounter)
		}
		nameCounter++
		return false, nil, nil
	}
}

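// expectedMetrics describes the expected values of the claim creation metrics.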
type expectedMetrics struct {
	numCreated int
	numFailures int
}

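// expectMetrics compares the claim creation metrics against the expected values.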
func expectMetrics(t *testing.T, em expectedMetrics) {
	t.Helper()

	actualCreated, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateAttempts)
	handleErr(t, err, "ResourceClaimCreate")
	if actualCreated != float64(em.numCreated) {
		t.Errorf("expected %d claims to be created, got %v", em.numCreated, actualCreated)
	}
	actualConflicts, err := testutil.GetCounterMetricValue(ephemeralvolumemetrics.ResourceClaimCreateFailures)
	handleErr(t, err, "ResourceClaimCreate/Conflict")
	if actualConflicts != float64(em.numFailures) {
		t.Errorf("expected %d claim creation failures, got %v", em.numFailures, actualConflicts)
	}
}

func handleErr(t *testing.T, err error, metricName string) {
	if err != nil {
		t.Errorf("Failed to get %s value, err: %v", metricName, err)
	}
}

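// setupMetrics registers the metrics and resets the counters so that each test starts from zero.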
func setupMetrics() {
	ephemeralvolumemetrics.RegisterMetrics()
	ephemeralvolumemetrics.ResourceClaimCreateAttempts.Reset()
	ephemeralvolumemetrics.ResourceClaimCreateFailures.Reset()
}