package scheduler

import (
	"context"
	"reflect"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2/ktesting"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic/dynamicinformer"
	dyfake "k8s.io/client-go/dynamic/fake"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"

	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	"k8s.io/kubernetes/pkg/scheduler/internal/queue"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

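// TestNodeAllocatableChanged verifies that nodeAllocatableChanged reports a
// change only when the node's allocatable resources differ between the old
// and new node objects.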
func TestNodeAllocatableChanged(t *testing.T) {
	newQuantity := func(value int64) resource.Quantity {
		return *resource.NewQuantity(value, resource.BinarySI)
	}
	for _, test := range []struct {
		Name           string
		Changed        bool
		OldAllocatable v1.ResourceList
		NewAllocatable v1.ResourceList
	}{
		{
			Name:           "no allocatable resources changed",
			Changed:        false,
			OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
			NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
		},
		{
			Name:           "new node has more allocatable resources",
			Changed:        true,
			OldAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024)},
			NewAllocatable: v1.ResourceList{v1.ResourceMemory: newQuantity(1024), v1.ResourceStorage: newQuantity(1024)},
		},
	} {
		t.Run(test.Name, func(t *testing.T) {
			oldNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.OldAllocatable}}
			newNode := &v1.Node{Status: v1.NodeStatus{Allocatable: test.NewAllocatable}}
			changed := nodeAllocatableChanged(newNode, oldNode)
			if changed != test.Changed {
				t.Errorf("nodeAllocatableChanged should be %t, got %t", test.Changed, changed)
			}
		})
	}
}

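// TestNodeLabelsChanged verifies that nodeLabelsChanged reports a change only
// when the node's label set differs between the old and new node objects.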
func TestNodeLabelsChanged(t *testing.T) {
	for _, test := range []struct {
		Name      string
		Changed   bool
		OldLabels map[string]string
		NewLabels map[string]string
	}{
		{
			Name:      "no labels changed",
			Changed:   false,
			OldLabels: map[string]string{"foo": "bar"},
			NewLabels: map[string]string{"foo": "bar"},
		},
		{
			Name:      "new node has more labels",
			Changed:   true,
			OldLabels: map[string]string{"foo": "bar"},
			NewLabels: map[string]string{"foo": "bar", "test": "value"},
		},
	} {
		t.Run(test.Name, func(t *testing.T) {
			oldNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.OldLabels}}
			newNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.NewLabels}}
			changed := nodeLabelsChanged(newNode, oldNode)
			if changed != test.Changed {
				t.Errorf("Test case %q failed: should be %t, got %t", test.Name, test.Changed, changed)
			}
		})
	}
}

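// TestNodeTaintsChanged verifies that nodeTaintsChanged reports a change only
// when the node's taints differ between the old and new node objects.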
func TestNodeTaintsChanged(t *testing.T) {
	for _, test := range []struct {
		Name      string
		Changed   bool
		OldTaints []v1.Taint
		NewTaints []v1.Taint
	}{
		{
			Name:      "no taint changed",
			Changed:   false,
			OldTaints: []v1.Taint{{Key: "key", Value: "value"}},
			NewTaints: []v1.Taint{{Key: "key", Value: "value"}},
		},
		{
			Name:      "taint value changed",
			Changed:   true,
			OldTaints: []v1.Taint{{Key: "key", Value: "value1"}},
			NewTaints: []v1.Taint{{Key: "key", Value: "value2"}},
		},
	} {
		t.Run(test.Name, func(t *testing.T) {
			oldNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.OldTaints}}
			newNode := &v1.Node{Spec: v1.NodeSpec{Taints: test.NewTaints}}
			changed := nodeTaintsChanged(newNode, oldNode)
			if changed != test.Changed {
				t.Errorf("Test case %q failed: should be %t, got %t", test.Name, test.Changed, changed)
			}
		})
	}
}

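// TestNodeConditionsChanged verifies that nodeConditionsChanged ignores
// heartbeat-only updates but reports a change when a condition is added,
// removed, or its status flips. The guard on the number of NodeCondition
// fields exists so the helper is revisited if the API type grows.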
func TestNodeConditionsChanged(t *testing.T) {
	nodeConditionType := reflect.TypeOf(v1.NodeCondition{})
	if nodeConditionType.NumField() != 6 {
		t.Errorf("NodeCondition type has changed. The nodeConditionsChanged() function must be reevaluated.")
	}

	for _, test := range []struct {
		Name          string
		Changed       bool
		OldConditions []v1.NodeCondition
		NewConditions []v1.NodeCondition
	}{
		{
			Name:          "no condition changed",
			Changed:       false,
			OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
			NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
		},
		{
			Name:          "only LastHeartbeatTime changed",
			Changed:       false,
			OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(1, 0)}},
			NewConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue, LastHeartbeatTime: metav1.Unix(2, 0)}},
		},
		{
			Name:          "new node has more healthy conditions",
			Changed:       true,
			OldConditions: []v1.NodeCondition{},
			NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
		},
		{
			Name:          "new node has fewer unhealthy conditions",
			Changed:       true,
			OldConditions: []v1.NodeCondition{{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue}},
			NewConditions: []v1.NodeCondition{},
		},
		{
			Name:          "condition status changed",
			Changed:       true,
			OldConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}},
			NewConditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}},
		},
	} {
		t.Run(test.Name, func(t *testing.T) {
			oldNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.OldConditions}}
			newNode := &v1.Node{Status: v1.NodeStatus{Conditions: test.NewConditions}}
			changed := nodeConditionsChanged(newNode, oldNode)
			if changed != test.Changed {
				t.Errorf("Test case %q failed: should be %t, got %t", test.Name, test.Changed, changed)
			}
		})
	}
}

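// TestUpdatePodInCache verifies that updatePodInCache keys the scheduler
// cache by pod UID: an update that reuses the UID overwrites the cached
// entry, while an update with a new UID removes the old entry and adds a
// fresh one.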
func TestUpdatePodInCache(t *testing.T) {
	ttl := 10 * time.Second
	nodeName := "node"

	tests := []struct {
		name   string
		oldObj interface{}
		newObj interface{}
	}{
		{
			name:   "pod updated with the same UID",
			oldObj: withPodName(podWithPort("oldUID", nodeName, 80), "pod"),
			newObj: withPodName(podWithPort("oldUID", nodeName, 8080), "pod"),
		},
		{
			name:   "pod updated with different UIDs",
			oldObj: withPodName(podWithPort("oldUID", nodeName, 80), "pod"),
			newObj: withPodName(podWithPort("newUID", nodeName, 8080), "pod"),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			sched := &Scheduler{
				Cache:           cache.New(ctx, ttl),
				SchedulingQueue: queue.NewTestQueue(ctx, nil),
				logger:          logger,
			}
			sched.addPodToCache(tt.oldObj)
			sched.updatePodInCache(tt.oldObj, tt.newObj)

			// When the UID changed, the old pod must have been evicted from the cache.
			if tt.oldObj.(*v1.Pod).UID != tt.newObj.(*v1.Pod).UID {
				if pod, err := sched.Cache.GetPod(tt.oldObj.(*v1.Pod)); err == nil {
					t.Errorf("Got pod UID %v from cache, but the old pod should have been removed", pod.UID)
				}
			}
			pod, err := sched.Cache.GetPod(tt.newObj.(*v1.Pod))
			if err != nil {
				t.Errorf("Failed to get pod from scheduler: %v", err)
			}
			if pod.UID != tt.newObj.(*v1.Pod).UID {
				t.Errorf("Want pod UID %v, got %v", tt.newObj.(*v1.Pod).UID, pod.UID)
			}
		})
	}
}

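// withPodName sets the pod's name and returns the pod, so test fixtures can
// be built fluently.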
func withPodName(pod *v1.Pod, name string) *v1.Pod {
	pod.Name = name
	return pod
}

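// TestPreCheckForNode verifies that the function returned by preCheckForNode
// screens pods against a node's resources, node affinity, node name, host
// ports, and taints before a full scheduling cycle is attempted.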
func TestPreCheckForNode(t *testing.T) {
	cpu4 := map[v1.ResourceName]string{v1.ResourceCPU: "4"}
	cpu8 := map[v1.ResourceName]string{v1.ResourceCPU: "8"}
	cpu16 := map[v1.ResourceName]string{v1.ResourceCPU: "16"}
	tests := []struct {
		name               string
		nodeFn             func() *v1.Node
		existingPods, pods []*v1.Pod
		want               []bool
	}{
		{
			name: "regular node, pods with a single constraint",
			nodeFn: func() *v1.Node {
				return st.MakeNode().Name("fake-node").Label("hostname", "fake-node").Capacity(cpu8).Obj()
			},
			existingPods: []*v1.Pod{
				st.MakePod().Name("p").HostPort(80).Obj(),
			},
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Req(cpu4).Obj(),
				st.MakePod().Name("p2").Req(cpu16).Obj(),
				st.MakePod().Name("p3").Req(cpu4).Req(cpu8).Obj(),
				st.MakePod().Name("p4").NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
				st.MakePod().Name("p5").NodeAffinityNotIn("hostname", []string{"fake-node"}).Obj(),
				st.MakePod().Name("p6").Obj(),
				st.MakePod().Name("p7").Node("invalid-node").Obj(),
				st.MakePod().Name("p8").HostPort(8080).Obj(),
				st.MakePod().Name("p9").HostPort(80).Obj(),
			},
			want: []bool{true, false, false, true, false, true, false, true, false},
		},
		{
			name: "tainted node, pods with a single constraint",
			nodeFn: func() *v1.Node {
				node := st.MakeNode().Name("fake-node").Obj()
				node.Spec.Taints = []v1.Taint{
					{Key: "foo", Effect: v1.TaintEffectNoSchedule},
					{Key: "bar", Effect: v1.TaintEffectPreferNoSchedule},
				}
				return node
			},
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Obj(),
				st.MakePod().Name("p2").Toleration("foo").Obj(),
				st.MakePod().Name("p3").Toleration("bar").Obj(),
				st.MakePod().Name("p4").Toleration("bar").Toleration("foo").Obj(),
			},
			want: []bool{false, true, false, true},
		},
		{
			name: "regular node, pods with multiple constraints",
			nodeFn: func() *v1.Node {
				return st.MakeNode().Name("fake-node").Label("hostname", "fake-node").Capacity(cpu8).Obj()
			},
			existingPods: []*v1.Pod{
				st.MakePod().Name("p").HostPort(80).Obj(),
			},
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Req(cpu4).NodeAffinityNotIn("hostname", []string{"fake-node"}).Obj(),
				st.MakePod().Name("p2").Req(cpu16).NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
				st.MakePod().Name("p3").Req(cpu8).NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
				st.MakePod().Name("p4").HostPort(8080).Node("invalid-node").Obj(),
				st.MakePod().Name("p5").Req(cpu4).NodeAffinityIn("hostname", []string{"fake-node"}).HostPort(80).Obj(),
			},
			want: []bool{false, false, true, false, false},
		},
		{
			name: "tainted node, pods with multiple constraints",
			nodeFn: func() *v1.Node {
				node := st.MakeNode().Name("fake-node").Label("hostname", "fake-node").Capacity(cpu8).Obj()
				node.Spec.Taints = []v1.Taint{
					{Key: "foo", Effect: v1.TaintEffectNoSchedule},
					{Key: "bar", Effect: v1.TaintEffectPreferNoSchedule},
				}
				return node
			},
			pods: []*v1.Pod{
				st.MakePod().Name("p1").Req(cpu4).Toleration("bar").Obj(),
				st.MakePod().Name("p2").Req(cpu4).Toleration("bar").Toleration("foo").Obj(),
				st.MakePod().Name("p3").Req(cpu16).Toleration("foo").Obj(),
				st.MakePod().Name("p4").Req(cpu16).Toleration("bar").Obj(),
			},
			want: []bool{false, true, false, false},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
			nodeInfo.SetNode(tt.nodeFn())
			preCheckFn := preCheckForNode(nodeInfo)

			var got []bool
			for _, pod := range tt.pods {
				got = append(got, preCheckFn(pod))
			}

			if diff := cmp.Diff(tt.want, got); diff != "" {
				t.Errorf("Unexpected diff (-want, +got):\n%s", diff)
			}
		})
	}
}

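// TestAddAllEventHandlers verifies that addAllEventHandlers starts the
// expected statically-typed informers for built-in resources and dynamic
// informers for GVKs registered by plugins, and that a malformed GVK is
// skipped rather than registered.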
func TestAddAllEventHandlers(t *testing.T) {
	tests := []struct {
		name                   string
		gvkMap                 map[framework.GVK]framework.ActionType
		expectStaticInformers  map[reflect.Type]bool
		expectDynamicInformers map[schema.GroupVersionResource]bool
	}{
		{
			name:   "default handlers in framework",
			gvkMap: map[framework.GVK]framework.ActionType{},
			expectStaticInformers: map[reflect.Type]bool{
				reflect.TypeOf(&v1.Pod{}):       true,
				reflect.TypeOf(&v1.Node{}):      true,
				reflect.TypeOf(&v1.Namespace{}): true,
			},
			expectDynamicInformers: map[schema.GroupVersionResource]bool{},
		},
		{
			name: "add GVKs handlers defined in framework dynamically",
			gvkMap: map[framework.GVK]framework.ActionType{
				"Pod":                               framework.Add | framework.Delete,
				"PersistentVolume":                  framework.Delete,
				"storage.k8s.io/CSIStorageCapacity": framework.Update,
			},
			expectStaticInformers: map[reflect.Type]bool{
				reflect.TypeOf(&v1.Pod{}):                       true,
				reflect.TypeOf(&v1.Node{}):                      true,
				reflect.TypeOf(&v1.Namespace{}):                 true,
				reflect.TypeOf(&v1.PersistentVolume{}):          true,
				reflect.TypeOf(&storagev1.CSIStorageCapacity{}): true,
			},
			expectDynamicInformers: map[schema.GroupVersionResource]bool{},
		},
		{
			name: "add GVKs handlers defined in plugins dynamically",
			gvkMap: map[framework.GVK]framework.ActionType{
				"daemonsets.v1.apps": framework.Add | framework.Delete,
				"cronjobs.v1.batch":  framework.Delete,
			},
			expectStaticInformers: map[reflect.Type]bool{
				reflect.TypeOf(&v1.Pod{}):       true,
				reflect.TypeOf(&v1.Node{}):      true,
				reflect.TypeOf(&v1.Namespace{}): true,
			},
			expectDynamicInformers: map[schema.GroupVersionResource]bool{
				{Group: "apps", Version: "v1", Resource: "daemonsets"}: true,
				{Group: "batch", Version: "v1", Resource: "cronjobs"}:  true,
			},
		},
		{
			name: "add GVKs handlers defined in plugins dynamically, with one illegal GVK form",
			gvkMap: map[framework.GVK]framework.ActionType{
				"daemonsets.v1.apps":    framework.Add | framework.Delete,
				"custommetrics.v1beta1": framework.Update,
			},
			expectStaticInformers: map[reflect.Type]bool{
				reflect.TypeOf(&v1.Pod{}):       true,
				reflect.TypeOf(&v1.Node{}):      true,
				reflect.TypeOf(&v1.Namespace{}): true,
			},
			expectDynamicInformers: map[schema.GroupVersionResource]bool{
				{Group: "apps", Version: "v1", Resource: "daemonsets"}: true,
			},
		},
	}

	scheme := runtime.NewScheme()
	var localSchemeBuilder = runtime.SchemeBuilder{
		appsv1.AddToScheme,
		batchv1.AddToScheme,
	}
	if err := localSchemeBuilder.AddToScheme(scheme); err != nil {
		t.Fatalf("Failed to add types to scheme: %v", err)
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
			schedulingQueue := queue.NewTestQueueWithInformerFactory(ctx, nil, informerFactory)
			testSched := Scheduler{
				StopEverything:  ctx.Done(),
				SchedulingQueue: schedulingQueue,
				logger:          logger,
			}

			dynclient := dyfake.NewSimpleDynamicClient(scheme)
			dynInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynclient, 0)

			if err := addAllEventHandlers(&testSched, informerFactory, dynInformerFactory, tt.gvkMap); err != nil {
				t.Fatalf("Add event handlers failed, error = %v", err)
			}

			informerFactory.Start(testSched.StopEverything)
			dynInformerFactory.Start(testSched.StopEverything)
			staticInformers := informerFactory.WaitForCacheSync(testSched.StopEverything)
			dynamicInformers := dynInformerFactory.WaitForCacheSync(testSched.StopEverything)

			if diff := cmp.Diff(tt.expectStaticInformers, staticInformers); diff != "" {
				t.Errorf("Unexpected diff (-want, +got):\n%s", diff)
			}
			if diff := cmp.Diff(tt.expectDynamicInformers, dynamicInformers); diff != "" {
				t.Errorf("Unexpected diff (-want, +got):\n%s", diff)
			}
		})
	}
}

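// TestAdmissionCheck verifies that AdmissionCheck either accumulates every
// admission failure (includeAllFailures=true) or fails fast on the first
// unsatisfied predicate (includeAllFailures=false).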
func TestAdmissionCheck(t *testing.T) {
	nodeaffinityError := AdmissionResult{Name: nodeaffinity.Name, Reason: nodeaffinity.ErrReasonPod}
	nodenameError := AdmissionResult{Name: nodename.Name, Reason: nodename.ErrReason}
	nodeportsError := AdmissionResult{Name: nodeports.Name, Reason: nodeports.ErrReason}
	podOverheadError := AdmissionResult{InsufficientResource: &noderesources.InsufficientResource{ResourceName: v1.ResourceCPU, Reason: "Insufficient cpu", Requested: 2000, Used: 7000, Capacity: 8000}}
	cpu := map[v1.ResourceName]string{v1.ResourceCPU: "8"}
	tests := []struct {
		name                 string
		node                 *v1.Node
		existingPods         []*v1.Pod
		pod                  *v1.Pod
		wantAdmissionResults [][]AdmissionResult
	}{
		{
			name: "check nodeAffinity and nodeports, nodeAffinity should fail quickly if includeAllFailures is false",
			node: st.MakeNode().Name("fake-node").Label("foo", "bar").Obj(),
			pod:  st.MakePod().Name("pod2").HostPort(80).NodeSelector(map[string]string{"foo": "bar1"}).Obj(),
			existingPods: []*v1.Pod{
				st.MakePod().Name("pod1").HostPort(80).Obj(),
			},
			wantAdmissionResults: [][]AdmissionResult{{nodeaffinityError, nodeportsError}, {nodeaffinityError}},
		},
		{
			name: "check PodOverhead and nodeAffinity, PodOverhead should fail quickly if includeAllFailures is false",
			node: st.MakeNode().Name("fake-node").Label("foo", "bar").Capacity(cpu).Obj(),
			pod:  st.MakePod().Name("pod2").Container("c").Overhead(v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}).Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).NodeSelector(map[string]string{"foo": "bar1"}).Obj(),
			existingPods: []*v1.Pod{
				st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "7"}).Node("fake-node").Obj(),
			},
			wantAdmissionResults: [][]AdmissionResult{{podOverheadError, nodeaffinityError}, {podOverheadError}},
		},
		{
			name: "check nodename and nodeports, nodename should fail quickly if includeAllFailures is false",
			node: st.MakeNode().Name("fake-node").Obj(),
			pod:  st.MakePod().Name("pod2").HostPort(80).Node("fake-node1").Obj(),
			existingPods: []*v1.Pod{
				st.MakePod().Name("pod1").HostPort(80).Node("fake-node").Obj(),
			},
			wantAdmissionResults: [][]AdmissionResult{{nodenameError, nodeportsError}, {nodenameError}},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			nodeInfo := framework.NewNodeInfo(tt.existingPods...)
			nodeInfo.SetNode(tt.node)

			// The first iteration collects all failures; the second fails fast.
			flags := []bool{true, false}
			for i := range flags {
				admissionResults := AdmissionCheck(tt.pod, nodeInfo, flags[i])

				if diff := cmp.Diff(tt.wantAdmissionResults[i], admissionResults); diff != "" {
					t.Errorf("Unexpected admissionResults (-want, +got):\n%s", diff)
				}
			}
		})
	}
}

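// TestNodeSchedulingPropertiesChange verifies that nodeSchedulingPropertiesChange
// maps each kind of node update (unschedulable flag, allocatable, labels, taints,
// annotations, conditions) to its corresponding cluster event, and emits one
// event per changed property.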
func TestNodeSchedulingPropertiesChange(t *testing.T) {
	testCases := []struct {
		name       string
		newNode    *v1.Node
		oldNode    *v1.Node
		wantEvents []framework.ClusterEvent
	}{
		{
			name:       "no specific change applied",
			newNode:    st.MakeNode().Unschedulable(false).Obj(),
			oldNode:    st.MakeNode().Unschedulable(false).Obj(),
			wantEvents: nil,
		},
		{
			name:       "only node spec unschedulable changed",
			newNode:    st.MakeNode().Unschedulable(false).Obj(),
			oldNode:    st.MakeNode().Unschedulable(true).Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeSpecUnschedulableChange},
		},
		{
			name: "only node allocatable changed",
			newNode: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceCPU:                     "1000m",
				v1.ResourceMemory:                  "100m",
				v1.ResourceName("example.com/foo"): "1"},
			).Obj(),
			oldNode: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceCPU:                     "1000m",
				v1.ResourceMemory:                  "100m",
				v1.ResourceName("example.com/foo"): "2"},
			).Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeAllocatableChange},
		},
		{
			name:       "only node label changed",
			newNode:    st.MakeNode().Label("foo", "bar").Obj(),
			oldNode:    st.MakeNode().Label("foo", "fuz").Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeLabelChange},
		},
		{
			name: "only node taint changed",
			newNode: st.MakeNode().Taints([]v1.Taint{
				{Key: v1.TaintNodeUnschedulable, Value: "", Effect: v1.TaintEffectNoSchedule},
			}).Obj(),
			oldNode: st.MakeNode().Taints([]v1.Taint{
				{Key: v1.TaintNodeUnschedulable, Value: "foo", Effect: v1.TaintEffectNoSchedule},
			}).Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeTaintChange},
		},
		{
			name:       "only node annotation changed",
			newNode:    st.MakeNode().Annotation("foo", "bar").Obj(),
			oldNode:    st.MakeNode().Annotation("foo", "fuz").Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeAnnotationChange},
		},
		{
			name:    "only node condition changed",
			newNode: st.MakeNode().Obj(),
			oldNode: st.MakeNode().Condition(
				v1.NodeReady,
				v1.ConditionTrue,
				"Ready",
				"Ready",
			).Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeConditionChange},
		},
		{
			name: "both node label and node taint changed",
			newNode: st.MakeNode().
				Label("foo", "bar").
				Taints([]v1.Taint{
					{Key: v1.TaintNodeUnschedulable, Value: "", Effect: v1.TaintEffectNoSchedule},
				}).Obj(),
			oldNode: st.MakeNode().Taints([]v1.Taint{
				{Key: v1.TaintNodeUnschedulable, Value: "foo", Effect: v1.TaintEffectNoSchedule},
			}).Obj(),
			wantEvents: []framework.ClusterEvent{queue.NodeLabelChange, queue.NodeTaintChange},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			gotEvents := nodeSchedulingPropertiesChange(tc.newNode, tc.oldNode)
			if diff := cmp.Diff(tc.wantEvents, gotEvents); diff != "" {
				t.Errorf("unexpected event (-want, +got):\n%s", diff)
			}
		})
	}
}