package noderesources

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/klog/v2/ktesting"
	_ "k8s.io/klog/v2/ktesting/init"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	plugintesting "k8s.io/kubernetes/pkg/scheduler/framework/plugins/testing"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
)

var (
	extendedResourceA     = v1.ResourceName("example.com/aaa")
	extendedResourceB     = v1.ResourceName("example.com/bbb")
	kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
	kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
	hugePageResourceA     = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")
)

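// makeResources builds a node capacity v1.ResourceList for the fixed set of
// resources exercised in these tests (CPU, memory, pods, extendedResourceA,
// ephemeral storage, and hugepages-2Mi); makeAllocatableResources below builds
// the allocatable list the same way, so tests can set the two independently.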
func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}

func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA: *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA: *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}

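// newResourcePod returns a pod with one container per framework.Resource passed
// in; each container requests the corresponding CPU, memory, pod-count,
// ephemeral-storage, and scalar-resource quantities.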
func newResourcePod(usage ...framework.Resource) *v1.Pod {
	var containers []v1.Container
	for _, req := range usage {
		rl := v1.ResourceList{
			v1.ResourceCPU: *resource.NewMilliQuantity(req.MilliCPU, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(req.Memory, resource.BinarySI),
			v1.ResourcePods: *resource.NewQuantity(int64(req.AllowedPodNumber), resource.BinarySI),
			v1.ResourceEphemeralStorage: *resource.NewQuantity(req.EphemeralStorage, resource.BinarySI),
		}
		for rName, rQuant := range req.ScalarResources {
			if rName == hugePageResourceA {
				rl[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
			} else {
				rl[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
			}
		}
		containers = append(containers, v1.Container{
			Resources: v1.ResourceRequirements{Requests: rl},
		})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}

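// newResourceInitPod adds init containers, built the same way as regular
// containers, to an existing pod and returns it.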
func newResourceInitPod(pod *v1.Pod, usage ...framework.Resource) *v1.Pod {
	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
	return pod
}

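// newResourceOverheadPod sets the pod's Overhead field, which the plugin
// accounts for on top of the container requests.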
func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
	pod.Spec.Overhead = overhead
	return pod
}

func getErrReason(rn v1.ResourceName) string {
	return fmt.Sprintf("Insufficient %v", rn)
}

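// defaultScoringStrategy is used by tests that only exercise filtering; NewFit
// still expects a non-nil scoring strategy, so those tests fall back to it.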
var defaultScoringStrategy = &config.ScoringStrategy{
	Type: config.LeastAllocated,
	Resources: []config.ResourceSpec{
		{Name: "cpu", Weight: 1},
		{Name: "memory", Weight: 1},
	},
}

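// TestEnoughRequests verifies the Filter phase against a node with capacity and
// allocatable of 10 milliCPU, 20 memory, 32 pods, 5 extendedResourceA, 20
// ephemeral storage, and 5 hugepages-2Mi. Each case states the expected Filter
// status and the per-resource details reported by fitsRequest.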
func TestEnoughRequests(t *testing.T) {
	enoughPodsTests := []struct {
		pod *v1.Pod
		nodeInfo *framework.NodeInfo
		name string
		args config.NodeResourcesFitArgs
		wantInsufficientResources []InsufficientResource
		wantStatus *framework.Status
	}{
		{
			pod: &v1.Pod{},
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
			name: "no resources requested always fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
			name: "too many resources fails",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 1, Used: 10, Capacity: 10},
				{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 1, Used: 20, Capacity: 20},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
			name: "too many resources fails due to init container cpu",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
			name: "too many resources fails due to highest init container cpu",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 3, Used: 8, Capacity: 10},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
			name: "too many resources fails due to init container memory",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
			name: "too many resources fails due to highest init container memory",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 3, Used: 19, Capacity: 20},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
			name: "init container fits because it's the max, not sum, of containers and init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}, framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
			name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
			name: "both resources fit",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})),
			name: "one resource memory fits",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceCPU, Reason: getErrReason(v1.ResourceCPU), Requested: 2, Used: 9, Capacity: 10},
			},
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
			name: "one resource cpu fits",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 2, Used: 19, Capacity: 20},
			},
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
			name: "equal edge case",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 4, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
			name: "equal edge case for init container",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
			name: "extended resource fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}), framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
			name: "extended resource fits for init container",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
			name: "extended resource capacity enforced",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
			name: "extended resource capacity enforced for init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 10, Used: 0, Capacity: 5},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
			name: "extended resource allocatable enforced",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
			name: "extended resource allocatable enforced for init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 1, Used: 5, Capacity: 5},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name: "extended resource allocatable enforced for multiple containers",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name: "extended resource allocatable admits multiple init containers",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
			name: "extended resource allocatable enforced for multiple init containers",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceA, Reason: getErrReason(extendedResourceA), Requested: 6, Used: 2, Capacity: 5},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			name: "extended resource allocatable enforced for unknown resource",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			name: "extended resource allocatable enforced for unknown resource for init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: extendedResourceB, Reason: getErrReason(extendedResourceB), Requested: 1, Used: 0, Capacity: 0},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			name: "kubernetes.io resource capacity enforced",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: kubernetesIOResourceA, Reason: getErrReason(kubernetesIOResourceA), Requested: 10, Used: 0, Capacity: 0},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			name: "kubernetes.io resource capacity enforced for init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: kubernetesIOResourceB, Reason: getErrReason(kubernetesIOResourceB), Requested: 10, Used: 0, Capacity: 0},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
			name: "hugepages resource capacity enforced",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
			},
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{}),
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
			name: "hugepages resource capacity enforced for init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 10, Used: 0, Capacity: 5},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
			name: "hugepages resource allocatable enforced for multiple containers",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: hugePageResourceA, Reason: getErrReason(hugePageResourceA), Requested: 6, Used: 2, Capacity: 5},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			args: config.NodeResourcesFitArgs{
				IgnoredResources: []string{"example.com/bbb"},
			},
			name: "skip checking ignored extended resource",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceOverheadPod(
				newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
				v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
			),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
			name: "resources + pod overhead fits",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourceOverheadPod(
				newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
				v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
			),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
			name: "requests + overhead does not fit for memory",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
			wantInsufficientResources: []InsufficientResource{
				{ResourceName: v1.ResourceMemory, Reason: getErrReason(v1.ResourceMemory), Requested: 16, Used: 5, Capacity: 20},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{
					MilliCPU: 1,
					Memory: 1,
					ScalarResources: map[v1.ResourceName]int64{
						extendedResourceB: 1,
						kubernetesIOResourceA: 1,
					}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
			args: config.NodeResourcesFitArgs{
				IgnoredResourceGroups: []string{"example.com"},
			},
			name: "skip checking ignored extended resource via resource groups",
			wantStatus: framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", kubernetesIOResourceA)),
			wantInsufficientResources: []InsufficientResource{
				{
					ResourceName: kubernetesIOResourceA,
					Reason: fmt.Sprintf("Insufficient %v", kubernetesIOResourceA),
					Requested: 1,
					Used: 0,
					Capacity: 0,
				},
			},
		},
		{
			pod: newResourcePod(
				framework.Resource{
					MilliCPU: 1,
					Memory: 1,
					ScalarResources: map[v1.ResourceName]int64{
						extendedResourceA: 0,
					}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{
				MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}})),
			name: "skip checking extended resource request with quantity zero via resource groups",
			wantInsufficientResources: []InsufficientResource{},
		},
		{
			pod: newResourcePod(
				framework.Resource{
					ScalarResources: map[v1.ResourceName]int64{
						extendedResourceA: 1,
					}}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{
				MilliCPU: 20, Memory: 30, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}})),
			name: "skip checking resource request with quantity zero",
			wantInsufficientResources: []InsufficientResource{},
		},
	}

	for _, test := range enoughPodsTests {
		t.Run(test.name, func(t *testing.T) {
			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5), Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
			test.nodeInfo.SetNode(&node)

			if test.args.ScoringStrategy == nil {
				test.args.ScoringStrategy = defaultScoringStrategy
			}

			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			p, err := NewFit(ctx, &test.args, nil, plfeature.Features{})
			if err != nil {
				t.Fatal(err)
			}
			cycleState := framework.NewCycleState()
			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}

			gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}

			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
			if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
				t.Errorf("insufficient resources do not match: %+v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
			}
		})
	}
}

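// TestPreFilterDisabled checks that Filter returns a framework error when the
// PreFilter state is missing from the cycle state, i.e. PreFilter was skipped.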
func TestPreFilterDisabled(t *testing.T) {
	_, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	pod := &v1.Pod{}
	nodeInfo := framework.NewNodeInfo()
	node := v1.Node{}
	nodeInfo.SetNode(&node)
	p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
	if err != nil {
		t.Fatal(err)
	}
	cycleState := framework.NewCycleState()
	gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
	wantStatus := framework.AsStatus(fmt.Errorf(`error reading "PreFilterNodeResourcesFit" from cycleState: %w`, framework.ErrNotFound))
	if !reflect.DeepEqual(gotStatus, wantStatus) {
		t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
	}
}

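// TestNotEnoughRequests covers the pod-count dimension: the node allows only one
// pod, so Filter must fail with "Too many pods" even when CPU and memory fit.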
func TestNotEnoughRequests(t *testing.T) {
	notEnoughPodsTests := []struct {
		pod *v1.Pod
		nodeInfo *framework.NodeInfo
		fits bool
		name string
		wantStatus *framework.Status
	}{
		{
			pod: &v1.Pod{},
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
			name: "even without specified resources, predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
			name: "even if both resources fit, predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
			name: "even for equal edge case, predicate fails when there's no space for additional pod",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
			name: "even for equal edge case, predicate fails when there's no space for additional pod due to init container",
			wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
		},
	}
	for _, test := range notEnoughPodsTests {
		t.Run(test.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
			test.nodeInfo.SetNode(&node)

			p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
			if err != nil {
				t.Fatal(err)
			}
			cycleState := framework.NewCycleState()
			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}

			gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}

}

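// TestStorageRequests exercises ephemeral-storage accounting against a node with
// 20 units of allocatable ephemeral storage.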
func TestStorageRequests(t *testing.T) {
	storagePodsTests := []struct {
		pod *v1.Pod
		nodeInfo *framework.NodeInfo
		name string
		wantStatus *framework.Status
	}{
		{
			pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 10})),
			name: "empty storage requested, and pod fits",
		},
		{
			pod: newResourcePod(framework.Resource{EphemeralStorage: 25}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
			name: "storage ephemeral local storage request exceeds allocatable",
			wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
		},
		{
			pod: newResourceInitPod(newResourcePod(framework.Resource{EphemeralStorage: 5})),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2, EphemeralStorage: 10})),
			name: "ephemeral local storage is sufficient",
		},
		{
			pod: newResourcePod(framework.Resource{EphemeralStorage: 10}),
			nodeInfo: framework.NewNodeInfo(
				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
			name: "pod fits",
		},
	}

	for _, test := range storagePodsTests {
		t.Run(test.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5), Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
			test.nodeInfo.SetNode(&node)

			p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
			if err != nil {
				t.Fatal(err)
			}
			cycleState := framework.NewCycleState()
			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
			if !preFilterStatus.IsSuccess() {
				t.Errorf("prefilter failed with status: %v", preFilterStatus)
			}

			gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
			}
		})
	}

}

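// TestRestartableInitContainers verifies that a pod with a restartable (sidecar)
// init container is rejected in PreFilter unless the SidecarContainers feature
// gate is enabled.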
func TestRestartableInitContainers(t *testing.T) {
	newPod := func() *v1.Pod {
		return &v1.Pod{
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "regular"},
				},
			},
		}
	}
	newPodWithRestartableInitContainers := func() *v1.Pod {
		restartPolicyAlways := v1.ContainerRestartPolicyAlways
		return &v1.Pod{
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "regular"},
				},
				InitContainers: []v1.Container{
					{
						Name: "restartable-init",
						RestartPolicy: &restartPolicyAlways,
					},
				},
			},
		}
	}

	testCases := []struct {
		name string
		pod *v1.Pod
		enableSidecarContainers bool
		wantPreFilterStatus *framework.Status
	}{
		{
			name: "allow pod without restartable init containers if sidecar containers is disabled",
			pod: newPod(),
		},
		{
			name: "not allow pod with restartable init containers if sidecar containers is disabled",
			pod: newPodWithRestartableInitContainers(),
			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled"),
		},
		{
			name: "allow pod without restartable init containers if sidecar containers is enabled",
			enableSidecarContainers: true,
			pod: newPod(),
		},
		{
			name: "allow pod with restartable init containers if sidecar containers is enabled",
			enableSidecarContainers: true,
			pod: newPodWithRestartableInitContainers(),
		},
	}

	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(0, 0, 1, 0, 0, 0)}}
			nodeInfo := framework.NewNodeInfo()
			nodeInfo.SetNode(&node)

			p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{EnableSidecarContainers: test.enableSidecarContainers})
			if err != nil {
				t.Fatal(err)
			}
			cycleState := framework.NewCycleState()
			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
			if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
				t.Error("status does not match (-expected +actual):\n", diff)
			}
			if !preFilterStatus.IsSuccess() {
				return
			}

			filterStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
			if !filterStatus.IsSuccess() {
				t.Error("status does not match (-expected +actual):\n- Success\n +\n", filterStatus.Code())
			}
		})
	}

}

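// TestFitScore compares Score results for the LeastAllocated, MostAllocated, and
// RequestedToCapacityRatio strategies, both with and without a preceding PreScore
// call.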
func TestFitScore(t *testing.T) {
	tests := []struct {
		name string
		requestedPod *v1.Pod
		nodes []*v1.Node
		existingPods []*v1.Pod
		expectedPriorities framework.NodeScoreList
		nodeResourcesFitArgs config.NodeResourcesFitArgs
		runPreScore bool
	}{
		{
			name: "test case for ScoringStrategy RequestedToCapacityRatio case1",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 10},
							{Utilization: 100, Score: 0},
						},
					},
				},
			},
			runPreScore: true,
		},
		{
			name: "test case for ScoringStrategy RequestedToCapacityRatio case2",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 68}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 0},
							{Utilization: 100, Score: 10},
						},
					},
				},
			},
			runPreScore: true,
		},
		{
			name: "test case for ScoringStrategy MostAllocated",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.MostAllocated,
					Resources: defaultResources,
				},
			},
			runPreScore: true,
		},
		{
			name: "test case for ScoringStrategy LeastAllocated",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.LeastAllocated,
					Resources: defaultResources,
				},
			},
			runPreScore: true,
		},
		{
			name: "test case for ScoringStrategy RequestedToCapacityRatio case1 if PreScore is not called",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 10},
							{Utilization: 100, Score: 0},
						},
					},
				},
			},
			runPreScore: false,
		},
		{
			name: "test case for ScoringStrategy MostAllocated if PreScore is not called",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.MostAllocated,
					Resources: defaultResources,
				},
			},
			runPreScore: false,
		},
		{
			name: "test case for ScoringStrategy LeastAllocated if PreScore is not called",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.LeastAllocated,
					Resources: defaultResources,
				},
			},
			runPreScore: false,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			_, ctx := ktesting.NewTestContext(t)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()

			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
			fh, _ := runtime.NewFramework(ctx, nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			args := test.nodeResourcesFitArgs
			p, err := NewFit(ctx, &args, fh, plfeature.Features{})
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			var gotPriorities framework.NodeScoreList
			for _, n := range test.nodes {
				if test.runPreScore {
					status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, tf.BuildNodeInfos(test.nodes))
					if !status.IsSuccess() {
						t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
					}
				}
				score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, n.Name)
				if !status.IsSuccess() {
					t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
				}
				gotPriorities = append(gotPriorities, framework.NodeScore{Name: n.Name, Score: score})
			}

			if !reflect.DeepEqual(test.expectedPriorities, gotPriorities) {
				t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedPriorities, gotPriorities)
			}
		})
	}
}

var benchmarkResourceSet = []config.ResourceSpec{
	{Name: string(v1.ResourceCPU), Weight: 1},
	{Name: string(v1.ResourceMemory), Weight: 1},
	{Name: string(v1.ResourcePods), Weight: 1},
	{Name: string(v1.ResourceStorage), Weight: 1},
	{Name: string(v1.ResourceEphemeralStorage), Weight: 1},
	{Name: string(extendedResourceA), Weight: 1},
	{Name: string(extendedResourceB), Weight: 1},
	{Name: string(kubernetesIOResourceA), Weight: 1},
	{Name: string(kubernetesIOResourceB), Weight: 1},
	{Name: string(hugePageResourceA), Weight: 1},
}

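// BenchmarkTestFitScore measures Score throughput for each scoring strategy with
// the default two-resource set and with the ten-resource benchmarkResourceSet.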
func BenchmarkTestFitScore(b *testing.B) {
	tests := []struct {
		name string
		nodeResourcesFitArgs config.NodeResourcesFitArgs
	}{
		{
			name: "RequestedToCapacityRatio with defaultResources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 10},
							{Utilization: 100, Score: 0},
						},
					},
				},
			},
		},
		{
			name: "RequestedToCapacityRatio with 10 resources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: benchmarkResourceSet,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 10},
							{Utilization: 100, Score: 0},
						},
					},
				},
			},
		},
		{
			name: "MostAllocated with defaultResources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.MostAllocated,
					Resources: defaultResources,
				},
			},
		},
		{
			name: "MostAllocated with 10 resources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.MostAllocated,
					Resources: benchmarkResourceSet,
				},
			},
		},
		{
			name: "LeastAllocated with defaultResources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.LeastAllocated,
					Resources: defaultResources,
				},
			},
		},
		{
			name: "LeastAllocated with 10 resources",
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.LeastAllocated,
					Resources: benchmarkResourceSet,
				},
			},
		},
	}

	for _, test := range tests {
		b.Run(test.name, func(b *testing.B) {
			_, ctx := ktesting.NewTestContext(b)
			ctx, cancel := context.WithCancel(ctx)
			defer cancel()
			existingPods := []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
			}
			nodes := []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			}
			state := framework.NewCycleState()
			var nodeResourcesFunc = runtime.FactoryAdapter(plfeature.Features{}, NewFit)
			pl := plugintesting.SetupPlugin(ctx, b, nodeResourcesFunc, &test.nodeResourcesFitArgs, cache.NewSnapshot(existingPods, nodes))
			p := pl.(*Fit)

			b.ResetTimer()

			requestedPod := st.MakePod().Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj()
			for i := 0; i < b.N; i++ {
				_, status := p.Score(ctx, state, requestedPod, nodes[0].Name)
				if !status.IsSuccess() {
					b.Errorf("unexpected status: %v", status)
				}
			}
		})
	}
}

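// TestEventsToRegister asserts that pod Update events are registered only when
// InPlacePodVerticalScaling is enabled; the queueing hint functions are cleared
// before comparison since function values cannot be usefully diffed.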
func TestEventsToRegister(t *testing.T) {
	tests := []struct {
		name string
		inPlacePodVerticalScalingEnabled bool
		expectedClusterEvents []framework.ClusterEventWithHint
	}{
		{
			"Register events with InPlacePodVerticalScaling feature enabled",
			true,
			[]framework.ClusterEventWithHint{
				{Event: framework.ClusterEvent{Resource: "Pod", ActionType: framework.Update | framework.Delete}},
				{Event: framework.ClusterEvent{Resource: "Node", ActionType: framework.Add | framework.Update}},
			},
		},
		{
			"Register events with InPlacePodVerticalScaling feature disabled",
			false,
			[]framework.ClusterEventWithHint{
				{Event: framework.ClusterEvent{Resource: "Pod", ActionType: framework.Delete}},
				{Event: framework.ClusterEvent{Resource: "Node", ActionType: framework.Add | framework.Update}},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			fp := &Fit{enableInPlacePodVerticalScaling: test.inPlacePodVerticalScalingEnabled}
			actualClusterEvents := fp.EventsToRegister()
			for i := range actualClusterEvents {
				actualClusterEvents[i].QueueingHintFn = nil
			}
			if diff := cmp.Diff(test.expectedClusterEvents, actualClusterEvents); diff != "" {
				t.Error("Cluster events don't match expected events (-expected +actual):\n", diff)
			}
		})
	}
}

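// Test_isSchedulableAfterPodChange covers the queueing hint for pod events: only
// the deletion of a scheduled pod, or an in-place resource scale-down of a
// scheduled pod with InPlacePodVerticalScaling enabled, should requeue the
// pending pod.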
func Test_isSchedulableAfterPodChange(t *testing.T) {
	testcases := map[string]struct {
		pod *v1.Pod
		oldObj, newObj interface{}
		enableInPlacePodVerticalScaling bool
		expectedHint framework.QueueingHint
		expectedErr bool
	}{
		"backoff-wrong-old-object": {
			pod: &v1.Pod{},
			oldObj: "not-a-pod",
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.Queue,
			expectedErr: true,
		},
		"backoff-wrong-new-object": {
			pod: &v1.Pod{},
			newObj: "not-a-pod",
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.Queue,
			expectedErr: true,
		},
		"queue-on-deleted": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.Queue,
		},
		"skip-queue-on-unscheduled-pod-deleted": {
			pod: &v1.Pod{},
			oldObj: &v1.Pod{},
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-disable-inplace-pod-vertical-scaling": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
			newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
			enableInPlacePodVerticalScaling: false,
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-unscheduled-pod": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
			newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-non-resource-changes": {
			pod: &v1.Pod{},
			oldObj: st.MakePod().Label("k", "v").Node("fake").Obj(),
			newObj: st.MakePod().Label("foo", "bar").Node("fake").Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-unrelated-resource-changes": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
			newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-resource-scale-up": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
			newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.QueueSkip,
		},
		"queue-on-some-resource-scale-down": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			oldObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
			newObj: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
			enableInPlacePodVerticalScaling: true,
			expectedHint: framework.Queue,
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{
				EnableInPlacePodVerticalScaling: tc.enableInPlacePodVerticalScaling,
			})
			if err != nil {
				t.Fatal(err)
			}
			actualHint, err := p.(*Fit).isSchedulableAfterPodChange(logger, tc.pod, tc.oldObj, tc.newObj)
			if tc.expectedErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, tc.expectedHint, actualHint)
		})
	}
}

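// Test_isSchedulableAfterNodeChange covers the queueing hint for node events: a
// node add or update requeues the pod only when the new node state has enough of
// every requested resource.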
func Test_isSchedulableAfterNodeChange(t *testing.T) {
	testcases := map[string]struct {
		pod *v1.Pod
		oldObj, newObj interface{}
		expectedHint framework.QueueingHint
		expectedErr bool
	}{
		"backoff-wrong-new-object": {
			pod: &v1.Pod{},
			newObj: "not-a-node",
			expectedHint: framework.Queue,
			expectedErr: true,
		},
		"backoff-wrong-old-object": {
			pod: &v1.Pod{},
			oldObj: "not-a-node",
			newObj: &v1.Node{},
			expectedHint: framework.Queue,
			expectedErr: true,
		},
		"skip-queue-on-node-add-without-sufficient-resources": {
			pod: newResourcePod(framework.Resource{Memory: 2}),
			newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "1",
			}).Obj(),
			expectedHint: framework.QueueSkip,
		},
		"skip-queue-on-node-add-without-required-resource-type": {
			pod: newResourcePod(framework.Resource{
				ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}},
			),
			newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				extendedResourceB: "1",
			}).Obj(),
			expectedHint: framework.QueueSkip,
		},
		"queue-on-node-add-with-sufficient-resources": {
			pod: newResourcePod(framework.Resource{
				Memory: 2,
				ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
			}),
			newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "4",
				extendedResourceA: "2",
			}).Obj(),
			expectedHint: framework.Queue,
		},
		"skip-queue-on-node-changes-from-suitable-to-unsuitable": {
			pod: newResourcePod(framework.Resource{
				Memory: 2,
				ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
			}),
			oldObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "4",
				extendedResourceA: "2",
			}).Obj(),
			newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "1",
				extendedResourceA: "2",
			}).Obj(),
			expectedHint: framework.QueueSkip,
		},
		"queue-on-node-changes-from-unsuitable-to-suitable": {
			pod: newResourcePod(framework.Resource{
				Memory: 2,
				ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1},
			}),
			oldObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "1",
				extendedResourceA: "2",
			}).Obj(),
			newObj: st.MakeNode().Capacity(map[v1.ResourceName]string{
				v1.ResourceMemory: "4",
				extendedResourceA: "2",
			}).Obj(),
			expectedHint: framework.Queue,
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			logger, ctx := ktesting.NewTestContext(t)
			p, err := NewFit(ctx, &config.NodeResourcesFitArgs{ScoringStrategy: defaultScoringStrategy}, nil, plfeature.Features{})
			if err != nil {
				t.Fatal(err)
			}
			actualHint, err := p.(*Fit).isSchedulableAfterNodeChange(logger, tc.pod, tc.oldObj, tc.newObj)
			if tc.expectedErr {
				require.Error(t, err)
				return
			}
			require.NoError(t, err)
			require.Equal(t, tc.expectedHint, actualHint)
		})
	}
}

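// TestIsFit checks the isFit helper directly: it must return false for a nil
// node or insufficient capacity, and true when the node can satisfy the pod's
// requests.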
func TestIsFit(t *testing.T) {
	testCases := map[string]struct {
		pod *v1.Pod
		node *v1.Node
		expected bool
	}{
		"nil node": {
			pod: &v1.Pod{},
			expected: false,
		},
		"insufficient resource": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
			node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			expected: false,
		},
		"sufficient resource": {
			pod: st.MakePod().Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
			node: st.MakeNode().Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
			expected: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			if got := isFit(tc.pod, tc.node); got != tc.expected {
				t.Errorf("expected: %v, got: %v", tc.expected, got)
			}
		})
	}
}