package cpumanager

import (
	"context"
	"fmt"
	"os"
	"reflect"
	"strconv"
	"strings"
	"testing"
	"time"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"

	"k8s.io/kubernetes/pkg/kubelet/cm/containermap"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/state"
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
	"k8s.io/utils/cpuset"
)
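// mockState is a minimal in-memory implementation of the state.State
// interface, recording per-container CPU assignments and the shared
// default CPU set so tests can inspect them directly.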
type mockState struct {
	assignments   state.ContainerCPUAssignments
	defaultCPUSet cpuset.CPUSet
}

func (s *mockState) GetCPUSet(podUID string, containerName string) (cpuset.CPUSet, bool) {
	res, ok := s.assignments[podUID][containerName]
	return res.Clone(), ok
}

func (s *mockState) GetDefaultCPUSet() cpuset.CPUSet {
	return s.defaultCPUSet.Clone()
}

func (s *mockState) GetCPUSetOrDefault(podUID string, containerName string) cpuset.CPUSet {
	if res, ok := s.GetCPUSet(podUID, containerName); ok {
		return res
	}
	return s.GetDefaultCPUSet()
}

func (s *mockState) SetCPUSet(podUID string, containerName string, cset cpuset.CPUSet) {
	if _, exists := s.assignments[podUID]; !exists {
		s.assignments[podUID] = make(map[string]cpuset.CPUSet)
	}
	s.assignments[podUID][containerName] = cset
}

func (s *mockState) SetDefaultCPUSet(cset cpuset.CPUSet) {
	s.defaultCPUSet = cset
}

func (s *mockState) Delete(podUID string, containerName string) {
	delete(s.assignments[podUID], containerName)
	if len(s.assignments[podUID]) == 0 {
		delete(s.assignments, podUID)
	}
}

func (s *mockState) ClearState() {
	s.defaultCPUSet = cpuset.CPUSet{}
	s.assignments = make(state.ContainerCPUAssignments)
}

func (s *mockState) SetCPUAssignments(a state.ContainerCPUAssignments) {
	s.assignments = a.Clone()
}

func (s *mockState) GetCPUAssignments() state.ContainerCPUAssignments {
	return s.assignments.Clone()
}
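// mockPolicy implements the Policy interface; every fallible method
// returns the configured err, which lets tests drive policy failures.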
type mockPolicy struct {
	err error
}

func (p *mockPolicy) Name() string {
	return "mock"
}

func (p *mockPolicy) Start(s state.State) error {
	return p.err
}

func (p *mockPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
	return p.err
}

func (p *mockPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
	return p.err
}

func (p *mockPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
	return nil
}

func (p *mockPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[string][]topologymanager.TopologyHint {
	return nil
}

func (p *mockPolicy) GetAllocatableCPUs(m state.State) cpuset.CPUSet {
	return cpuset.New()
}
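// mockRuntimeService stubs the CRI runtime; UpdateContainerResources
// simply returns the configured error.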
type mockRuntimeService struct {
	err error
}

func (rt mockRuntimeService) UpdateContainerResources(_ context.Context, id string, resources *runtimeapi.ContainerResources) error {
	return rt.err
}
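// mockPodStatusProvider returns a fixed PodStatus and found flag for
// any UID, standing in for the kubelet's status manager.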
type mockPodStatusProvider struct {
	podStatus v1.PodStatus
	found     bool
}

func (psp mockPodStatusProvider) GetPodStatus(uid types.UID) (v1.PodStatus, bool) {
	return psp.podStatus, psp.found
}
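// makePod builds a single-container pod with the given UID, container
// name, and CPU request/limit. Memory is pinned to 1G on both request
// and limit, so the pod is Guaranteed QoS whenever cpuRequest equals
// cpuLimit.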
func makePod(podUID, containerName, cpuRequest, cpuLimit string) *v1.Pod {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpuRequest),
							v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
						},
						Limits: v1.ResourceList{
							v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpuLimit),
							v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
						},
					},
				},
			},
		},
	}

	pod.UID = types.UID(podUID)
	pod.Spec.Containers[0].Name = containerName

	return pod
}
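// makeMultiContainerPod builds a pod with one init container per entry
// in initCPUs and one app container per entry in appCPUs, named
// "initContainer-<i>" and "appContainer-<i>" respectively.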
func makeMultiContainerPod(initCPUs, appCPUs []struct{ request, limit string }) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod",
			UID:  "podUID",
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{},
			Containers:     []v1.Container{},
		},
	}

	for i, cpu := range initCPUs {
		pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
			Name: "initContainer-" + strconv.Itoa(i),
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
			},
		})
	}

	for i, cpu := range appCPUs {
		pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
			Name: "appContainer-" + strconv.Itoa(i),
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
			},
		})
	}

	return pod
}
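// makeMultiContainerPodWithOptions is like makeMultiContainerPod but
// takes containerOptions, which also carry a container-level restart
// policy; the restart policy is applied to init containers only, to
// model restartable (sidecar) init containers.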
func makeMultiContainerPodWithOptions(initCPUs, appCPUs []*containerOptions) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod",
			UID:  "podUID",
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{},
			Containers:     []v1.Container{},
		},
	}

	for i, cpu := range initCPUs {
		pod.Spec.InitContainers = append(pod.Spec.InitContainers, v1.Container{
			Name: "initContainer-" + strconv.Itoa(i),
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
			},
			RestartPolicy: &cpu.restartPolicy,
		})
	}

	for i, cpu := range appCPUs {
		pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
			Name: "appContainer-" + strconv.Itoa(i),
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.request),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
				Limits: v1.ResourceList{
					v1.ResourceName(v1.ResourceCPU):    resource.MustParse(cpu.limit),
					v1.ResourceName(v1.ResourceMemory): resource.MustParse("1G"),
				},
			},
		})
	}

	return pod
}
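// TestCPUManagerAdd verifies that Allocate surfaces policy errors and
// that AddContainer registers the container and removes its exclusive
// CPUs from the shared default set.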
func TestCPUManagerAdd(t *testing.T) {
	testPolicy, _ := NewStaticPolicy(
		&topology.CPUTopology{
			NumCPUs:    4,
			NumSockets: 1,
			NumCores:   4,
			CPUDetails: map[int]topology.CPUInfo{
				0: {CoreID: 0, SocketID: 0},
				1: {CoreID: 1, SocketID: 0},
				2: {CoreID: 2, SocketID: 0},
				3: {CoreID: 3, SocketID: 0},
			},
		},
		0,
		cpuset.New(),
		topologymanager.NewFakeManager(),
		nil)
	testCases := []struct {
		description        string
		updateErr          error
		policy             Policy
		expCPUSet          cpuset.CPUSet
		expAllocateErr     error
		expAddContainerErr error
	}{
		{
			description:        "cpu manager add - no error",
			updateErr:          nil,
			policy:             testPolicy,
			expCPUSet:          cpuset.New(3, 4),
			expAllocateErr:     nil,
			expAddContainerErr: nil,
		},
		{
			description: "cpu manager add - policy add container error",
			updateErr:   nil,
			policy: &mockPolicy{
				err: fmt.Errorf("fake reg error"),
			},
			expCPUSet:          cpuset.New(1, 2, 3, 4),
			expAllocateErr:     fmt.Errorf("fake reg error"),
			expAddContainerErr: nil,
		},
	}

	for _, testCase := range testCases {
		mgr := &manager{
			policy: testCase.policy,
			state: &mockState{
				assignments:   state.ContainerCPUAssignments{},
				defaultCPUSet: cpuset.New(1, 2, 3, 4),
			},
			lastUpdateState: state.NewMemoryState(),
			containerRuntime: mockRuntimeService{
				err: testCase.updateErr,
			},
			containerMap:      containermap.NewContainerMap(),
			podStatusProvider: mockPodStatusProvider{},
			sourcesReady:      &sourcesReadyStub{},
		}

		pod := makePod("fakePod", "fakeContainer", "2", "2")
		container := &pod.Spec.Containers[0]
		mgr.activePods = func() []*v1.Pod { return nil }

		err := mgr.Allocate(pod, container)
		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
			t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v",
				testCase.description, testCase.expAllocateErr, err)
		}

		mgr.AddContainer(pod, container, "fakeID")
		_, _, err = mgr.containerMap.GetContainerRef("fakeID")
		if !reflect.DeepEqual(err, testCase.expAddContainerErr) {
			t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v",
				testCase.description, testCase.expAddContainerErr, err)
		}
		if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) {
			t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v",
				testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet())
		}
	}
}
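// TestCPUManagerAddWithInitContainers walks pods with varying init and
// app container CPU requests through Allocate/AddContainer, checking
// the exclusive cpuset each container receives (init container CPUs
// may be reused by app containers) and the final default set.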
func TestCPUManagerAddWithInitContainers(t *testing.T) {
	testCases := []struct {
		description      string
		topo             *topology.CPUTopology
		numReservedCPUs  int
		initContainerIDs []string
		containerIDs     []string
		stAssignments    state.ContainerCPUAssignments
		stDefaultCPUSet  cpuset.CPUSet
		pod              *v1.Pod
		expInitCSets     []cpuset.CPUSet
		expCSets         []cpuset.CPUSet
	}{
		{
			description:      "No Guaranteed Init CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{{"100m", "100m"}},
				[]struct{ request, limit string }{{"4000m", "4000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New()},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
		},
		{
			description:      "Equal Number of Guaranteed CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{{"4000m", "4000m"}},
				[]struct{ request, limit string }{{"4000m", "4000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
		},
		{
			description:      "More Init Container Guaranteed CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{{"6000m", "6000m"}},
				[]struct{ request, limit string }{{"4000m", "4000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5, 2, 6)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
		},
		{
			description:      "Less Init Container Guaranteed CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{{"2000m", "2000m"}},
				[]struct{ request, limit string }{{"4000m", "4000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
		},
		{
			description:      "Multi Init Container Equal CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{
					{"2000m", "2000m"},
					{"2000m", "2000m"}},
				[]struct{ request, limit string }{
					{"2000m", "2000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4),
				cpuset.New(0, 4)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4)},
		},
		{
			description:      "Multi Init Container Less CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{
					{"4000m", "4000m"},
					{"4000m", "4000m"}},
				[]struct{ request, limit string }{
					{"2000m", "2000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5),
				cpuset.New(0, 4, 1, 5)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4)},
		},
		{
			description:      "Multi Init Container More CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{
					{"2000m", "2000m"},
					{"2000m", "2000m"}},
				[]struct{ request, limit string }{
					{"4000m", "4000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4),
				cpuset.New(0, 4)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5)},
		},
		{
			description:      "Multi Init Container Increasing CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
			containerIDs:     []string{"appFakeID"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{
					{"2000m", "2000m"},
					{"4000m", "4000m"}},
				[]struct{ request, limit string }{
					{"6000m", "6000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4),
				cpuset.New(0, 4, 1, 5)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4, 1, 5, 2, 6)},
		},
		{
			description:      "Multi Init, Multi App Container Split CPUs",
			topo:             topoSingleSocketHT,
			numReservedCPUs:  0,
			stAssignments:    state.ContainerCPUAssignments{},
			stDefaultCPUSet:  cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
			initContainerIDs: []string{"initFakeID-1", "initFakeID-2"},
			containerIDs:     []string{"appFakeID-1", "appFakeID-2"},
			pod: makeMultiContainerPod(
				[]struct{ request, limit string }{
					{"2000m", "2000m"},
					{"4000m", "4000m"}},
				[]struct{ request, limit string }{
					{"2000m", "2000m"},
					{"2000m", "2000m"}}),
			expInitCSets: []cpuset.CPUSet{
				cpuset.New(0, 4),
				cpuset.New(0, 4, 1, 5)},
			expCSets: []cpuset.CPUSet{
				cpuset.New(0, 4),
				cpuset.New(1, 5)},
		},
	}

	for _, testCase := range testCases {
		policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), topologymanager.NewFakeManager(), nil)

		mockState := &mockState{
			assignments:   testCase.stAssignments,
			defaultCPUSet: testCase.stDefaultCPUSet,
		}

		mgr := &manager{
			policy:            policy,
			state:             mockState,
			lastUpdateState:   state.NewMemoryState(),
			containerRuntime:  mockRuntimeService{},
			containerMap:      containermap.NewContainerMap(),
			podStatusProvider: mockPodStatusProvider{},
			sourcesReady:      &sourcesReadyStub{},
			activePods: func() []*v1.Pod {
				return []*v1.Pod{testCase.pod}
			},
		}

		containers := append(
			testCase.pod.Spec.InitContainers,
			testCase.pod.Spec.Containers...)

		containerIDs := append(
			testCase.initContainerIDs,
			testCase.containerIDs...)

		expCSets := append(
			testCase.expInitCSets,
			testCase.expCSets...)

		cumCSet := cpuset.New()

		for i := range containers {
			err := mgr.Allocate(testCase.pod, &containers[i])
			if err != nil {
				t.Errorf("StaticPolicy Allocate() error (%v). unexpected error for container id: %v: %v",
					testCase.description, containerIDs[i], err)
			}

			mgr.AddContainer(testCase.pod, &containers[i], containerIDs[i])
			_, _, err = mgr.containerMap.GetContainerRef(containerIDs[i])
			if err != nil {
				t.Errorf("StaticPolicy AddContainer() error (%v). unexpected error for container id: %v: %v",
					testCase.description, containerIDs[i], err)
			}

			cset, found := mockState.assignments[string(testCase.pod.UID)][containers[i].Name]
			if !expCSets[i].IsEmpty() && !found {
				t.Errorf("StaticPolicy AddContainer() error (%v). expected container %v to be present in assignments %v",
					testCase.description, containers[i].Name, mockState.assignments)
			}

			if found && !cset.Equals(expCSets[i]) {
				t.Errorf("StaticPolicy AddContainer() error (%v). expected cpuset %v for container %v but got %v",
					testCase.description, expCSets[i], containers[i].Name, cset)
			}

			cumCSet = cumCSet.Union(cset)
		}

		if !testCase.stDefaultCPUSet.Difference(cumCSet).Equals(mockState.defaultCPUSet) {
			t.Errorf("StaticPolicy error (%v). expected final state for defaultCPUSet %v but got %v",
				testCase.description, testCase.stDefaultCPUSet.Difference(cumCSet), mockState.defaultCPUSet)
		}
	}
}
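// TestCPUManagerGenerate covers NewManager construction: the none and
// static policies, an unknown policy name, a broken machine topology,
// and missing or zero reserved CPU resources.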
func TestCPUManagerGenerate(t *testing.T) {
	testCases := []struct {
		description                string
		cpuPolicyName              string
		nodeAllocatableReservation v1.ResourceList
		isTopologyBroken           bool
		expectedPolicy             string
		expectedError              error
	}{
		{
			description:                "set none policy",
			cpuPolicyName:              "none",
			nodeAllocatableReservation: nil,
			expectedPolicy:             "none",
		},
		{
			description:                "invalid policy name",
			cpuPolicyName:              "invalid",
			nodeAllocatableReservation: nil,
			expectedError:              fmt.Errorf("unknown policy: \"invalid\""),
		},
		{
			description:                "static policy",
			cpuPolicyName:              "static",
			nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(3, resource.DecimalSI)},
			expectedPolicy:             "static",
		},
		{
			description:                "static policy - broken topology",
			cpuPolicyName:              "static",
			nodeAllocatableReservation: v1.ResourceList{},
			isTopologyBroken:           true,
			expectedError:              fmt.Errorf("could not detect number of cpus"),
		},
		{
			description:                "static policy - broken reservation",
			cpuPolicyName:              "static",
			nodeAllocatableReservation: v1.ResourceList{},
			expectedError:              fmt.Errorf("unable to determine reserved CPU resources for static policy"),
		},
		{
			description:                "static policy - no CPU resources",
			cpuPolicyName:              "static",
			nodeAllocatableReservation: v1.ResourceList{v1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI)},
			expectedError:              fmt.Errorf("the static policy requires systemreserved.cpu + kubereserved.cpu to be greater than zero"),
		},
	}

	mockedMachineInfo := cadvisorapi.MachineInfo{
		NumCores: 4,
		Topology: []cadvisorapi.Node{
			{
				Cores: []cadvisorapi.Core{
					{
						Id:      0,
						Threads: []int{0},
					},
					{
						Id:      1,
						Threads: []int{1},
					},
					{
						Id:      2,
						Threads: []int{2},
					},
					{
						Id:      3,
						Threads: []int{3},
					},
				},
			},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			machineInfo := &mockedMachineInfo
			if testCase.isTopologyBroken {
				machineInfo = &cadvisorapi.MachineInfo{}
			}
			sDir, err := os.MkdirTemp("", "cpu_manager_test")
			if err != nil {
				t.Errorf("cannot create state file: %s", err.Error())
			}
			defer os.RemoveAll(sDir)

			mgr, err := NewManager(testCase.cpuPolicyName, nil, 5*time.Second, machineInfo, cpuset.New(), testCase.nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
			if testCase.expectedError != nil {
				if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
					t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
				}
			} else {
				rawMgr := mgr.(*manager)
				if rawMgr.policy.Name() != testCase.expectedPolicy {
					t.Errorf("Unexpected policy name. Have: %q wants %q", rawMgr.policy.Name(), testCase.expectedPolicy)
				}
				if rawMgr.policy.Name() == string(PolicyNone) {
					if rawMgr.topology != nil {
						t.Errorf("Expected topology to be nil for 'none' policy. Have: %q", rawMgr.topology)
					}
				}
				if rawMgr.policy.Name() != string(PolicyNone) {
					if rawMgr.topology == nil {
						t.Errorf("Expected topology to be non-nil for policy '%v'. Have: %q", rawMgr.policy.Name(), rawMgr.topology)
					}
				}
			}
		})
	}
}
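// TestCPUManagerRemove checks RemoveContainer when the policy succeeds
// and when it returns an error.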
func TestCPUManagerRemove(t *testing.T) {
	containerID := "fakeID"
	containerMap := containermap.NewContainerMap()

	mgr := &manager{
		policy: &mockPolicy{
			err: nil,
		},
		state: &mockState{
			assignments:   state.ContainerCPUAssignments{},
			defaultCPUSet: cpuset.New(),
		},
		lastUpdateState:   state.NewMemoryState(),
		containerRuntime:  mockRuntimeService{},
		containerMap:      containerMap,
		activePods:        func() []*v1.Pod { return nil },
		podStatusProvider: mockPodStatusProvider{},
	}

	containerMap.Add("", "", containerID)
	err := mgr.RemoveContainer(containerID)
	if err != nil {
		t.Errorf("CPU Manager RemoveContainer() error. expected error to be nil but got: %v", err)
	}

	mgr = &manager{
		policy: &mockPolicy{
			err: fmt.Errorf("fake error"),
		},
		state:             state.NewMemoryState(),
		containerRuntime:  mockRuntimeService{},
		containerMap:      containerMap,
		activePods:        func() []*v1.Pod { return nil },
		podStatusProvider: mockPodStatusProvider{},
	}

	containerMap.Add("", "", containerID)
	err = mgr.RemoveContainer(containerID)
	if !reflect.DeepEqual(err, fmt.Errorf("fake error")) {
		t.Errorf("CPU Manager RemoveContainer() error. expected error: fake error but got: %v", err)
	}
}
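// TestReconcileState drives reconcileState over a range of conditions:
// running app and init containers, missing pod or container status,
// empty assigned cpusets, runtime update failures, stale assignments
// for inactive pods, and a lastUpdateState that is or is not current.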
func TestReconcileState(t *testing.T) {
	testPolicy, _ := NewStaticPolicy(
		&topology.CPUTopology{
			NumCPUs:    8,
			NumSockets: 2,
			NumCores:   4,
			CPUDetails: map[int]topology.CPUInfo{
				0: {CoreID: 0, SocketID: 0},
				1: {CoreID: 1, SocketID: 0},
				2: {CoreID: 2, SocketID: 0},
				3: {CoreID: 3, SocketID: 0},
				4: {CoreID: 0, SocketID: 1},
				5: {CoreID: 1, SocketID: 1},
				6: {CoreID: 2, SocketID: 1},
				7: {CoreID: 3, SocketID: 1},
			},
		},
		0,
		cpuset.New(),
		topologymanager.NewFakeManager(),
		nil)

	testCases := []struct {
		description                  string
		policy                       Policy
		activePods                   []*v1.Pod
		pspPS                        v1.PodStatus
		pspFound                     bool
		updateErr                    error
		stAssignments                state.ContainerCPUAssignments
		stDefaultCPUSet              cpuset.CPUSet
		lastUpdateStAssignments      state.ContainerCPUAssignments
		lastUpdateStDefaultCPUSet    cpuset.CPUSet
		expectStAssignments          state.ContainerCPUAssignments
		expectStDefaultCPUSet        cpuset.CPUSet
		expectSucceededContainerName string
		expectFailedContainerName    string
	}{
		{
			description: "cpu manager reconcile - no error",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			stDefaultCPUSet:           cpuset.New(3, 4, 5, 6, 7),
			lastUpdateStAssignments:   state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet: cpuset.New(),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(3, 4, 5, 6, 7),
			expectSucceededContainerName: "fakeContainerName",
			expectFailedContainerName:    "",
		},
		{
			description: "cpu manager reconcile init container - no error",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						InitContainers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				InitContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			stDefaultCPUSet:           cpuset.New(3, 4, 5, 6, 7),
			lastUpdateStAssignments:   state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet: cpuset.New(),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(3, 4, 5, 6, 7),
			expectSucceededContainerName: "fakeContainerName",
			expectFailedContainerName:    "",
		},
		{
			description: "cpu manager reconcile - pod status not found",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS:                        v1.PodStatus{},
			pspFound:                     false,
			updateErr:                    nil,
			stAssignments:                state.ContainerCPUAssignments{},
			stDefaultCPUSet:              cpuset.New(),
			lastUpdateStAssignments:      state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet:    cpuset.New(),
			expectStAssignments:          state.ContainerCPUAssignments{},
			expectStDefaultCPUSet:        cpuset.New(),
			expectSucceededContainerName: "",
			expectFailedContainerName:    "",
		},
		{
			description: "cpu manager reconcile - container state not found",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName1",
						ContainerID: "docker://fakeContainerID",
					},
				},
			},
			pspFound:                     true,
			updateErr:                    nil,
			stAssignments:                state.ContainerCPUAssignments{},
			stDefaultCPUSet:              cpuset.New(),
			lastUpdateStAssignments:      state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet:    cpuset.New(),
			expectStAssignments:          state.ContainerCPUAssignments{},
			expectStDefaultCPUSet:        cpuset.New(),
			expectSucceededContainerName: "",
			expectFailedContainerName:    "fakeContainerName",
		},
		{
			description: "cpu manager reconcile - cpuset is empty",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(),
				},
			},
			stDefaultCPUSet:           cpuset.New(1, 2, 3, 4, 5, 6, 7),
			lastUpdateStAssignments:   state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet: cpuset.New(),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(1, 2, 3, 4, 5, 6, 7),
			expectSucceededContainerName: "",
			expectFailedContainerName:    "fakeContainerName",
		},
		{
			description: "cpu manager reconcile - container update error",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: fmt.Errorf("fake container update error"),
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			stDefaultCPUSet:           cpuset.New(3, 4, 5, 6, 7),
			lastUpdateStAssignments:   state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet: cpuset.New(),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(3, 4, 5, 6, 7),
			expectSucceededContainerName: "",
			expectFailedContainerName:    "fakeContainerName",
		},
		{
			description: "cpu manager reconcile - state has inactive container",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
				"secondfakePodUID": map[string]cpuset.CPUSet{
					"secondfakeContainerName": cpuset.New(3, 4),
				},
			},
			stDefaultCPUSet:           cpuset.New(5, 6, 7),
			lastUpdateStAssignments:   state.ContainerCPUAssignments{},
			lastUpdateStDefaultCPUSet: cpuset.New(),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(3, 4, 5, 6, 7),
			expectSucceededContainerName: "fakeContainerName",
			expectFailedContainerName:    "",
		},
		{
			description: "cpu manager reconcile - last update state is current",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			stDefaultCPUSet: cpuset.New(5, 6, 7),
			lastUpdateStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			lastUpdateStDefaultCPUSet: cpuset.New(5, 6, 7),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(5, 6, 7),
			expectSucceededContainerName: "fakeContainerName",
			expectFailedContainerName:    "",
		},
		{
			description: "cpu manager reconcile - last update state is not current",
			policy:      testPolicy,
			activePods: []*v1.Pod{
				{
					ObjectMeta: metav1.ObjectMeta{
						Name: "fakePodName",
						UID:  "fakePodUID",
					},
					Spec: v1.PodSpec{
						Containers: []v1.Container{
							{
								Name: "fakeContainerName",
							},
						},
					},
				},
			},
			pspPS: v1.PodStatus{
				ContainerStatuses: []v1.ContainerStatus{
					{
						Name:        "fakeContainerName",
						ContainerID: "docker://fakeContainerID",
						State: v1.ContainerState{
							Running: &v1.ContainerStateRunning{},
						},
					},
				},
			},
			pspFound:  true,
			updateErr: nil,
			stAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			stDefaultCPUSet: cpuset.New(3, 4, 5, 6, 7),
			lastUpdateStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(3, 4),
				},
			},
			lastUpdateStDefaultCPUSet: cpuset.New(1, 2, 5, 6, 7),
			expectStAssignments: state.ContainerCPUAssignments{
				"fakePodUID": map[string]cpuset.CPUSet{
					"fakeContainerName": cpuset.New(1, 2),
				},
			},
			expectStDefaultCPUSet:        cpuset.New(3, 4, 5, 6, 7),
			expectSucceededContainerName: "fakeContainerName",
			expectFailedContainerName:    "",
		},
	}

	for _, testCase := range testCases {
		mgr := &manager{
			policy: testCase.policy,
			state: &mockState{
				assignments:   testCase.stAssignments,
				defaultCPUSet: testCase.stDefaultCPUSet,
			},
			lastUpdateState: state.NewMemoryState(),
			containerRuntime: mockRuntimeService{
				err: testCase.updateErr,
			},
			containerMap: containermap.NewContainerMap(),
			activePods: func() []*v1.Pod {
				return testCase.activePods
			},
			podStatusProvider: mockPodStatusProvider{
				podStatus: testCase.pspPS,
				found:     testCase.pspFound,
			},
		}
		mgr.sourcesReady = &sourcesReadyStub{}
		success, failure := mgr.reconcileState()

		if !reflect.DeepEqual(testCase.expectStAssignments, mgr.state.GetCPUAssignments()) {
			t.Errorf("%v", testCase.description)
			t.Errorf("Expected state container cpu assignments: %v, actual: %v", testCase.expectStAssignments, mgr.state.GetCPUAssignments())
		}

		if !reflect.DeepEqual(testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet()) {
			t.Errorf("%v", testCase.description)
			t.Errorf("Expected state default cpuset: %v, actual: %v", testCase.expectStDefaultCPUSet, mgr.state.GetDefaultCPUSet())
		}

		if testCase.expectSucceededContainerName != "" {
			foundSucceededContainer := false
			for _, reconciled := range success {
				if reconciled.containerName == testCase.expectSucceededContainerName {
					foundSucceededContainer = true
					break
				}
			}
			if !foundSucceededContainer {
				t.Errorf("%v", testCase.description)
				t.Errorf("Expected reconciliation success for container: %s", testCase.expectSucceededContainerName)
			}
		}

		if testCase.expectFailedContainerName != "" {
			foundFailedContainer := false
			for _, reconciled := range failure {
				if reconciled.containerName == testCase.expectFailedContainerName {
					foundFailedContainer = true
					break
				}
			}
			if !foundFailedContainer {
				t.Errorf("%v", testCase.description)
				t.Errorf("Expected reconciliation failure for container: %s", testCase.expectFailedContainerName)
			}
		}
	}
}
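// TestCPUManagerAddWithResvList mirrors TestCPUManagerAdd with one
// explicitly reserved CPU (CPU 0); the reserved CPU must remain in the
// default set after allocation.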
func TestCPUManagerAddWithResvList(t *testing.T) {
	testPolicy, _ := NewStaticPolicy(
		&topology.CPUTopology{
			NumCPUs:    4,
			NumSockets: 1,
			NumCores:   4,
			CPUDetails: map[int]topology.CPUInfo{
				0: {CoreID: 0, SocketID: 0},
				1: {CoreID: 1, SocketID: 0},
				2: {CoreID: 2, SocketID: 0},
				3: {CoreID: 3, SocketID: 0},
			},
		},
		1,
		cpuset.New(0),
		topologymanager.NewFakeManager(),
		nil)
	testCases := []struct {
		description        string
		updateErr          error
		policy             Policy
		expCPUSet          cpuset.CPUSet
		expAllocateErr     error
		expAddContainerErr error
	}{
		{
			description:        "cpu manager add - no error",
			updateErr:          nil,
			policy:             testPolicy,
			expCPUSet:          cpuset.New(0, 3),
			expAllocateErr:     nil,
			expAddContainerErr: nil,
		},
	}

	for _, testCase := range testCases {
		mgr := &manager{
			policy: testCase.policy,
			state: &mockState{
				assignments:   state.ContainerCPUAssignments{},
				defaultCPUSet: cpuset.New(0, 1, 2, 3),
			},
			lastUpdateState: state.NewMemoryState(),
			containerRuntime: mockRuntimeService{
				err: testCase.updateErr,
			},
			containerMap:      containermap.NewContainerMap(),
			podStatusProvider: mockPodStatusProvider{},
			sourcesReady:      &sourcesReadyStub{},
		}

		pod := makePod("fakePod", "fakeContainer", "2", "2")
		container := &pod.Spec.Containers[0]
		mgr.activePods = func() []*v1.Pod { return nil }

		err := mgr.Allocate(pod, container)
		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
			t.Errorf("CPU Manager Allocate() error (%v). expected error: %v but got: %v",
				testCase.description, testCase.expAllocateErr, err)
		}

		mgr.AddContainer(pod, container, "fakeID")
		_, _, err = mgr.containerMap.GetContainerRef("fakeID")
		if !reflect.DeepEqual(err, testCase.expAddContainerErr) {
			t.Errorf("CPU Manager AddContainer() error (%v). expected error: %v but got: %v",
				testCase.description, testCase.expAddContainerErr, err)
		}
		if !testCase.expCPUSet.Equals(mgr.state.GetDefaultCPUSet()) {
			t.Errorf("CPU Manager AddContainer() error (%v). expected cpuset: %v but got: %v",
				testCase.description, testCase.expCPUSet, mgr.state.GetDefaultCPUSet())
		}
	}
}
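// TestCPUManagerHandlePolicyOptions verifies that NewManager rejects
// policy options passed to a policy that does not support them.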
func TestCPUManagerHandlePolicyOptions(t *testing.T) {
	testCases := []struct {
		description      string
		cpuPolicyName    string
		cpuPolicyOptions map[string]string
		expectedError    error
	}{
		{
			description:   "options to none policy",
			cpuPolicyName: "none",
			cpuPolicyOptions: map[string]string{
				FullPCPUsOnlyOption: "true",
			},
			expectedError: fmt.Errorf("received unsupported options"),
		},
	}

	mockedMachineInfo := cadvisorapi.MachineInfo{
		NumCores: 4,
		Topology: []cadvisorapi.Node{
			{
				Cores: []cadvisorapi.Core{
					{
						Id:      0,
						Threads: []int{0},
					},
					{
						Id:      1,
						Threads: []int{1},
					},
					{
						Id:      2,
						Threads: []int{2},
					},
					{
						Id:      3,
						Threads: []int{3},
					},
				},
			},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			machineInfo := &mockedMachineInfo
			nodeAllocatableReservation := v1.ResourceList{}
			sDir, err := os.MkdirTemp("", "cpu_manager_test")
			if err != nil {
				t.Errorf("cannot create state file: %s", err.Error())
			}
			defer os.RemoveAll(sDir)

			_, err = NewManager(testCase.cpuPolicyName, testCase.cpuPolicyOptions, 5*time.Second, machineInfo, cpuset.New(), nodeAllocatableReservation, sDir, topologymanager.NewFakeManager())
			if err == nil {
				// Fail fast: the Contains check below would dereference a nil error.
				t.Fatalf("Expected error, but NewManager succeeded")
			}
			if !strings.Contains(err.Error(), testCase.expectedError.Error()) {
				t.Errorf("Unexpected error message. Have: %s wants %s", err.Error(), testCase.expectedError.Error())
			}
		})
	}
}
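// TestCPUManagerGetAllocatableCPUs checks GetAllocatableCPUs: empty
// for the none policy, and all CPUs minus the reserved set for the
// static policy.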
func TestCPUManagerGetAllocatableCPUs(t *testing.T) {
	nonePolicy, _ := NewNonePolicy(nil)
	staticPolicy, _ := NewStaticPolicy(
		&topology.CPUTopology{
			NumCPUs:    4,
			NumSockets: 1,
			NumCores:   4,
			CPUDetails: map[int]topology.CPUInfo{
				0: {CoreID: 0, SocketID: 0},
				1: {CoreID: 1, SocketID: 0},
				2: {CoreID: 2, SocketID: 0},
				3: {CoreID: 3, SocketID: 0},
			},
		},
		1,
		cpuset.New(0),
		topologymanager.NewFakeManager(),
		nil)

	testCases := []struct {
		description        string
		policy             Policy
		expAllocatableCPUs cpuset.CPUSet
	}{
		{
			description:        "None Policy",
			policy:             nonePolicy,
			expAllocatableCPUs: cpuset.New(),
		},
		{
			description:        "Static Policy",
			policy:             staticPolicy,
			expAllocatableCPUs: cpuset.New(1, 2, 3),
		},
	}
	for _, testCase := range testCases {
		mgr := &manager{
			policy:     testCase.policy,
			activePods: func() []*v1.Pod { return nil },
			state: &mockState{
				assignments:   state.ContainerCPUAssignments{},
				defaultCPUSet: cpuset.New(0, 1, 2, 3),
			},
			lastUpdateState:   state.NewMemoryState(),
			containerMap:      containermap.NewContainerMap(),
			podStatusProvider: mockPodStatusProvider{},
			sourcesReady:      &sourcesReadyStub{},
		}
		mgr.allocatableCPUs = testCase.policy.GetAllocatableCPUs(mgr.state)

		pod := makePod("fakePod", "fakeContainer", "2", "2")
		container := &pod.Spec.Containers[0]

		_ = mgr.Allocate(pod, container)

		if !mgr.GetAllocatableCPUs().Equals(testCase.expAllocatableCPUs) {
			t.Errorf("Policy GetAllocatableCPUs() error (%v). expected cpuset %v for container %v but got %v",
				testCase.description, testCase.expAllocatableCPUs, "fakeContainer", mgr.GetAllocatableCPUs())
		}
	}
}