package kuberuntime

import (
	"context"
	"fmt"
	"path/filepath"
	"reflect"
	"sort"
	"testing"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	cadvisorapi "github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	oteltrace "go.opentelemetry.io/otel/trace"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/util/flowcontrol"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
	apitest "k8s.io/cri-api/pkg/apis/testing"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/credentialprovider"
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
)

var (
	fakeCreatedAt                int64 = 1
	containerRestartPolicyAlways       = v1.ContainerRestartPolicyAlways
)

func createTestRuntimeManager() (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	return customTestRuntimeManager(&credentialprovider.BasicDockerKeyring{})
}

func customTestRuntimeManager(keyring *credentialprovider.BasicDockerKeyring) (*apitest.FakeRuntimeService, *apitest.FakeImageService, *kubeGenericRuntimeManager, error) {
	fakeRuntimeService := apitest.NewFakeRuntimeService()
	fakeImageService := apitest.NewFakeImageService()
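	// Only a minimal MachineInfo is needed here; its memory capacity mirrors
	// the fake node allocatable memory used by these tests.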
	memoryCapacityQuantity := resource.MustParse(fakeNodeAllocatableMemory)
	machineInfo := &cadvisorapi.MachineInfo{
		MemoryCapacity: uint64(memoryCapacityQuantity.Value()),
	}
	osInterface := &containertest.FakeOS{}
	manager, err := newFakeKubeRuntimeManager(fakeRuntimeService, fakeImageService, machineInfo, osInterface, &containertest.FakeRuntimeHelper{}, keyring, oteltrace.NewNoopTracerProvider().Tracer(""))
	return fakeRuntimeService, fakeImageService, manager, err
}
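// sandboxTemplate is a sandbox template to create a fake sandbox.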
type sandboxTemplate struct {
	pod         *v1.Pod
	attempt     uint32
	createdAt   int64
	state       runtimeapi.PodSandboxState
	running     bool
	terminating bool
}
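// containerTemplate is a container template to create a fake container.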
type containerTemplate struct {
	pod            *v1.Pod
	container      *v1.Container
	sandboxAttempt uint32
	attempt        int
	createdAt      int64
	state          runtimeapi.ContainerState
}
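// makeAndSetFakePod is a helper function to create and set one fake sandbox
// for a pod and a fake container for each of its containers.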
func makeAndSetFakePod(t *testing.T, m *kubeGenericRuntimeManager, fakeRuntime *apitest.FakeRuntimeService,
	pod *v1.Pod) (*apitest.FakePodSandbox, []*apitest.FakeContainer) {
	sandbox := makeFakePodSandbox(t, m, sandboxTemplate{
		pod:       pod,
		createdAt: fakeCreatedAt,
		state:     runtimeapi.PodSandboxState_SANDBOX_READY,
	})

	var containers []*apitest.FakeContainer
	newTemplate := func(c *v1.Container) containerTemplate {
		return containerTemplate{
			pod:       pod,
			container: c,
			createdAt: fakeCreatedAt,
			state:     runtimeapi.ContainerState_CONTAINER_RUNNING,
		}
	}
	podutil.VisitContainers(&pod.Spec, podutil.AllFeatureEnabledContainers(), func(c *v1.Container, containerType podutil.ContainerType) bool {
		containers = append(containers, makeFakeContainer(t, m, newTemplate(c)))
		return true
	})

	fakeRuntime.SetFakeSandboxes([]*apitest.FakePodSandbox{sandbox})
	fakeRuntime.SetFakeContainers(containers)
	return sandbox, containers
}
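// makeFakePodSandbox creates a fake pod sandbox based on a sandbox template.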
func makeFakePodSandbox(t *testing.T, m *kubeGenericRuntimeManager, template sandboxTemplate) *apitest.FakePodSandbox {
	config, err := m.generatePodSandboxConfig(template.pod, template.attempt)
	assert.NoError(t, err, "generatePodSandboxConfig for sandbox template %+v", template)

	podSandboxID := apitest.BuildSandboxName(config.Metadata)
	podSandBoxStatus := &apitest.FakePodSandbox{
		PodSandboxStatus: runtimeapi.PodSandboxStatus{
			Id:        podSandboxID,
			Metadata:  config.Metadata,
			State:     template.state,
			CreatedAt: template.createdAt,
			Network: &runtimeapi.PodSandboxNetworkStatus{
				Ip: apitest.FakePodSandboxIPs[0],
			},
			Labels: config.Labels,
		},
	}

	additionalIPs := apitest.FakePodSandboxIPs[1:]
	additionalPodIPs := make([]*runtimeapi.PodIP, 0, len(additionalIPs))
	for _, ip := range additionalIPs {
		additionalPodIPs = append(additionalPodIPs, &runtimeapi.PodIP{
			Ip: ip,
		})
	}
	podSandBoxStatus.Network.AdditionalIps = additionalPodIPs
	return podSandBoxStatus
}
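// makeFakePodSandboxes creates a group of fake pod sandboxes based on the sandbox templates.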
func makeFakePodSandboxes(t *testing.T, m *kubeGenericRuntimeManager, templates []sandboxTemplate) []*apitest.FakePodSandbox {
	var fakePodSandboxes []*apitest.FakePodSandbox
	for _, template := range templates {
		fakePodSandboxes = append(fakePodSandboxes, makeFakePodSandbox(t, m, template))
	}
	return fakePodSandboxes
}
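// makeFakeContainer creates a fake container based on a container template.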
func makeFakeContainer(t *testing.T, m *kubeGenericRuntimeManager, template containerTemplate) *apitest.FakeContainer {
	ctx := context.Background()
	sandboxConfig, err := m.generatePodSandboxConfig(template.pod, template.sandboxAttempt)
	assert.NoError(t, err, "generatePodSandboxConfig for container template %+v", template)

	containerConfig, _, err := m.generateContainerConfig(ctx, template.container, template.pod, template.attempt, "", template.container.Image, []string{}, nil)
	assert.NoError(t, err, "generateContainerConfig for container template %+v", template)

	podSandboxID := apitest.BuildSandboxName(sandboxConfig.Metadata)
	containerID := apitest.BuildContainerName(containerConfig.Metadata, podSandboxID)
	imageRef := containerConfig.Image.Image
	return &apitest.FakeContainer{
		ContainerStatus: runtimeapi.ContainerStatus{
			Id:          containerID,
			Metadata:    containerConfig.Metadata,
			Image:       containerConfig.Image,
			ImageRef:    imageRef,
			CreatedAt:   template.createdAt,
			State:       template.state,
			Labels:      containerConfig.Labels,
			Annotations: containerConfig.Annotations,
			LogPath:     filepath.Join(sandboxConfig.GetLogDirectory(), containerConfig.GetLogPath()),
		},
		SandboxID: podSandboxID,
	}
}
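// makeFakeContainers creates a group of fake containers based on the container templates.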
func makeFakeContainers(t *testing.T, m *kubeGenericRuntimeManager, templates []containerTemplate) []*apitest.FakeContainer {
	var fakeContainers []*apitest.FakeContainer
	for _, template := range templates {
		fakeContainers = append(fakeContainers, makeFakeContainer(t, m, template))
	}
	return fakeContainers
}
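// makeTestContainer creates a test api container.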
func makeTestContainer(name, image string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: image,
	}
}
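// makeTestPod creates a test api pod.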
func makeTestPod(podName, podNamespace, podUID string, containers []v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       types.UID(podUID),
			Name:      podName,
			Namespace: podNamespace,
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}
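// verifyPods returns true if the two pod slices are equal.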
func verifyPods(a, b []*kubecontainer.Pod) bool {
	if len(a) != len(b) {
		return false
	}
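	// Sort the containers within each pod.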
	for i := range a {
		sort.Sort(containersByID(a[i].Containers))
	}
	for i := range b {
		sort.Sort(containersByID(b[i].Containers))
	}
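	// Sort the pods by UID.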
	sort.Sort(podsByID(a))
	sort.Sort(podsByID(b))

	return reflect.DeepEqual(a, b)
}

func verifyFakeContainerList(fakeRuntime *apitest.FakeRuntimeService, expected sets.String) (sets.String, bool) {
	actual := sets.NewString()
	for _, c := range fakeRuntime.Containers {
		actual.Insert(c.Id)
	}
	return actual, actual.Equal(expected)
}
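// cRecord captures only the container fields of interest for verification.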
type cRecord struct {
	name    string
	attempt uint32
	state   runtimeapi.ContainerState
}

type cRecordList []*cRecord

func (b cRecordList) Len() int      { return len(b) }
func (b cRecordList) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b cRecordList) Less(i, j int) bool {
	if b[i].name != b[j].name {
		return b[i].name < b[j].name
	}
	return b[i].attempt < b[j].attempt
}

func verifyContainerStatuses(t *testing.T, runtime *apitest.FakeRuntimeService, expected []*cRecord, desc string) {
	actual := []*cRecord{}
	for _, cStatus := range runtime.Containers {
		actual = append(actual, &cRecord{name: cStatus.Metadata.Name, attempt: cStatus.Metadata.Attempt, state: cStatus.State})
	}
	sort.Sort(cRecordList(expected))
	sort.Sort(cRecordList(actual))
	assert.Equal(t, expected, actual, desc)
}

func TestNewKubeRuntimeManager(t *testing.T) {
	_, _, _, err := createTestRuntimeManager()
	assert.NoError(t, err)
}

func TestVersion(t *testing.T) {
	ctx := context.Background()
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	version, err := m.Version(ctx)
	assert.NoError(t, err)
	assert.Equal(t, kubeRuntimeAPIVersion, version.String())
}

func TestContainerRuntimeType(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	runtimeType := m.Type()
	assert.Equal(t, apitest.FakeRuntimeName, runtimeType)
}

func TestGetPodStatus(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
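	// Set fake sandbox and fake containers to fakeRuntime.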
	makeAndSetFakePod(t, m, fakeRuntime, pod)

	podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	assert.Equal(t, pod.UID, podStatus.ID)
	assert.Equal(t, pod.Name, podStatus.Name)
	assert.Equal(t, pod.Namespace, podStatus.Namespace)
	assert.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs)
}

func TestStopContainerWithNotFoundError(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
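	// Set fake sandbox and fake containers to fakeRuntime.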
	makeAndSetFakePod(t, m, fakeRuntime, pod)
	fakeRuntime.InjectError("StopContainer", status.Error(codes.NotFound, "No such container"))
	podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	require.NoError(t, err)
	p := kubecontainer.ConvertPodStatusToRunningPod("", podStatus)
	gracePeriod := int64(1)
	err = m.KillPod(ctx, pod, p, &gracePeriod)
	require.NoError(t, err)
}

func TestGetPodStatusWithNotFoundError(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
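	// Set fake sandbox and fake containers to fakeRuntime.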
	makeAndSetFakePod(t, m, fakeRuntime, pod)
	fakeRuntime.InjectError("ContainerStatus", status.Error(codes.NotFound, "No such container"))
	podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	require.NoError(t, err)
	require.Equal(t, pod.UID, podStatus.ID)
	require.Equal(t, pod.Name, podStatus.Name)
	require.Equal(t, pod.Namespace, podStatus.Namespace)
	require.Equal(t, apitest.FakePodSandboxIPs, podStatus.IPs)
}

func TestGetPods(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
		},
	}
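	// Set fake sandbox and fake containers to fakeRuntime.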
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
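	// Convert the fake containers to kubecontainer.Container.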
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:          fakeContainer.Id,
			Metadata:    fakeContainer.Metadata,
			State:       fakeContainer.State,
			Image:       fakeContainer.Image,
			ImageRef:    fakeContainer.ImageRef,
			Labels:      fakeContainer.Labels,
			Annotations: fakeContainer.Annotations,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
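	// Convert the fake sandbox to kubecontainer.Container.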
	sandbox, err := m.sandboxToKubeContainer(&runtimeapi.PodSandbox{
		Id:          fakeSandbox.Id,
		Metadata:    fakeSandbox.Metadata,
		State:       fakeSandbox.State,
		CreatedAt:   fakeSandbox.CreatedAt,
		Labels:      fakeSandbox.Labels,
		Annotations: fakeSandbox.Annotations,
	})
	if err != nil {
		t.Fatalf("unexpected error %v", err)
	}

	expected := []*kubecontainer.Pod{
		{
			ID:         types.UID("12345678"),
			Name:       "foo",
			Namespace:  "new",
			CreatedAt:  uint64(fakeSandbox.CreatedAt),
			Containers: []*kubecontainer.Container{containers[0], containers[1]},
			Sandboxes:  []*kubecontainer.Container{sandbox},
		},
	}

	actual, err := m.GetPods(ctx, false)
	assert.NoError(t, err)

	if !verifyPods(expected, actual) {
		t.Errorf("expected %#v, got %#v", expected, actual)
	}
}

func TestGetPodsSorted(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "bar"}}

	createdTimestamps := []uint64{10, 5, 20}
	fakeSandboxes := []*apitest.FakePodSandbox{}
	for i, createdAt := range createdTimestamps {
		pod.UID = types.UID(fmt.Sprint(i))
		fakeSandboxes = append(fakeSandboxes, makeFakePodSandbox(t, m, sandboxTemplate{
			pod:       pod,
			createdAt: int64(createdAt),
			state:     runtimeapi.PodSandboxState_SANDBOX_READY,
		}))
	}
	fakeRuntime.SetFakeSandboxes(fakeSandboxes)

	actual, err := m.GetPods(ctx, false)
	assert.NoError(t, err)

	assert.Len(t, actual, 3)
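	// Pods are returned sorted by creation time, newest first.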
	assert.Equal(t, uint64(createdTimestamps[2]), actual[0].CreatedAt)
	assert.Equal(t, uint64(createdTimestamps[0]), actual[1].CreatedAt)
	assert.Equal(t, uint64(createdTimestamps[1]), actual[2].CreatedAt)
}

func TestKillPod(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
			},
			EphemeralContainers: []v1.EphemeralContainer{
				{
					EphemeralContainerCommon: v1.EphemeralContainerCommon{
						Name:  "debug",
						Image: "busybox",
					},
				},
			},
		},
	}
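	// Set fake sandbox and fake containers to fakeRuntime.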
	fakeSandbox, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
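	// Convert the fake containers to kubecontainer.Container.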
	containers := make([]*kubecontainer.Container, len(fakeContainers))
	for i := range containers {
		fakeContainer := fakeContainers[i]
		c, err := m.toKubeContainer(&runtimeapi.Container{
			Id:       fakeContainer.Id,
			Metadata: fakeContainer.Metadata,
			State:    fakeContainer.State,
			Image:    fakeContainer.Image,
			ImageRef: fakeContainer.ImageRef,
			Labels:   fakeContainer.Labels,
		})
		if err != nil {
			t.Fatalf("unexpected error %v", err)
		}
		containers[i] = c
	}
	runningPod := kubecontainer.Pod{
		ID:         pod.UID,
		Name:       pod.Name,
		Namespace:  pod.Namespace,
		Containers: []*kubecontainer.Container{containers[0], containers[1], containers[2]},
		Sandboxes: []*kubecontainer.Container{
			{
				ID: kubecontainer.ContainerID{
					ID:   fakeSandbox.Id,
					Type: apitest.FakeRuntimeName,
				},
			},
		},
	}

	err = m.KillPod(ctx, pod, runningPod, nil)
	assert.NoError(t, err)
	assert.Equal(t, 3, len(fakeRuntime.Containers))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_EXITED, c.State)
	}
}

func TestSyncPod(t *testing.T) {
	fakeRuntime, fakeImage, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, 2, len(fakeRuntime.Containers))
	assert.Equal(t, 2, len(fakeImage.Images))
	assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
	}
}

func TestSyncPodWithConvertedPodSysctls(t *testing.T) {
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	containers := []v1.Container{
		{
			Name:            "foo",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}

	securityContext := &v1.PodSecurityContext{
		Sysctls: []v1.Sysctl{
			{
				Name:  "kernel/shm_rmid_forced",
				Value: "1",
			},
			{
				Name:  "net/ipv4/ip_local_port_range",
				Value: "1024 65535",
			},
		},
	}
	expectedSysctls := []v1.Sysctl{
		{
			Name:  "kernel.shm_rmid_forced",
			Value: "1",
		},
		{
			Name:  "net.ipv4.ip_local_port_range",
			Value: "1024 65535",
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers:      containers,
			SecurityContext: securityContext,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
	result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	assert.Equal(t, expectedSysctls, pod.Spec.SecurityContext.Sysctls)
	for _, sandbox := range fakeRuntime.Sandboxes {
		assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
	}
	for _, c := range fakeRuntime.Containers {
		assert.Equal(t, runtimeapi.ContainerState_CONTAINER_RUNNING, c.State)
	}
}

func TestPruneInitContainers(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	init1 := makeTestContainer("init1", "busybox")
	init2 := makeTestContainer("init2", "busybox")
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{init1, init2},
		},
	}

	templates := []containerTemplate{
		{pod: pod, container: &init1, attempt: 3, createdAt: 3, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 2, createdAt: 2, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init2, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 1, createdAt: 1, state: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
		{pod: pod, container: &init2, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{pod: pod, container: &init1, attempt: 0, createdAt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	fakes := makeFakeContainers(t, m, templates)
	fakeRuntime.SetFakeContainers(fakes)
	podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)

	m.pruneInitContainersBeforeStart(ctx, pod, podStatus)
	expectedContainers := sets.NewString(fakes[0].Id, fakes[2].Id)
	if actual, ok := verifyFakeContainerList(fakeRuntime, expectedContainers); !ok {
		t.Errorf("expected %v, got %v", expectedContainers, actual)
	}
}

func TestSyncPodWithInitContainers(t *testing.T) {
	ctx := context.Background()
	fakeRuntime, _, m, err := createTestRuntimeManager()
	assert.NoError(t, err)

	initContainers := []v1.Container{
		{
			Name:            "init1",
			Image:           "init",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	containers := []v1.Container{
		{
			Name:            "foo1",
			Image:           "busybox",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
		{
			Name:            "foo2",
			Image:           "alpine",
			ImagePullPolicy: v1.PullIfNotPresent,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "new",
		},
		Spec: v1.PodSpec{
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
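	// 1. should only create the init container.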
	podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected := []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "start only the init container")
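	// 2. should not create the app containers because the init container is still running.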
	podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	verifyContainerStatuses(t, fakeRuntime, expected, "init container still running; do nothing")
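	// 3. should create all app containers because the init container has finished.
	// Stop init container instance 0.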
	sandboxIDs, err := m.getSandboxIDByPodUID(ctx, pod.UID, nil)
	require.NoError(t, err)
	sandboxID := sandboxIDs[0]
	initID0, err := fakeRuntime.GetContainerID(sandboxID, initContainers[0].Name, 0)
	require.NoError(t, err)
	fakeRuntime.StopContainer(ctx, initID0, 0)

	podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
		{name: initContainers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "init container completed; all app containers should be running")
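	// 4. should restart the init container when a new pod sandbox is created.
	// Stop the pod sandbox.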
	fakeRuntime.StopPodSandbox(ctx, sandboxID)

	podStatus, err = m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
	assert.NoError(t, err)
	result = m.SyncPod(ctx, pod, podStatus, []v1.Secret{}, backOff)
	assert.NoError(t, result.Error())
	expected = []*cRecord{
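		// The first init container instance (attempt 0) was purged; a new
		// instance (attempt 1) has been started and is running.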
		{name: initContainers[0].Name, attempt: 1, state: runtimeapi.ContainerState_CONTAINER_RUNNING},
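		// All the app containers were killed along with the old sandbox.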
		{name: containers[0].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
		{name: containers[1].Name, attempt: 0, state: runtimeapi.ContainerState_CONTAINER_EXITED},
	}
	verifyContainerStatuses(t, fakeRuntime, expected, "kill all app containers, purge the existing init container, and restart a new one")
}
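// makeBasePodAndStatus creates a basic pod and its status that the
// computePodActions test cases mutate to exercise specific fields.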
func makeBasePodAndStatus() (*v1.Pod, *kubecontainer.PodStatus) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID:       "12345678",
			Name:      "foo",
			Namespace: "foo-ns",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "foo1",
					Image: "busybox",
				},
				{
					Name:  "foo2",
					Image: "busybox",
				},
				{
					Name:  "foo3",
					Image: "busybox",
				},
			},
		},
		Status: v1.PodStatus{
			ContainerStatuses: []v1.ContainerStatus{
				{
					ContainerID: "://id1",
					Name:        "foo1",
					Image:       "busybox",
					State:       v1.ContainerState{Running: &v1.ContainerStateRunning{}},
				},
				{
					ContainerID: "://id2",
					Name:        "foo2",
					Image:       "busybox",
					State:       v1.ContainerState{Running: &v1.ContainerStateRunning{}},
				},
				{
					ContainerID: "://id3",
					Name:        "foo3",
					Image:       "busybox",
					State:       v1.ContainerState{Running: &v1.ContainerStateRunning{}},
				},
			},
		},
	}
	status := &kubecontainer.PodStatus{
		ID:        pod.UID,
		Name:      pod.Name,
		Namespace: pod.Namespace,
		SandboxStatuses: []*runtimeapi.PodSandboxStatus{
			{
				Id:       "sandboxID",
				State:    runtimeapi.PodSandboxState_SANDBOX_READY,
				Metadata: &runtimeapi.PodSandboxMetadata{Name: pod.Name, Namespace: pod.Namespace, Uid: "sandboxuid", Attempt: uint32(0)},
				Network:  &runtimeapi.PodSandboxNetworkStatus{Ip: "10.0.0.1"},
			},
		},
		ContainerStatuses: []*kubecontainer.Status{
			{
				ID:   kubecontainer.ContainerID{ID: "id1"},
				Name: "foo1", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[0]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id2"},
				Name: "foo2", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[1]),
			},
			{
				ID:   kubecontainer.ContainerID{ID: "id3"},
				Name: "foo3", State: kubecontainer.ContainerStateRunning,
				Hash: kubecontainer.HashContainer(&pod.Spec.Containers[2]),
			},
		},
	}
	return pod, status
}

func TestComputePodActions(t *testing.T) {
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)
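	// Create a base pod and status that the test cases use as a reference
	// for the expected pod actions.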
	basePod, baseStatus := makeBasePodAndStatus()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
		resetStatusFn  func(*kubecontainer.PodStatus)
	}{
		"everything is good; do nothing": {
			actions: noAction,
		},
		"start pod sandbox and all containers for a new pod": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
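				// No container or sandbox exists.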
				status.SandboxStatuses = []*runtimeapi.PodSandboxStatus{}
				status.ContainerStatuses = []*kubecontainer.Status{}
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				Attempt:           uint32(0),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart exited containers if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
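				// The first container completed successfully.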
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0
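				// The second container exited with a failure.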
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"restart failed containers if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
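				// The first container completed successfully.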
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0
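				// The second container exited with a failure.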
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{1},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"don't restart containers if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
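				// Both containers have exited; nothing should be restarted.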
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[0].ExitCode = 0
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 111
			},
			actions: noAction,
		},
		"Kill pod and recreate everything if the pod sandbox is dead, and RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers (except for the succeeded one) if the pod sandbox is dead, and RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[1].ExitCode = 0
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill pod and recreate all containers if the PodSandbox does not have an IP": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].Network.Ip = ""
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{}),
			},
		},
		"Kill and recreate the container if the container's spec changed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[1].Hash = uint64(432423432)
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
		},
		"Kill and recreate the container if the liveness check has failed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod)
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.livenessManager.Remove(status.ContainerStatuses[1].ID)
			},
		},
		"Kill and recreate the container if the startup check has failed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Failure, basePod)
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.startupManager.Remove(status.ContainerStatuses[1].ID)
			},
		},
		"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and all containers exited": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// No ready sandbox.
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
				// All containers exited.
				for i := range status.ContainerStatuses {
					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
					status.ContainerStatuses[i].ExitCode = 0
				}
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(2),
				CreateSandbox:     false,
				KillPod:           true,
				ContainersToStart: []int{},
				ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
			},
		},
		"Verify we do not create a pod sandbox if no ready sandbox for pod with RestartPolicy=OnFailure and all containers succeeded": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// No ready sandbox.
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.SandboxStatuses[0].Metadata.Attempt = uint32(1)
				// All containers succeeded.
				for i := range status.ContainerStatuses {
					status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
					status.ContainerStatuses[i].ExitCode = 0
				}
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(2),
				CreateSandbox:     false,
				KillPod:           true,
				ContainersToStart: []int{},
				ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
			},
		},
		"Verify we create a pod sandbox if no ready sandbox for pod with RestartPolicy=Never and no containers have ever been created": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyNever
			},
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				// No ready sandbox.
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.SandboxStatuses[0].Metadata.Attempt = uint32(2)
				// No visible containers.
				status.ContainerStatuses = []*kubecontainer.Status{}
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(3),
				CreateSandbox:     true,
				KillPod:           true,
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
			},
		},
		"Kill and recreate the container if the container is in unknown state": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[1].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToKill:  getKillMap(basePod, baseStatus, []int{1}),
				ContainersToStart: []int{1},
			},
		},
	} {
		pod, status := makeBasePodAndStatus()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		ctx := context.Background()
		actions := m.computePodActions(ctx, pod, status)
		verifyActions(t, &test.actions, &actions, desc)
		if test.resetStatusFn != nil {
			test.resetStatusFn(status)
		}
	}
}

func getKillMap(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
	m := map[kubecontainer.ContainerID]containerToKillInfo{}
	for _, i := range cIndexes {
		m[status.ContainerStatuses[i].ID] = containerToKillInfo{
			container: &pod.Spec.Containers[i],
			name:      pod.Spec.Containers[i].Name,
		}
	}
	return m
}

func getKillMapWithInitContainers(pod *v1.Pod, status *kubecontainer.PodStatus, cIndexes []int) map[kubecontainer.ContainerID]containerToKillInfo {
	m := map[kubecontainer.ContainerID]containerToKillInfo{}
	for _, i := range cIndexes {
		m[status.ContainerStatuses[i].ID] = containerToKillInfo{
			container: &pod.Spec.InitContainers[i],
			name:      pod.Spec.InitContainers[i].Name,
		}
	}
	return m
}

func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
	if actual.ContainersToKill != nil {
		// Clear the message and reason fields since we don't need to verify them.
		for k, info := range actual.ContainersToKill {
			info.message = ""
			info.reason = ""
			actual.ContainersToKill[k] = info
		}
	}
	assert.Equal(t, expected, actual, desc)
}

func TestComputePodActionsWithInitContainers(t *testing.T) {
	t.Run("sidecar containers disabled", func(t *testing.T) {
		testComputePodActionsWithInitContainers(t, false)
	})
	t.Run("sidecar containers enabled", func(t *testing.T) {
		testComputePodActionsWithInitContainers(t, true)
	})
}

func testComputePodActionsWithInitContainers(t *testing.T, sidecarContainersEnabled bool) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)()
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)
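	// Create a base pod and status that the test cases use as a reference
	// for the expected pod actions.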
	basePod, baseStatus := makeBasePodAndStatusWithInitContainers()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*kubecontainer.PodStatus)
		actions        podActions
	}{
		"initialization completed; start all containers": {
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"no init containers have been started; start the first one": {
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses = nil
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				InitContainersToStart:    []int{0},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization in progress; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
			},
			actions: noAction,
		},
		"Kill pod and restart the first init container if the pod sandbox is dead": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:                  true,
				CreateSandbox:            true,
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				Attempt:                  uint32(1),
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				InitContainersToStart:    []int{0},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				InitContainersToStart:    []int{2},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; restart the last init container if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				InitContainersToStart:    []int{2},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization failed; kill pod if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:           true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"init container state unknown; kill and recreate the last init container if RestartPolicy == Always": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				InitContainersToStart:    []int{2},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
			},
		},
		"init container state unknown; kill and recreate the last init container if RestartPolicy == OnFailure": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				NextInitContainerToStart: &basePod.Spec.InitContainers[2],
				InitContainersToStart:    []int{2},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
			},
		},
		"init container state unknown; kill pod if RestartPolicy == Never": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
			},
			actions: podActions{
				KillPod:           true,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, init container failed, but RestartPolicy == Never; kill pod only": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     false,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, and RestartPolicy == Never, but no visible init containers; create a new pod sandbox": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses = []*kubecontainer.Status{}
			},
			actions: podActions{
				KillPod:                  true,
				CreateSandbox:            true,
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				Attempt:                  uint32(1),
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				InitContainersToStart:    []int{0},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:                  true,
				CreateSandbox:            true,
				SandboxID:                baseStatus.SandboxStatuses[0].Id,
				Attempt:                  uint32(1),
				NextInitContainerToStart: &basePod.Spec.InitContainers[0],
				InitContainersToStart:    []int{0},
				ContainersToStart:        []int{},
				ContainersToKill:         getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"some of the init container statuses are missing but the last init container is running, don't restart preceding ones": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateRunning
				status.ContainerStatuses = status.ContainerStatuses[2:]
			},
			actions: podActions{
				KillPod:           false,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
	} {
		pod, status := makeBasePodAndStatusWithInitContainers()
		if test.mutatePodFn != nil {
			test.mutatePodFn(pod)
		}
		if test.mutateStatusFn != nil {
			test.mutateStatusFn(status)
		}
		ctx := context.Background()
		actions := m.computePodActions(ctx, pod, status)
		if !sidecarContainersEnabled {
			// The legacy code path reports the next init container via
			// NextInitContainerToStart and leaves InitContainersToStart unset.
			test.actions.InitContainersToStart = nil
		} else {
			// The sidecar-aware code path reports init containers via
			// InitContainersToStart and leaves NextInitContainerToStart unset.
			test.actions.NextInitContainerToStart = nil
		}
		verifyActions(t, &test.actions, &actions, desc)
	}
}

func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
	pod, status := makeBasePodAndStatus()
	pod.Spec.InitContainers = []v1.Container{
		{
			Name:  "init1",
			Image: "bar-image",
		},
		{
			Name:  "init2",
			Image: "bar-image",
		},
		{
			Name:  "init3",
			Image: "bar-image",
		},
	}
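	// Replace the original statuses with statuses for the init containers.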
	status.ContainerStatuses = []*kubecontainer.Status{
		{
			ID:   kubecontainer.ContainerID{ID: "initid1"},
			Name: "init1", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid2"},
			Name: "init2", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
		},
		{
			ID:   kubecontainer.ContainerID{ID: "initid3"},
			Name: "init3", State: kubecontainer.ContainerStateExited,
			Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
		},
	}
	return pod, status
}

func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)()
	_, _, m, err := createTestRuntimeManager()
	require.NoError(t, err)
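	// Create a base pod and status that the test cases use as a reference
	// for the expected pod actions.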
	basePod, baseStatus := makeBasePodAndStatusWithRestartableInitContainers()
	noAction := podActions{
		SandboxID:         baseStatus.SandboxStatuses[0].Id,
		ContainersToStart: []int{},
		ContainersToKill:  map[kubecontainer.ContainerID]containerToKillInfo{},
	}

	for desc, test := range map[string]struct {
		mutatePodFn    func(*v1.Pod)
		mutateStatusFn func(*v1.Pod, *kubecontainer.PodStatus)
		actions        podActions
		resetStatusFn  func(*kubecontainer.PodStatus)
	}{
		"initialization completed; start all containers": {
			actions: podActions{
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				ContainersToStart: []int{0, 1, 2},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"no init containers have been started; start the first one": {
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses = nil
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{0},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"initialization in progress; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateCreated
			},
			actions: noAction,
		},
		"restartable init container has started; start the next": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses = status.ContainerStatuses[:1]
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{1},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
1547 "livenessProbe has not been run; start the nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.livenessManager.Remove(status.ContainerStatuses[1].ID)
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"livenessProbe in progress; start the next": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod)
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.livenessManager.Remove(status.ContainerStatuses[1].ID)
			},
		},
		"livenessProbe has completed; start the next": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"kill and recreate the restartable init container if the liveness check has failed": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod)
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
				ContainersToStart:     []int{0, 1, 2},
			},
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.livenessManager.Remove(status.ContainerStatuses[2].ID)
			},
		},
		"startupProbe has not been run; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.startupManager.Remove(status.ContainerStatuses[1].ID)
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: noAction,
		},
		"startupProbe in progress; do nothing": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod)
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: noAction,
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.startupManager.Remove(status.ContainerStatuses[1].ID)
			},
		},
		"startupProbe has completed; start the next": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses = status.ContainerStatuses[:2]
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"kill and recreate the restartable init container if the startup check has failed": {
			mutatePodFn: func(pod *v1.Pod) {
				pod.Spec.RestartPolicy = v1.RestartPolicyAlways
				pod.Spec.InitContainers[2].StartupProbe = &v1.Probe{}
			},
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod)
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{2},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
				ContainersToStart:     []int{},
			},
			resetStatusFn: func(status *kubecontainer.PodStatus) {
				m.startupManager.Remove(status.ContainerStatuses[2].ID)
			},
		},
		"restart terminated restartable init container and next init container": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{0, 2},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"restart terminated restartable init container and regular containers": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.ContainerStatuses[0].State = kubecontainer.ContainerStateExited
			},
			actions: podActions{
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				InitContainersToStart: []int{0},
				ContainersToStart:     []int{0, 1, 2},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, restartable init container failed, but RestartPolicy == Never; kill pod only": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:           true,
				CreateSandbox:     false,
				SandboxID:         baseStatus.SandboxStatuses[0].Id,
				Attempt:           uint32(1),
				ContainersToStart: []int{},
				ContainersToKill:  getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, and RestartPolicy == Never, but no visible restartable init containers; create a new pod sandbox": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses = []*kubecontainer.Status{}
			},
			actions: podActions{
				KillPod:               true,
				CreateSandbox:         true,
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				Attempt:               uint32(1),
				InitContainersToStart: []int{0},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, restartable init container failed, and RestartPolicy == OnFailure; create a new pod sandbox": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
				status.ContainerStatuses[2].ExitCode = 137
			},
			actions: podActions{
				KillPod:               true,
				CreateSandbox:         true,
				SandboxID:             baseStatus.SandboxStatuses[0].Id,
				Attempt:               uint32(1),
				InitContainersToStart: []int{0},
				ContainersToStart:     []int{},
				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
			},
		},
		"Pod sandbox not ready, restartable init container failed, and RestartPolicy == Always; create a new pod sandbox": {
			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
				status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
				status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
1731 status.ContainerStatuses[2].ExitCode = 137
1732 },
1733 actions: podActions{
1734 KillPod: true,
1735 CreateSandbox: true,
1736 SandboxID: baseStatus.SandboxStatuses[0].Id,
1737 Attempt: uint32(1),
1738 InitContainersToStart: []int{0},
1739 ContainersToStart: []int{},
1740 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1741 },
1742 },
1743 "initialization failed; restart the last restartable init container even if pod's RestartPolicy == Never": {
1744 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
1745 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
1746 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
1747 status.ContainerStatuses[2].ExitCode = 137
1748 },
1749 actions: podActions{
1750 SandboxID: baseStatus.SandboxStatuses[0].Id,
1751 InitContainersToStart: []int{2},
1752 ContainersToStart: []int{},
1753 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1754 },
1755 },
1756 "restartable init container state unknown; kill and recreate the last restartable init container even if pod's RestartPolicy == Never": {
1757 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
1758 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
1759 status.ContainerStatuses[2].State = kubecontainer.ContainerStateUnknown
1760 },
1761 actions: podActions{
1762 SandboxID: baseStatus.SandboxStatuses[0].Id,
1763 InitContainersToStart: []int{2},
1764 ContainersToStart: []int{},
1765 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
1766 },
1767 },
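// The following cases pin down the restart semantics of restartable init
// containers: they are restarted regardless of the pod-level RestartPolicy,
// which only governs regular containers and whole-pod recreation.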
1768 "restart restartable init container if regular containers are running even if pod's RestartPolicy == Never": {
1769 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
1770 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
1771 status.ContainerStatuses[2].State = kubecontainer.ContainerStateExited
1772 status.ContainerStatuses[2].ExitCode = 137
1773 // All three regular containers are running.
1774 for i := 1; i <= 3; i++ {
1775 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
1776 ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)},
1777 Name: fmt.Sprintf("foo%d", i),
1778 State: kubecontainer.ContainerStateRunning,
1779 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]),
1780 })
1781 }
1782 },
1783 actions: podActions{
1784 SandboxID: baseStatus.SandboxStatuses[0].Id,
1785 InitContainersToStart: []int{2},
1786 ContainersToStart: []int{},
1787 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1788 },
1789 },
1790 "kill the pod if all main containers succeeded if pod's RestartPolicy == Never": {
1791 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
1792 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
1793 // All three regular containers have exited successfully.
1794 for i := 1; i <= 3; i++ {
1795 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
1796 ID: kubecontainer.ContainerID{ID: fmt.Sprintf("id%d", i)},
1797 Name: fmt.Sprintf("foo%d", i),
1798 State: kubecontainer.ContainerStateExited,
1799 ExitCode: 0,
1800 Hash: kubecontainer.HashContainer(&pod.Spec.Containers[i-1]),
1801 })
1802 }
1803 },
1804 actions: podActions{
1805 KillPod: true,
1806 SandboxID: baseStatus.SandboxStatuses[0].Id,
1807 ContainersToStart: []int{},
1808 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1809 },
1810 },
1811 "some of the init container statuses are missing but the last init container is running, restart restartable init and regular containers": {
1812 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
1813 mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
1814 status.ContainerStatuses = status.ContainerStatuses[2:]
1815 },
1816 actions: podActions{
1817 SandboxID: baseStatus.SandboxStatuses[0].Id,
1818 InitContainersToStart: []int{0, 1},
1819 ContainersToStart: []int{0, 1, 2},
1820 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1821 },
1822 },
1823 } {
1824 pod, status := makeBasePodAndStatusWithRestartableInitContainers()
1825 m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
1826 m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
1827 m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
1828 m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
1829 if test.mutatePodFn != nil {
1830 test.mutatePodFn(pod)
1831 }
1832 if test.mutateStatusFn != nil {
1833 test.mutateStatusFn(pod, status)
1834 }
1835 ctx := context.Background()
1836 actions := m.computePodActions(ctx, pod, status)
1837 verifyActions(t, &test.actions, &actions, desc)
1838 if test.resetStatusFn != nil {
1839 test.resetStatusFn(status)
1840 }
1841 }
1842 }
1843
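// makeBasePodAndStatusWithRestartableInitContainers returns the base pod with
// three restartable (RestartPolicy: Always) init containers; the last two also
// carry liveness and startup probes so the probe-driven cases above can target
// them.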
1844 func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontainer.PodStatus) {
1845 pod, status := makeBasePodAndStatus()
1846 pod.Spec.InitContainers = []v1.Container{
1847 {
1848 Name: "restartable-init-1",
1849 Image: "bar-image",
1850 RestartPolicy: &containerRestartPolicyAlways,
1851 },
1852 {
1853 Name: "restartable-init-2",
1854 Image: "bar-image",
1855 RestartPolicy: &containerRestartPolicyAlways,
1856 LivenessProbe: &v1.Probe{},
1857 StartupProbe: &v1.Probe{},
1858 },
1859 {
1860 Name: "restartable-init-3",
1861 Image: "bar-image",
1862 RestartPolicy: &containerRestartPolicyAlways,
1863 LivenessProbe: &v1.Probe{},
1864 StartupProbe: &v1.Probe{},
1865 },
1866 }
1867 // Replace the original statuses of the containers with those for the init
1868 // containers.
1869 status.ContainerStatuses = []*kubecontainer.Status{
1870 {
1871 ID: kubecontainer.ContainerID{ID: "initid1"},
1872 Name: "restartable-init-1", State: kubecontainer.ContainerStateRunning,
1873 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
1874 },
1875 {
1876 ID: kubecontainer.ContainerID{ID: "initid2"},
1877 Name: "restartable-init-2", State: kubecontainer.ContainerStateRunning,
1878 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[1]),
1879 },
1880 {
1881 ID: kubecontainer.ContainerID{ID: "initid3"},
1882 Name: "restartable-init-3", State: kubecontainer.ContainerStateRunning,
1883 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[2]),
1884 },
1885 }
1886 return pod, status
1887 }
1888
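// TestComputePodActionsWithInitAndEphemeralContainers re-runs the base suites
// and then exercises computePodActions against a pod that has both init and
// ephemeral containers, with the SidecarContainers feature gate off and on.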
1889 func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
1890 // Make sure existing test cases pass with feature enabled
1891 TestComputePodActions(t)
1892 TestComputePodActionsWithInitContainers(t)
1893
1894 t.Run("sidecar containers disabled", func(t *testing.T) {
1895 testComputePodActionsWithInitAndEphemeralContainers(t, false)
1896 })
1897 t.Run("sidecar containers enabled", func(t *testing.T) {
1898 testComputePodActionsWithInitAndEphemeralContainers(t, true)
1899 })
1900 }
1901
1902 func testComputePodActionsWithInitAndEphemeralContainers(t *testing.T, sidecarContainersEnabled bool) {
1903 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)()
1904 _, _, m, err := createTestRuntimeManager()
1905 require.NoError(t, err)
1906
1907 basePod, baseStatus := makeBasePodAndStatusWithInitAndEphemeralContainers()
1908 noAction := podActions{
1909 SandboxID: baseStatus.SandboxStatuses[0].Id,
1910 ContainersToStart: []int{},
1911 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
1912 }
1913
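// Each case mutates the base pod and/or its runtime status and asserts the
// resulting pod actions; noAction expects nothing to be started or killed.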
1914 for desc, test := range map[string]struct {
1915 mutatePodFn func(*v1.Pod)
1916 mutateStatusFn func(*kubecontainer.PodStatus)
1917 actions podActions
1918 }{
1919 "steady state; do nothing; ignore ephemeral container": {
1920 actions: noAction,
1921 },
1922 "No ephemeral containers running; start one": {
1923 mutateStatusFn: func(status *kubecontainer.PodStatus) {
1924 status.ContainerStatuses = status.ContainerStatuses[:4]
1925 },
1926 actions: podActions{
1927 SandboxID: baseStatus.SandboxStatuses[0].Id,
1928 ContainersToStart: []int{},
1929 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
1930 EphemeralContainersToStart: []int{0},
1931 },
1932 },
1933 "Start second ephemeral container": {
1934 mutatePodFn: func(pod *v1.Pod) {
1935 pod.Spec.EphemeralContainers = append(pod.Spec.EphemeralContainers, v1.EphemeralContainer{
1936 EphemeralContainerCommon: v1.EphemeralContainerCommon{
1937 Name: "debug2",
1938 Image: "busybox",
1939 },
1940 })
1941 },
1942 actions: podActions{
1943 SandboxID: baseStatus.SandboxStatuses[0].Id,
1944 ContainersToStart: []int{},
1945 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
1946 EphemeralContainersToStart: []int{1},
1947 },
1948 },
1949 "Ephemeral container exited; do not restart": {
1950 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
1951 mutateStatusFn: func(status *kubecontainer.PodStatus) {
1952 status.ContainerStatuses[4].State = kubecontainer.ContainerStateExited
1953 },
1954 actions: podActions{
1955 SandboxID: baseStatus.SandboxStatuses[0].Id,
1956 ContainersToStart: []int{},
1957 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
1958 },
1959 },
1960 "initialization in progress; start ephemeral container": {
1961 mutateStatusFn: func(status *kubecontainer.PodStatus) {
1962 status.ContainerStatuses[3].State = kubecontainer.ContainerStateRunning
1963 status.ContainerStatuses = status.ContainerStatuses[:4]
1964 },
1965 actions: podActions{
1966 SandboxID: baseStatus.SandboxStatuses[0].Id,
1967 ContainersToStart: []int{},
1968 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
1969 EphemeralContainersToStart: []int{0},
1970 },
1971 },
1972 "Create a new pod sandbox if the pod sandbox is dead, init container failed and RestartPolicy == OnFailure": {
1973 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure },
1974 mutateStatusFn: func(status *kubecontainer.PodStatus) {
1975 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
1976 status.ContainerStatuses = status.ContainerStatuses[3:]
1977 status.ContainerStatuses[0].ExitCode = 137
1978 },
1979 actions: podActions{
1980 KillPod: true,
1981 CreateSandbox: true,
1982 SandboxID: baseStatus.SandboxStatuses[0].Id,
1983 Attempt: uint32(1),
1984 NextInitContainerToStart: &basePod.Spec.InitContainers[0],
1985 InitContainersToStart: []int{0},
1986 ContainersToStart: []int{},
1987 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
1988 },
1989 },
1990 "Kill pod and do not restart ephemeral container if the pod sandbox is dead": {
1991 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
1992 mutateStatusFn: func(status *kubecontainer.PodStatus) {
1993 status.SandboxStatuses[0].State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
1994 },
1995 actions: podActions{
1996 KillPod: true,
1997 CreateSandbox: true,
1998 SandboxID: baseStatus.SandboxStatuses[0].Id,
1999 Attempt: uint32(1),
2000 NextInitContainerToStart: &basePod.Spec.InitContainers[0],
2001 InitContainersToStart: []int{0},
2002 ContainersToStart: []int{},
2003 ContainersToKill: getKillMapWithInitContainers(basePod, baseStatus, []int{}),
2004 },
2005 },
2006 "Kill pod if all containers exited except ephemeral container": {
2007 mutatePodFn: func(pod *v1.Pod) {
2008 pod.Spec.RestartPolicy = v1.RestartPolicyNever
2009 },
2010 mutateStatusFn: func(status *kubecontainer.PodStatus) {
2011 // All three regular containers have exited successfully; only the
2012 // ephemeral container is still running.
2012 for i := 0; i < 3; i++ {
2013 status.ContainerStatuses[i].State = kubecontainer.ContainerStateExited
2014 status.ContainerStatuses[i].ExitCode = 0
2015 }
2016 },
2017 actions: podActions{
2018 SandboxID: baseStatus.SandboxStatuses[0].Id,
2019 CreateSandbox: false,
2020 KillPod: true,
2021 ContainersToStart: []int{},
2022 ContainersToKill: map[kubecontainer.ContainerID]containerToKillInfo{},
2023 },
2024 },
2025 "Ephemeral container is in unknown state; leave it alone": {
2026 mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyNever },
2027 mutateStatusFn: func(status *kubecontainer.PodStatus) {
2028 status.ContainerStatuses[4].State = kubecontainer.ContainerStateUnknown
2029 },
2030 actions: noAction,
2031 },
2032 } {
2033 pod, status := makeBasePodAndStatusWithInitAndEphemeralContainers()
2034 if test.mutatePodFn != nil {
2035 test.mutatePodFn(pod)
2036 }
2037 if test.mutateStatusFn != nil {
2038 test.mutateStatusFn(status)
2039 }
2040 ctx := context.Background()
2041 actions := m.computePodActions(ctx, pod, status)
2042 if !sidecarContainersEnabled {
2043 // With the SidecarContainers feature disabled, computePodActions reports
2044 // the next init container via NextInitContainerToStart; clear InitContainersToStart.
2045 test.actions.InitContainersToStart = nil
2046 } else {
2047 // With the feature enabled, init containers to start are reported via
2048 // InitContainersToStart instead; clear NextInitContainerToStart.
2049 test.actions.NextInitContainerToStart = nil
2050 }
2051 verifyActions(t, &test.actions, &actions, desc)
2052 }
2053 }
2054
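// TestSyncPodWithSandboxAndDeletedPod verifies that SyncPod swallows sandbox
// creation failures for a pod the pod state provider already reports as
// removed, rather than surfacing an error.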
2055 func TestSyncPodWithSandboxAndDeletedPod(t *testing.T) {
2056 ctx := context.Background()
2057 fakeRuntime, _, m, err := createTestRuntimeManager()
2058 assert.NoError(t, err)
2059 fakeRuntime.ErrorOnSandboxCreate = true
2060
2061 containers := []v1.Container{
2062 {
2063 Name: "foo1",
2064 Image: "busybox",
2065 ImagePullPolicy: v1.PullIfNotPresent,
2066 },
2067 }
2068 pod := &v1.Pod{
2069 ObjectMeta: metav1.ObjectMeta{
2070 UID: "12345678",
2071 Name: "foo",
2072 Namespace: "new",
2073 },
2074 Spec: v1.PodSpec{
2075 Containers: containers,
2076 },
2077 }
2078
2079 backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
2080 m.podStateProvider.(*fakePodStateProvider).removed = map[types.UID]struct{}{pod.UID: {}}
2081
2082 // GetPodStatus and the following SyncPod will not return errors in the
2083 // case where the pod has been deleted. We are not adding any pods into
2084 // the fakePodProvider so they are 'deleted'.
2085 podStatus, err := m.GetPodStatus(ctx, pod.UID, pod.Name, pod.Namespace)
2086 assert.NoError(t, err)
2087 result := m.SyncPod(context.Background(), pod, podStatus, []v1.Secret{}, backOff)
2088
2089 assert.NoError(t, result.Error())
2090 }
2091
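// makeBasePodAndStatusWithInitAndEphemeralContainers extends the base pod with
// one regular init container and one ephemeral debug container, and appends
// matching runtime statuses (init exited, ephemeral running).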
2092 func makeBasePodAndStatusWithInitAndEphemeralContainers() (*v1.Pod, *kubecontainer.PodStatus) {
2093 pod, status := makeBasePodAndStatus()
2094 pod.Spec.InitContainers = []v1.Container{
2095 {
2096 Name: "init1",
2097 Image: "bar-image",
2098 },
2099 }
2100 pod.Spec.EphemeralContainers = []v1.EphemeralContainer{
2101 {
2102 EphemeralContainerCommon: v1.EphemeralContainerCommon{
2103 Name: "debug",
2104 Image: "busybox",
2105 },
2106 },
2107 }
2108 status.ContainerStatuses = append(status.ContainerStatuses, &kubecontainer.Status{
2109 ID: kubecontainer.ContainerID{ID: "initid1"},
2110 Name: "init1", State: kubecontainer.ContainerStateExited,
2111 Hash: kubecontainer.HashContainer(&pod.Spec.InitContainers[0]),
2112 }, &kubecontainer.Status{
2113 ID: kubecontainer.ContainerID{ID: "debug1"},
2114 Name: "debug", State: kubecontainer.ContainerStateRunning,
2115 Hash: kubecontainer.HashContainer((*v1.Container)(&pod.Spec.EphemeralContainers[0].EphemeralContainerCommon)),
2116 })
2117 return pod, status
2118 }
2119
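// TestComputePodActionsForPodResize enables InPlacePodVerticalScaling and
// checks that resource-only changes surface as ContainersToUpdate, or as a
// kill-and-restart when the container's resize policy requires a restart.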
2120 func TestComputePodActionsForPodResize(t *testing.T) {
2121 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
2122 fakeRuntime, _, m, err := createTestRuntimeManager()
2123 require.NoError(t, err)
2124 m.machineInfo.MemoryCapacity = 17179860387 // ~16GB
2125
2126 cpu100m := resource.MustParse("100m")
2127 cpu200m := resource.MustParse("200m")
2128 mem100M := resource.MustParse("100Mi")
2129 mem200M := resource.MustParse("200Mi")
2130 cpuPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.NotRequired}
2131 memPolicyRestartNotRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.NotRequired}
2132 cpuPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: v1.RestartContainer}
2133 memPolicyRestartRequired := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: v1.RestartContainer}
2134
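// Each case optionally overrides the containers' resize policies, mutates the
// pod's desired (spec) and last-observed (status) resources, and returns the
// pod actions computePodActions is expected to produce.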
2135 for desc, test := range map[string]struct {
2136 podResizePolicyFn func(*v1.Pod)
2137 mutatePodFn func(*v1.Pod)
2138 getExpectedPodActionsFn func(*v1.Pod, *kubecontainer.PodStatus) *podActions
2139 }{
2140 "Update container CPU and memory resources": {
2141 mutatePodFn: func(pod *v1.Pod) {
2142 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
2143 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2144 }
2145 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found {
2146 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2147 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
2148 }
2149 }
2150 },
2151 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2152 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
2153 pa := podActions{
2154 SandboxID: podStatus.SandboxStatuses[0].Id,
2155 ContainersToStart: []int{},
2156 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2157 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
2158 v1.ResourceMemory: {
2159 {
2160 apiContainerIdx: 1,
2161 kubeContainerID: kcs.ID,
2162 desiredContainerResources: containerResources{
2163 memoryLimit: mem100M.Value(),
2164 cpuLimit: cpu100m.MilliValue(),
2165 },
2166 currentContainerResources: &containerResources{
2167 memoryLimit: mem200M.Value(),
2168 cpuLimit: cpu200m.MilliValue(),
2169 },
2170 },
2171 },
2172 v1.ResourceCPU: {
2173 {
2174 apiContainerIdx: 1,
2175 kubeContainerID: kcs.ID,
2176 desiredContainerResources: containerResources{
2177 memoryLimit: mem100M.Value(),
2178 cpuLimit: cpu100m.MilliValue(),
2179 },
2180 currentContainerResources: &containerResources{
2181 memoryLimit: mem200M.Value(),
2182 cpuLimit: cpu200m.MilliValue(),
2183 },
2184 },
2185 },
2186 },
2187 }
2188 return &pa
2189 },
2190 },
2191 "Update container CPU resources": {
2192 mutatePodFn: func(pod *v1.Pod) {
2193 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
2194 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2195 }
2196 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found {
2197 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2198 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M},
2199 }
2200 }
2201 },
2202 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2203 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
2204 pa := podActions{
2205 SandboxID: podStatus.SandboxStatuses[0].Id,
2206 ContainersToStart: []int{},
2207 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2208 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
2209 v1.ResourceCPU: {
2210 {
2211 apiContainerIdx: 1,
2212 kubeContainerID: kcs.ID,
2213 desiredContainerResources: containerResources{
2214 memoryLimit: mem100M.Value(),
2215 cpuLimit: cpu100m.MilliValue(),
2216 },
2217 currentContainerResources: &containerResources{
2218 memoryLimit: mem100M.Value(),
2219 cpuLimit: cpu200m.MilliValue(),
2220 },
2221 },
2222 },
2223 },
2224 }
2225 return &pa
2226 },
2227 },
2228 "Update container memory resources": {
2229 mutatePodFn: func(pod *v1.Pod) {
2230 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{
2231 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
2232 }
2233 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found {
2234 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2235 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M},
2236 }
2237 }
2238 },
2239 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2240 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
2241 pa := podActions{
2242 SandboxID: podStatus.SandboxStatuses[0].Id,
2243 ContainersToStart: []int{},
2244 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2245 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
2246 v1.ResourceMemory: {
2247 {
2248 apiContainerIdx: 2,
2249 kubeContainerID: kcs.ID,
2250 desiredContainerResources: containerResources{
2251 memoryLimit: mem200M.Value(),
2252 cpuLimit: cpu200m.MilliValue(),
2253 },
2254 currentContainerResources: &containerResources{
2255 memoryLimit: mem100M.Value(),
2256 cpuLimit: cpu200m.MilliValue(),
2257 },
2258 },
2259 },
2260 },
2261 }
2262 return &pa
2263 },
2264 },
2265 "Nothing when spec.Resources and status.Resources are equal": {
2266 mutatePodFn: func(pod *v1.Pod) {
2267 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
2268 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m},
2269 }
2270 pod.Status.ContainerStatuses[1].Resources = &v1.ResourceRequirements{
2271 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m},
2272 }
2273 },
2274 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2275 pa := podActions{
2276 SandboxID: podStatus.SandboxStatuses[0].Id,
2277 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2278 ContainersToStart: []int{},
2279 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
2280 }
2281 return &pa
2282 },
2283 },
2284 "Update container CPU and memory resources with Restart policy for CPU": {
2285 podResizePolicyFn: func(pod *v1.Pod) {
2286 pod.Spec.Containers[0].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired}
2287 },
2288 mutatePodFn: func(pod *v1.Pod) {
2289 pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
2290 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
2291 }
2292 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[0].Name); found {
2293 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2294 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2295 }
2296 }
2297 },
2298 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2299 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[0].Name)
2300 killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
2301 killMap[kcs.ID] = containerToKillInfo{
2302 container: &pod.Spec.Containers[0],
2303 name: pod.Spec.Containers[0].Name,
2304 }
2305 pa := podActions{
2306 SandboxID: podStatus.SandboxStatuses[0].Id,
2307 ContainersToStart: []int{0},
2308 ContainersToKill: killMap,
2309 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
2310 UpdatePodResources: true,
2311 }
2312 return &pa
2313 },
2314 },
2315 "Update container CPU and memory resources with Restart policy for memory": {
2316 podResizePolicyFn: func(pod *v1.Pod) {
2317 pod.Spec.Containers[2].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
2318 },
2319 mutatePodFn: func(pod *v1.Pod) {
2320 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{
2321 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M},
2322 }
2323 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found {
2324 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2325 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2326 }
2327 }
2328 },
2329 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2330 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
2331 killMap := make(map[kubecontainer.ContainerID]containerToKillInfo)
2332 killMap[kcs.ID] = containerToKillInfo{
2333 container: &pod.Spec.Containers[2],
2334 name: pod.Spec.Containers[2].Name,
2335 }
2336 pa := podActions{
2337 SandboxID: podStatus.SandboxStatuses[0].Id,
2338 ContainersToStart: []int{2},
2339 ContainersToKill: killMap,
2340 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{},
2341 UpdatePodResources: true,
2342 }
2343 return &pa
2344 },
2345 },
2346 "Update container memory resources with Restart policy for CPU": {
2347 podResizePolicyFn: func(pod *v1.Pod) {
2348 pod.Spec.Containers[1].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartRequired, memPolicyRestartNotRequired}
2349 },
2350 mutatePodFn: func(pod *v1.Pod) {
2351 pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
2352 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem200M},
2353 }
2354 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[1].Name); found {
2355 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2356 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2357 }
2358 }
2359 },
2360 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2361 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[1].Name)
2362 pa := podActions{
2363 SandboxID: podStatus.SandboxStatuses[0].Id,
2364 ContainersToStart: []int{},
2365 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2366 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
2367 v1.ResourceMemory: {
2368 {
2369 apiContainerIdx: 1,
2370 kubeContainerID: kcs.ID,
2371 desiredContainerResources: containerResources{
2372 memoryLimit: mem200M.Value(),
2373 cpuLimit: cpu100m.MilliValue(),
2374 },
2375 currentContainerResources: &containerResources{
2376 memoryLimit: mem100M.Value(),
2377 cpuLimit: cpu100m.MilliValue(),
2378 },
2379 },
2380 },
2381 },
2382 }
2383 return &pa
2384 },
2385 },
2386 "Update container CPU resources with Restart policy for memory": {
2387 podResizePolicyFn: func(pod *v1.Pod) {
2388 pod.Spec.Containers[2].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartRequired}
2389 },
2390 mutatePodFn: func(pod *v1.Pod) {
2391 pod.Spec.Containers[2].Resources = v1.ResourceRequirements{
2392 Limits: v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem100M},
2393 }
2394 if idx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[2].Name); found {
2395 pod.Status.ContainerStatuses[idx].Resources = &v1.ResourceRequirements{
2396 Limits: v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M},
2397 }
2398 }
2399 },
2400 getExpectedPodActionsFn: func(pod *v1.Pod, podStatus *kubecontainer.PodStatus) *podActions {
2401 kcs := podStatus.FindContainerStatusByName(pod.Spec.Containers[2].Name)
2402 pa := podActions{
2403 SandboxID: podStatus.SandboxStatuses[0].Id,
2404 ContainersToStart: []int{},
2405 ContainersToKill: getKillMap(pod, podStatus, []int{}),
2406 ContainersToUpdate: map[v1.ResourceName][]containerToUpdateInfo{
2407 v1.ResourceCPU: {
2408 {
2409 apiContainerIdx: 2,
2410 kubeContainerID: kcs.ID,
2411 desiredContainerResources: containerResources{
2412 memoryLimit: mem100M.Value(),
2413 cpuLimit: cpu200m.MilliValue(),
2414 },
2415 currentContainerResources: &containerResources{
2416 memoryLimit: mem100M.Value(),
2417 cpuLimit: cpu100m.MilliValue(),
2418 },
2419 },
2420 },
2421 },
2422 }
2423 return &pa
2424 },
2425 },
2426 } {
2427 pod, kps := makeBasePodAndStatus()
2428 for idx := range pod.Spec.Containers {
2429 // Default resize policy: no container restart required for CPU or memory.
2430 pod.Spec.Containers[idx].ResizePolicy = []v1.ContainerResizePolicy{cpuPolicyRestartNotRequired, memPolicyRestartNotRequired}
2431 }
2432 if test.podResizePolicyFn != nil {
2433 test.podResizePolicyFn(pod)
2434 }
2435 for idx := range pod.Spec.Containers {
2436 // Recompute the fake status hashes so they match the container specs.
2437 if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil {
2438 kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx])
2439 kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx])
2440 }
2441 }
2442 makeAndSetFakePod(t, m, fakeRuntime, pod)
2443 ctx := context.Background()
2444 status, _ := m.GetPodStatus(ctx, kps.ID, pod.Name, pod.Namespace)
2445 for idx := range pod.Spec.Containers {
2446 if rcs := status.FindContainerStatusByName(pod.Spec.Containers[idx].Name); rcs != nil {
2447 if csIdx, found := podutil.GetIndexOfContainerStatus(pod.Status.ContainerStatuses, pod.Spec.Containers[idx].Name); found {
2448 pod.Status.ContainerStatuses[csIdx].ContainerID = rcs.ID.String()
2449 }
2450 }
2451 }
2452 for idx := range pod.Spec.Containers {
2453 if kcs := kps.FindContainerStatusByName(pod.Spec.Containers[idx].Name); kcs != nil {
2454 kcs.Hash = kubecontainer.HashContainer(&pod.Spec.Containers[idx])
2455 kcs.HashWithoutResources = kubecontainer.HashContainerWithoutResources(&pod.Spec.Containers[idx])
2456 }
2457 }
2458 if test.mutatePodFn != nil {
2459 test.mutatePodFn(pod)
2460 }
2461 expectedActions := test.getExpectedPodActionsFn(pod, status)
2462 actions := m.computePodActions(ctx, pod, status)
2463 verifyActions(t, expectedActions, &actions, desc)
2464 }
2465 }
2466
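// TestUpdatePodContainerResources verifies that updatePodContainerResources
// issues UpdateContainerResources calls to the runtime for the resized
// resource and records the new values in currentContainerResources.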
2467 func TestUpdatePodContainerResources(t *testing.T) {
2468 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
2469 fakeRuntime, _, m, err := createTestRuntimeManager()
2470 require.NoError(t, err)
2471 m.machineInfo.MemoryCapacity = 17179860387 // ~16GB
2472
2473 cpu100m := resource.MustParse("100m")
2474 cpu150m := resource.MustParse("150m")
2475 cpu200m := resource.MustParse("200m")
2476 cpu250m := resource.MustParse("250m")
2477 cpu300m := resource.MustParse("300m")
2478 cpu350m := resource.MustParse("350m")
2479 mem100M := resource.MustParse("100Mi")
2480 mem150M := resource.MustParse("150Mi")
2481 mem200M := resource.MustParse("200Mi")
2482 mem250M := resource.MustParse("250Mi")
2483 mem300M := resource.MustParse("300Mi")
2484 mem350M := resource.MustParse("350Mi")
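// resXXXmYYYMi bundles a CPU quantity (milli-cores) with a memory quantity
// (Mi) into a v1.ResourceList used for the spec/status fixtures below.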
2485 res100m100Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem100M}
2486 res150m100Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem100M}
2487 res100m150Mi := v1.ResourceList{v1.ResourceCPU: cpu100m, v1.ResourceMemory: mem150M}
2488 res150m150Mi := v1.ResourceList{v1.ResourceCPU: cpu150m, v1.ResourceMemory: mem150M}
2489 res200m200Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem200M}
2490 res250m200Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem200M}
2491 res200m250Mi := v1.ResourceList{v1.ResourceCPU: cpu200m, v1.ResourceMemory: mem250M}
2492 res250m250Mi := v1.ResourceList{v1.ResourceCPU: cpu250m, v1.ResourceMemory: mem250M}
2493 res300m300Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem300M}
2494 res350m300Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem300M}
2495 res300m350Mi := v1.ResourceList{v1.ResourceCPU: cpu300m, v1.ResourceMemory: mem350M}
2496 res350m350Mi := v1.ResourceList{v1.ResourceCPU: cpu350m, v1.ResourceMemory: mem350M}
2497
2498 pod, _ := makeBasePodAndStatus()
2499 makeAndSetFakePod(t, m, fakeRuntime, pod)
2500
2501 for dsc, tc := range map[string]struct {
2502 resourceName v1.ResourceName
2503 apiSpecResources []v1.ResourceRequirements
2504 apiStatusResources []v1.ResourceRequirements
2505 requiresRestart []bool
2506 invokeUpdateResources bool
2507 expectedCurrentLimits []v1.ResourceList
2508 expectedCurrentRequests []v1.ResourceList
2509 }{
2510 "Guaranteed QoS Pod - CPU & memory resize requested, update CPU": {
2511 resourceName: v1.ResourceCPU,
2512 apiSpecResources: []v1.ResourceRequirements{
2513 {Limits: res150m150Mi, Requests: res150m150Mi},
2514 {Limits: res250m250Mi, Requests: res250m250Mi},
2515 {Limits: res350m350Mi, Requests: res350m350Mi},
2516 },
2517 apiStatusResources: []v1.ResourceRequirements{
2518 {Limits: res100m100Mi, Requests: res100m100Mi},
2519 {Limits: res200m200Mi, Requests: res200m200Mi},
2520 {Limits: res300m300Mi, Requests: res300m300Mi},
2521 },
2522 requiresRestart: []bool{false, false, false},
2523 invokeUpdateResources: true,
2524 expectedCurrentLimits: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
2525 expectedCurrentRequests: []v1.ResourceList{res150m100Mi, res250m200Mi, res350m300Mi},
2526 },
2527 "Guaranteed QoS Pod - CPU & memory resize requested, update memory": {
2528 resourceName: v1.ResourceMemory,
2529 apiSpecResources: []v1.ResourceRequirements{
2530 {Limits: res150m150Mi, Requests: res150m150Mi},
2531 {Limits: res250m250Mi, Requests: res250m250Mi},
2532 {Limits: res350m350Mi, Requests: res350m350Mi},
2533 },
2534 apiStatusResources: []v1.ResourceRequirements{
2535 {Limits: res100m100Mi, Requests: res100m100Mi},
2536 {Limits: res200m200Mi, Requests: res200m200Mi},
2537 {Limits: res300m300Mi, Requests: res300m300Mi},
2538 },
2539 requiresRestart: []bool{false, false, false},
2540 invokeUpdateResources: true,
2541 expectedCurrentLimits: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
2542 expectedCurrentRequests: []v1.ResourceList{res100m150Mi, res200m250Mi, res300m350Mi},
2543 },
2544 } {
2545 var containersToUpdate []containerToUpdateInfo
2546 for idx := range pod.Spec.Containers {
2547 // Apply this case's desired (spec) and current (status) resources to the pod.
2548 pod.Spec.Containers[idx].Resources = tc.apiSpecResources[idx]
2549 pod.Status.ContainerStatuses[idx].Resources = &tc.apiStatusResources[idx]
2550 cInfo := containerToUpdateInfo{
2551 apiContainerIdx: idx,
2552 kubeContainerID: kubecontainer.ContainerID{},
2553 desiredContainerResources: containerResources{
2554 memoryLimit: tc.apiSpecResources[idx].Limits.Memory().Value(),
2555 memoryRequest: tc.apiSpecResources[idx].Requests.Memory().Value(),
2556 cpuLimit: tc.apiSpecResources[idx].Limits.Cpu().MilliValue(),
2557 cpuRequest: tc.apiSpecResources[idx].Requests.Cpu().MilliValue(),
2558 },
2559 currentContainerResources: &containerResources{
2560 memoryLimit: tc.apiStatusResources[idx].Limits.Memory().Value(),
2561 memoryRequest: tc.apiStatusResources[idx].Requests.Memory().Value(),
2562 cpuLimit: tc.apiStatusResources[idx].Limits.Cpu().MilliValue(),
2563 cpuRequest: tc.apiStatusResources[idx].Requests.Cpu().MilliValue(),
2564 },
2565 }
2566 containersToUpdate = append(containersToUpdate, cInfo)
2567 }
2568 fakeRuntime.Called = []string{}
2569 err := m.updatePodContainerResources(pod, tc.resourceName, containersToUpdate)
2570 assert.NoError(t, err, dsc)
2571
2572 if tc.invokeUpdateResources {
2573 assert.Contains(t, fakeRuntime.Called, "UpdateContainerResources", dsc)
2574 }
2575 for idx := range pod.Spec.Containers {
2576 assert.Equal(t, tc.expectedCurrentLimits[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryLimit, dsc)
2577 assert.Equal(t, tc.expectedCurrentRequests[idx].Memory().Value(), containersToUpdate[idx].currentContainerResources.memoryRequest, dsc)
2578 assert.Equal(t, tc.expectedCurrentLimits[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuLimit, dsc)
2579 assert.Equal(t, tc.expectedCurrentRequests[idx].Cpu().MilliValue(), containersToUpdate[idx].currentContainerResources.cpuRequest, dsc)
2580 }
2581 }
2582 }
2583