17 package kubelet
18
19 import (
20 "context"
21 "errors"
22 "fmt"
23 "net"
24 "os"
25 "path/filepath"
26 "reflect"
27 "sort"
28 "strings"
29 "testing"
30 "time"
31
32 "github.com/google/go-cmp/cmp"
33 "github.com/stretchr/testify/assert"
34 "github.com/stretchr/testify/require"
35 v1 "k8s.io/api/core/v1"
36 apiequality "k8s.io/apimachinery/pkg/api/equality"
37 apierrors "k8s.io/apimachinery/pkg/api/errors"
38 "k8s.io/apimachinery/pkg/api/resource"
39 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
40 "k8s.io/apimachinery/pkg/labels"
41 "k8s.io/apimachinery/pkg/runtime"
42 "k8s.io/apimachinery/pkg/types"
43 utilfeature "k8s.io/apiserver/pkg/util/feature"
44 core "k8s.io/client-go/testing"
45 "k8s.io/client-go/tools/record"
46 featuregatetesting "k8s.io/component-base/featuregate/testing"
47 "k8s.io/component-base/metrics/testutil"
48 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
49 "k8s.io/kubelet/pkg/cri/streaming/portforward"
50 "k8s.io/kubelet/pkg/cri/streaming/remotecommand"
51 _ "k8s.io/kubernetes/pkg/apis/core/install"
52 "k8s.io/kubernetes/pkg/features"
53 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
54 containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
55 "k8s.io/kubernetes/pkg/kubelet/metrics"
56 "k8s.io/kubernetes/pkg/kubelet/prober/results"
57 "k8s.io/kubernetes/pkg/kubelet/secret"
58 "k8s.io/kubernetes/pkg/kubelet/status"
59 kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
60 netutils "k8s.io/utils/net"
61 "k8s.io/utils/ptr"
62 )
63
64 var containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
65
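// TestNodeHostsFileContent verifies that nodeHostsFileContent prepends the
// Kubernetes-managed header to the node's existing hosts file and appends
// any HostAliases entries for host-network pods.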
66 func TestNodeHostsFileContent(t *testing.T) {
67 testCases := []struct {
68 hostsFileName string
69 hostAliases []v1.HostAlias
70 rawHostsFileContent string
71 expectedHostsFileContent string
72 }{
73 {
74 hostsFileName: "hosts_test_file1",
75 hostAliases: []v1.HostAlias{},
76 rawHostsFileContent: `# hosts file for testing.
77 127.0.0.1 localhost
78 ::1 localhost ip6-localhost ip6-loopback
79 fe00::0 ip6-localnet
80 fe00::0 ip6-mcastprefix
81 fe00::1 ip6-allnodes
82 fe00::2 ip6-allrouters
83 123.45.67.89 some.domain
84 `,
85 expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
86 # hosts file for testing.
87 127.0.0.1 localhost
88 ::1 localhost ip6-localhost ip6-loopback
89 fe00::0 ip6-localnet
90 fe00::0 ip6-mcastprefix
91 fe00::1 ip6-allnodes
92 fe00::2 ip6-allrouters
93 123.45.67.89 some.domain
94 `,
95 },
96 {
97 hostsFileName: "hosts_test_file2",
98 hostAliases: []v1.HostAlias{},
99 rawHostsFileContent: `# another hosts file for testing.
100 127.0.0.1 localhost
101 ::1 localhost ip6-localhost ip6-loopback
102 fe00::0 ip6-localnet
103 fe00::0 ip6-mcastprefix
104 fe00::1 ip6-allnodes
105 fe00::2 ip6-allrouters
106 12.34.56.78 another.domain
107 `,
108 expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
109 # another hosts file for testing.
110 127.0.0.1 localhost
111 ::1 localhost ip6-localhost ip6-loopback
112 fe00::0 ip6-localnet
113 fe00::0 ip6-mcastprefix
114 fe00::1 ip6-allnodes
115 fe00::2 ip6-allrouters
116 12.34.56.78 another.domain
117 `,
118 },
119 {
120 hostsFileName: "hosts_test_file1_with_host_aliases",
121 hostAliases: []v1.HostAlias{
122 {IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
123 },
124 rawHostsFileContent: `# hosts file for testing.
125 127.0.0.1 localhost
126 ::1 localhost ip6-localhost ip6-loopback
127 fe00::0 ip6-localnet
128 fe00::0 ip6-mcastprefix
129 fe00::1 ip6-allnodes
130 fe00::2 ip6-allrouters
131 123.45.67.89 some.domain
132 `,
133 expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
134 # hosts file for testing.
135 127.0.0.1 localhost
136 ::1 localhost ip6-localhost ip6-loopback
137 fe00::0 ip6-localnet
138 fe00::0 ip6-mcastprefix
139 fe00::1 ip6-allnodes
140 fe00::2 ip6-allrouters
141 123.45.67.89 some.domain
142
143 # Entries added by HostAliases.
144 123.45.67.89 foo bar baz
145 `,
146 },
147 {
148 hostsFileName: "hosts_test_file2_with_host_aliases",
149 hostAliases: []v1.HostAlias{
150 {IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
151 {IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
152 },
153 rawHostsFileContent: `# another hosts file for testing.
154 127.0.0.1 localhost
155 ::1 localhost ip6-localhost ip6-loopback
156 fe00::0 ip6-localnet
157 fe00::0 ip6-mcastprefix
158 fe00::1 ip6-allnodes
159 fe00::2 ip6-allrouters
160 12.34.56.78 another.domain
161 `,
162 expectedHostsFileContent: `# Kubernetes-managed hosts file (host network).
163 # another hosts file for testing.
164 127.0.0.1 localhost
165 ::1 localhost ip6-localhost ip6-loopback
166 fe00::0 ip6-localnet
167 fe00::0 ip6-mcastprefix
168 fe00::1 ip6-allnodes
169 fe00::2 ip6-allrouters
170 12.34.56.78 another.domain
171
172 # Entries added by HostAliases.
173 123.45.67.89 foo bar baz
174 456.78.90.123 park doo boo
175 `,
176 },
177 }
178
179 for _, testCase := range testCases {
180 t.Run(testCase.hostsFileName, func(t *testing.T) {
181 tmpdir, err := writeHostsFile(testCase.hostsFileName, testCase.rawHostsFileContent)
182 require.NoError(t, err, "could not create a temp hosts file")
183 defer os.RemoveAll(tmpdir)
184
185 actualContent, fileReadErr := nodeHostsFileContent(filepath.Join(tmpdir, testCase.hostsFileName), testCase.hostAliases)
186 require.NoError(t, fileReadErr, "could not read hosts file")
187 assert.Equal(t, testCase.expectedHostsFileContent, string(actualContent), "hosts file content not expected")
188 })
189 }
190 }
191
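// writeHostsFile writes the given hosts file content into a temporary
// directory and returns that directory; the caller is responsible for
// removing it.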
194 func writeHostsFile(filename string, cfg string) (string, error) {
195 tmpdir, err := os.MkdirTemp("", "kubelet=kubelet_pods_test.go=")
196 if err != nil {
197 return "", err
198 }
199 return tmpdir, os.WriteFile(filepath.Join(tmpdir, filename), []byte(cfg), 0644)
200 }
201
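// TestManagedHostsFileContent verifies the hosts file generated for pods that
// do not use host networking, including dual-stack pod IPs and HostAliases.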
202 func TestManagedHostsFileContent(t *testing.T) {
203 testCases := []struct {
204 hostIPs []string
205 hostName string
206 hostDomainName string
207 hostAliases []v1.HostAlias
208 expectedContent string
209 }{
210 {
211 hostIPs: []string{"123.45.67.89"},
212 hostName: "podFoo",
213 hostAliases: []v1.HostAlias{},
214 expectedContent: `# Kubernetes-managed hosts file.
215 127.0.0.1 localhost
216 ::1 localhost ip6-localhost ip6-loopback
217 fe00::0 ip6-localnet
218 fe00::0 ip6-mcastprefix
219 fe00::1 ip6-allnodes
220 fe00::2 ip6-allrouters
221 123.45.67.89 podFoo
222 `,
223 },
224 {
225 hostIPs: []string{"203.0.113.1"},
226 hostName: "podFoo",
227 hostDomainName: "domainFoo",
228 hostAliases: []v1.HostAlias{},
229 expectedContent: `# Kubernetes-managed hosts file.
230 127.0.0.1 localhost
231 ::1 localhost ip6-localhost ip6-loopback
232 fe00::0 ip6-localnet
233 fe00::0 ip6-mcastprefix
234 fe00::1 ip6-allnodes
235 fe00::2 ip6-allrouters
236 203.0.113.1 podFoo.domainFoo podFoo
237 `,
238 },
239 {
240 hostIPs: []string{"203.0.113.1"},
241 hostName: "podFoo",
242 hostDomainName: "domainFoo",
243 hostAliases: []v1.HostAlias{
244 {IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
245 },
246 expectedContent: `# Kubernetes-managed hosts file.
247 127.0.0.1 localhost
248 ::1 localhost ip6-localhost ip6-loopback
249 fe00::0 ip6-localnet
250 fe00::0 ip6-mcastprefix
251 fe00::1 ip6-allnodes
252 fe00::2 ip6-allrouters
253 203.0.113.1 podFoo.domainFoo podFoo
254
255 # Entries added by HostAliases.
256 123.45.67.89 foo bar baz
257 `,
258 },
259 {
260 hostIPs: []string{"203.0.113.1"},
261 hostName: "podFoo",
262 hostDomainName: "domainFoo",
263 hostAliases: []v1.HostAlias{
264 {IP: "123.45.67.89", Hostnames: []string{"foo", "bar", "baz"}},
265 {IP: "456.78.90.123", Hostnames: []string{"park", "doo", "boo"}},
266 },
267 expectedContent: `# Kubernetes-managed hosts file.
268 127.0.0.1 localhost
269 ::1 localhost ip6-localhost ip6-loopback
270 fe00::0 ip6-localnet
271 fe00::0 ip6-mcastprefix
272 fe00::1 ip6-allnodes
273 fe00::2 ip6-allrouters
274 203.0.113.1 podFoo.domainFoo podFoo
275
276 # Entries added by HostAliases.
277 123.45.67.89 foo bar baz
278 456.78.90.123 park doo boo
279 `,
280 },
281 {
282 hostIPs: []string{"203.0.113.1", "fd00::6"},
283 hostName: "podFoo",
284 hostDomainName: "domainFoo",
285 hostAliases: []v1.HostAlias{},
286 expectedContent: `# Kubernetes-managed hosts file.
287 127.0.0.1 localhost
288 ::1 localhost ip6-localhost ip6-loopback
289 fe00::0 ip6-localnet
290 fe00::0 ip6-mcastprefix
291 fe00::1 ip6-allnodes
292 fe00::2 ip6-allrouters
293 203.0.113.1 podFoo.domainFoo podFoo
294 fd00::6 podFoo.domainFoo podFoo
295 `,
296 },
297 }
298
299 for _, testCase := range testCases {
300 actualContent := managedHostsFileContent(testCase.hostIPs, testCase.hostName, testCase.hostDomainName, testCase.hostAliases)
301 assert.Equal(t, testCase.expectedContent, string(actualContent), "hosts file content not expected")
302 }
303 }
304
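// TestRunInContainerNoSuchPod verifies that RunInContainer returns an error
// when the target pod is not present in the runtime.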
305 func TestRunInContainerNoSuchPod(t *testing.T) {
306 ctx := context.Background()
307 testKubelet := newTestKubelet(t, false)
308 defer testKubelet.Cleanup()
309 kubelet := testKubelet.kubelet
310 fakeRuntime := testKubelet.fakeRuntime
311 fakeRuntime.PodList = []*containertest.FakePod{}
312
313 podName := "podFoo"
314 podNamespace := "nsFoo"
315 containerName := "containerFoo"
316 output, err := kubelet.RunInContainer(
317 ctx,
318 kubecontainer.GetPodFullName(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}),
319 "",
320 containerName,
321 []string{"ls"})
322 assert.Error(t, err)
323 assert.Nil(t, output, "output should be nil")
324 }
325
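// TestRunInContainer verifies that RunInContainer forwards the command to the
// container runner and propagates both its output and its error.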
326 func TestRunInContainer(t *testing.T) {
327 ctx := context.Background()
328 for _, testError := range []error{nil, errors.New("bar")} {
329 testKubelet := newTestKubelet(t, false)
330 defer testKubelet.Cleanup()
331 kubelet := testKubelet.kubelet
332 fakeRuntime := testKubelet.fakeRuntime
333 fakeCommandRunner := containertest.FakeContainerCommandRunner{
334 Err: testError,
335 Stdout: "foo",
336 }
337 kubelet.runner = &fakeCommandRunner
338
339 containerID := kubecontainer.ContainerID{Type: "test", ID: "abc1234"}
340 fakeRuntime.PodList = []*containertest.FakePod{
341 {Pod: &kubecontainer.Pod{
342 ID: "12345678",
343 Name: "podFoo",
344 Namespace: "nsFoo",
345 Containers: []*kubecontainer.Container{
346 {Name: "containerFoo",
347 ID: containerID,
348 },
349 },
350 }},
351 }
352 cmd := []string{"ls"}
353 actualOutput, err := kubelet.RunInContainer(ctx, "podFoo_nsFoo", "", "containerFoo", cmd)
354 assert.Equal(t, containerID, fakeCommandRunner.ContainerID, "(testError=%v) ID", testError)
355 assert.Equal(t, cmd, fakeCommandRunner.Cmd, "(testError=%v) command", testError)
356
357 assert.Equal(t, "foo", string(actualOutput), "(testError=%v) output", testError)
358 assert.Equal(t, err, testError, "(testError=%v) err", testError)
359 }
360 }
361
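// testServiceLister is a fake serviceLister that returns a fixed set of services.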
362 type testServiceLister struct {
363 services []*v1.Service
364 }
365
366 func (ls testServiceLister) List(labels.Selector) ([]*v1.Service, error) {
367 return ls.services, nil
368 }
369
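// envs implements sort.Interface so environment variables can be sorted by name.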
370 type envs []kubecontainer.EnvVar
371
372 func (e envs) Len() int {
373 return len(e)
374 }
375
376 func (e envs) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
377
378 func (e envs) Less(i, j int) bool { return e[i].Name < e[j].Name }
379
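// buildService returns a minimal v1.Service with a single port, used to
// exercise service environment variable generation.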
380 func buildService(name, namespace, clusterIP, protocol string, port int) *v1.Service {
381 return &v1.Service{
382 ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
383 Spec: v1.ServiceSpec{
384 Ports: []v1.ServicePort{{
385 Protocol: v1.Protocol(protocol),
386 Port: int32(port),
387 }},
388 ClusterIP: clusterIP,
389 },
390 }
391 }
392
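// TestMakeEnvironmentVariables covers makeEnvironmentVariables: service link
// environment variables, downward API fields, variable expansion, and
// ConfigMap/Secret references (including optional and missing sources),
// as well as unsynced service listers and static pods.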
393 func TestMakeEnvironmentVariables(t *testing.T) {
394 trueVal := true
395 services := []*v1.Service{
396 buildService("kubernetes", metav1.NamespaceDefault, "1.2.3.1", "TCP", 8081),
397 buildService("test", "test1", "1.2.3.3", "TCP", 8083),
398 buildService("kubernetes", "test2", "1.2.3.4", "TCP", 8084),
399 buildService("test", "test2", "1.2.3.5", "TCP", 8085),
400 buildService("test", "test2", "None", "TCP", 8085),
401 buildService("test", "test2", "", "TCP", 8085),
402 buildService("not-special", metav1.NamespaceDefault, "1.2.3.8", "TCP", 8088),
403 buildService("not-special", metav1.NamespaceDefault, "None", "TCP", 8088),
404 buildService("not-special", metav1.NamespaceDefault, "", "TCP", 8088),
405 }
406
407 trueValue := true
408 falseValue := false
409 testCases := []struct {
410 name string
411 ns string
412 enableServiceLinks *bool
413 enableRelaxedEnvironmentVariableValidation bool
414 container *v1.Container
415 nilLister bool
416 staticPod bool
417 unsyncedServices bool
418 configMap *v1.ConfigMap
419 secret *v1.Secret
420 podIPs []string
421 expectedEnvs []kubecontainer.EnvVar
422 expectedError bool
423 expectedEvent string
424 }{
425 {
426 name: "if services aren't synced, non-static pods should fail",
427 ns: "test1",
428 enableServiceLinks: &falseValue,
429 container: &v1.Container{Env: []v1.EnvVar{}},
430 nilLister: false,
431 staticPod: false,
432 unsyncedServices: true,
433 expectedEnvs: []kubecontainer.EnvVar{},
434 expectedError: true,
435 },
436 {
437 name: "if services aren't synced, static pods should succeed",
438 ns: "test1",
439 enableServiceLinks: &falseValue,
440 container: &v1.Container{Env: []v1.EnvVar{}},
441 nilLister: false,
442 staticPod: true,
443 unsyncedServices: true,
444 },
445 {
446 name: "api server = Y, kubelet = Y",
447 ns: "test1",
448 enableServiceLinks: &falseValue,
449 container: &v1.Container{
450 Env: []v1.EnvVar{
451 {Name: "FOO", Value: "BAR"},
452 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
453 {Name: "TEST_SERVICE_PORT", Value: "8083"},
454 {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
455 {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
456 {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
457 {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
458 {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
459 },
460 },
461 nilLister: false,
462 expectedEnvs: []kubecontainer.EnvVar{
463 {Name: "FOO", Value: "BAR"},
464 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
465 {Name: "TEST_SERVICE_PORT", Value: "8083"},
466 {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
467 {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
468 {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
469 {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
470 {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
471 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
472 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
473 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
474 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
475 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
476 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
477 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
478 },
479 },
480 {
481 name: "api server = Y, kubelet = N",
482 ns: "test1",
483 enableServiceLinks: &falseValue,
484 container: &v1.Container{
485 Env: []v1.EnvVar{
486 {Name: "FOO", Value: "BAR"},
487 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
488 {Name: "TEST_SERVICE_PORT", Value: "8083"},
489 {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
490 {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
491 {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
492 {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
493 {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
494 },
495 },
496 nilLister: true,
497 expectedEnvs: []kubecontainer.EnvVar{
498 {Name: "FOO", Value: "BAR"},
499 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
500 {Name: "TEST_SERVICE_PORT", Value: "8083"},
501 {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
502 {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
503 {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
504 {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
505 {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
506 },
507 },
508 {
509 name: "api server = N; kubelet = Y",
510 ns: "test1",
511 enableServiceLinks: &falseValue,
512 container: &v1.Container{
513 Env: []v1.EnvVar{
514 {Name: "FOO", Value: "BAZ"},
515 },
516 },
517 nilLister: false,
518 expectedEnvs: []kubecontainer.EnvVar{
519 {Name: "FOO", Value: "BAZ"},
520 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
521 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
522 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
523 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
524 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
525 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
526 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
527 },
528 },
529 {
530 name: "api server = N; kubelet = Y; service env vars",
531 ns: "test1",
532 enableServiceLinks: &trueValue,
533 container: &v1.Container{
534 Env: []v1.EnvVar{
535 {Name: "FOO", Value: "BAZ"},
536 },
537 },
538 nilLister: false,
539 expectedEnvs: []kubecontainer.EnvVar{
540 {Name: "FOO", Value: "BAZ"},
541 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
542 {Name: "TEST_SERVICE_PORT", Value: "8083"},
543 {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
544 {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
545 {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
546 {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
547 {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
548 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
549 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
550 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
551 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
552 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
553 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
554 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
555 },
556 },
557 {
558 name: "master service in pod ns",
559 ns: "test2",
560 enableServiceLinks: &falseValue,
561 container: &v1.Container{
562 Env: []v1.EnvVar{
563 {Name: "FOO", Value: "ZAP"},
564 },
565 },
566 nilLister: false,
567 expectedEnvs: []kubecontainer.EnvVar{
568 {Name: "FOO", Value: "ZAP"},
569 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
570 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
571 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
572 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
573 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
574 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
575 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
576 },
577 },
578 {
579 name: "master service in pod ns, service env vars",
580 ns: "test2",
581 enableServiceLinks: &trueValue,
582 container: &v1.Container{
583 Env: []v1.EnvVar{
584 {Name: "FOO", Value: "ZAP"},
585 },
586 },
587 nilLister: false,
588 expectedEnvs: []kubecontainer.EnvVar{
589 {Name: "FOO", Value: "ZAP"},
590 {Name: "TEST_SERVICE_HOST", Value: "1.2.3.5"},
591 {Name: "TEST_SERVICE_PORT", Value: "8085"},
592 {Name: "TEST_PORT", Value: "tcp://1.2.3.5:8085"},
593 {Name: "TEST_PORT_8085_TCP", Value: "tcp://1.2.3.5:8085"},
594 {Name: "TEST_PORT_8085_TCP_PROTO", Value: "tcp"},
595 {Name: "TEST_PORT_8085_TCP_PORT", Value: "8085"},
596 {Name: "TEST_PORT_8085_TCP_ADDR", Value: "1.2.3.5"},
597 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.4"},
598 {Name: "KUBERNETES_SERVICE_PORT", Value: "8084"},
599 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.4:8084"},
600 {Name: "KUBERNETES_PORT_8084_TCP", Value: "tcp://1.2.3.4:8084"},
601 {Name: "KUBERNETES_PORT_8084_TCP_PROTO", Value: "tcp"},
602 {Name: "KUBERNETES_PORT_8084_TCP_PORT", Value: "8084"},
603 {Name: "KUBERNETES_PORT_8084_TCP_ADDR", Value: "1.2.3.4"},
604 },
605 },
606 {
607 name: "pod in master service ns",
608 ns: metav1.NamespaceDefault,
609 enableServiceLinks: &falseValue,
610 container: &v1.Container{},
611 nilLister: false,
612 expectedEnvs: []kubecontainer.EnvVar{
613 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
614 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
615 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
616 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
617 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
618 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
619 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
620 },
621 },
622 {
623 name: "pod in master service ns, service env vars",
624 ns: metav1.NamespaceDefault,
625 enableServiceLinks: &trueValue,
626 container: &v1.Container{},
627 nilLister: false,
628 expectedEnvs: []kubecontainer.EnvVar{
629 {Name: "NOT_SPECIAL_SERVICE_HOST", Value: "1.2.3.8"},
630 {Name: "NOT_SPECIAL_SERVICE_PORT", Value: "8088"},
631 {Name: "NOT_SPECIAL_PORT", Value: "tcp://1.2.3.8:8088"},
632 {Name: "NOT_SPECIAL_PORT_8088_TCP", Value: "tcp://1.2.3.8:8088"},
633 {Name: "NOT_SPECIAL_PORT_8088_TCP_PROTO", Value: "tcp"},
634 {Name: "NOT_SPECIAL_PORT_8088_TCP_PORT", Value: "8088"},
635 {Name: "NOT_SPECIAL_PORT_8088_TCP_ADDR", Value: "1.2.3.8"},
636 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
637 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
638 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
639 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
640 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
641 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
642 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
643 },
644 },
645 {
646 name: "downward api pod",
647 ns: "downward-api",
648 enableServiceLinks: &falseValue,
649 container: &v1.Container{
650 Env: []v1.EnvVar{
651 {
652 Name: "POD_NAME",
653 ValueFrom: &v1.EnvVarSource{
654 FieldRef: &v1.ObjectFieldSelector{
655 APIVersion: "v1",
656 FieldPath: "metadata.name",
657 },
658 },
659 },
660 {
661 Name: "POD_NAMESPACE",
662 ValueFrom: &v1.EnvVarSource{
663 FieldRef: &v1.ObjectFieldSelector{
664 APIVersion: "v1",
665 FieldPath: "metadata.namespace",
666 },
667 },
668 },
669 {
670 Name: "POD_NODE_NAME",
671 ValueFrom: &v1.EnvVarSource{
672 FieldRef: &v1.ObjectFieldSelector{
673 APIVersion: "v1",
674 FieldPath: "spec.nodeName",
675 },
676 },
677 },
678 {
679 Name: "POD_SERVICE_ACCOUNT_NAME",
680 ValueFrom: &v1.EnvVarSource{
681 FieldRef: &v1.ObjectFieldSelector{
682 APIVersion: "v1",
683 FieldPath: "spec.serviceAccountName",
684 },
685 },
686 },
687 {
688 Name: "POD_IP",
689 ValueFrom: &v1.EnvVarSource{
690 FieldRef: &v1.ObjectFieldSelector{
691 APIVersion: "v1",
692 FieldPath: "status.podIP",
693 },
694 },
695 },
696 {
697 Name: "POD_IPS",
698 ValueFrom: &v1.EnvVarSource{
699 FieldRef: &v1.ObjectFieldSelector{
700 APIVersion: "v1",
701 FieldPath: "status.podIPs",
702 },
703 },
704 },
705 {
706 Name: "HOST_IP",
707 ValueFrom: &v1.EnvVarSource{
708 FieldRef: &v1.ObjectFieldSelector{
709 APIVersion: "v1",
710 FieldPath: "status.hostIP",
711 },
712 },
713 },
714 {
715 Name: "HOST_IPS",
716 ValueFrom: &v1.EnvVarSource{
717 FieldRef: &v1.ObjectFieldSelector{
718 APIVersion: "v1",
719 FieldPath: "status.hostIPs",
720 },
721 },
722 },
723 },
724 },
725 podIPs: []string{"1.2.3.4", "fd00::6"},
726 nilLister: true,
727 expectedEnvs: []kubecontainer.EnvVar{
728 {Name: "POD_NAME", Value: "dapi-test-pod-name"},
729 {Name: "POD_NAMESPACE", Value: "downward-api"},
730 {Name: "POD_NODE_NAME", Value: "node-name"},
731 {Name: "POD_SERVICE_ACCOUNT_NAME", Value: "special"},
732 {Name: "POD_IP", Value: "1.2.3.4"},
733 {Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
734 {Name: "HOST_IP", Value: testKubeletHostIP},
735 {Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
736 },
737 },
738 {
739 name: "downward api pod ips reverse order",
740 ns: "downward-api",
741 enableServiceLinks: &falseValue,
742 container: &v1.Container{
743 Env: []v1.EnvVar{
744 {
745 Name: "POD_IP",
746 ValueFrom: &v1.EnvVarSource{
747 FieldRef: &v1.ObjectFieldSelector{
748 APIVersion: "v1",
749 FieldPath: "status.podIP",
750 },
751 },
752 },
753 {
754 Name: "POD_IPS",
755 ValueFrom: &v1.EnvVarSource{
756 FieldRef: &v1.ObjectFieldSelector{
757 APIVersion: "v1",
758 FieldPath: "status.podIPs",
759 },
760 },
761 },
762 {
763 Name: "HOST_IP",
764 ValueFrom: &v1.EnvVarSource{
765 FieldRef: &v1.ObjectFieldSelector{
766 APIVersion: "v1",
767 FieldPath: "status.hostIP",
768 },
769 },
770 },
771 {
772 Name: "HOST_IPS",
773 ValueFrom: &v1.EnvVarSource{
774 FieldRef: &v1.ObjectFieldSelector{
775 APIVersion: "v1",
776 FieldPath: "status.hostIPs",
777 },
778 },
779 },
780 },
781 },
782 podIPs: []string{"fd00::6", "1.2.3.4"},
783 nilLister: true,
784 expectedEnvs: []kubecontainer.EnvVar{
785 {Name: "POD_IP", Value: "1.2.3.4"},
786 {Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
787 {Name: "HOST_IP", Value: testKubeletHostIP},
788 {Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
789 },
790 },
791 {
792 name: "downward api pod ips multiple ips",
793 ns: "downward-api",
794 enableServiceLinks: &falseValue,
795 container: &v1.Container{
796 Env: []v1.EnvVar{
797 {
798 Name: "POD_IP",
799 ValueFrom: &v1.EnvVarSource{
800 FieldRef: &v1.ObjectFieldSelector{
801 APIVersion: "v1",
802 FieldPath: "status.podIP",
803 },
804 },
805 },
806 {
807 Name: "POD_IPS",
808 ValueFrom: &v1.EnvVarSource{
809 FieldRef: &v1.ObjectFieldSelector{
810 APIVersion: "v1",
811 FieldPath: "status.podIPs",
812 },
813 },
814 },
815 {
816 Name: "HOST_IP",
817 ValueFrom: &v1.EnvVarSource{
818 FieldRef: &v1.ObjectFieldSelector{
819 APIVersion: "v1",
820 FieldPath: "status.hostIP",
821 },
822 },
823 },
824 {
825 Name: "HOST_IPS",
826 ValueFrom: &v1.EnvVarSource{
827 FieldRef: &v1.ObjectFieldSelector{
828 APIVersion: "v1",
829 FieldPath: "status.hostIPs",
830 },
831 },
832 },
833 },
834 },
835 podIPs: []string{"1.2.3.4", "192.168.1.1.", "fd00::6"},
836 nilLister: true,
837 expectedEnvs: []kubecontainer.EnvVar{
838 {Name: "POD_IP", Value: "1.2.3.4"},
839 {Name: "POD_IPS", Value: "1.2.3.4,fd00::6"},
840 {Name: "HOST_IP", Value: testKubeletHostIP},
841 {Name: "HOST_IPS", Value: testKubeletHostIP + "," + testKubeletHostIPv6},
842 },
843 },
844 {
845 name: "env expansion",
846 ns: "test1",
847 enableServiceLinks: &falseValue,
848 container: &v1.Container{
849 Env: []v1.EnvVar{
850 {
851 Name: "TEST_LITERAL",
852 Value: "test-test-test",
853 },
854 {
855 Name: "POD_NAME",
856 ValueFrom: &v1.EnvVarSource{
857 FieldRef: &v1.ObjectFieldSelector{
858 APIVersion: "v1",
859 FieldPath: "metadata.name",
860 },
861 },
862 },
863 {
864 Name: "OUT_OF_ORDER_TEST",
865 Value: "$(OUT_OF_ORDER_TARGET)",
866 },
867 {
868 Name: "OUT_OF_ORDER_TARGET",
869 Value: "FOO",
870 },
871 {
872 Name: "EMPTY_VAR",
873 },
874 {
875 Name: "EMPTY_TEST",
876 Value: "foo-$(EMPTY_VAR)",
877 },
878 {
879 Name: "POD_NAME_TEST2",
880 Value: "test2-$(POD_NAME)",
881 },
882 {
883 Name: "POD_NAME_TEST3",
884 Value: "$(POD_NAME_TEST2)-3",
885 },
886 {
887 Name: "LITERAL_TEST",
888 Value: "literal-$(TEST_LITERAL)",
889 },
890 {
891 Name: "TEST_UNDEFINED",
892 Value: "$(UNDEFINED_VAR)",
893 },
894 },
895 },
896 nilLister: false,
897 expectedEnvs: []kubecontainer.EnvVar{
898 {
899 Name: "TEST_LITERAL",
900 Value: "test-test-test",
901 },
902 {
903 Name: "POD_NAME",
904 Value: "dapi-test-pod-name",
905 },
906 {
907 Name: "POD_NAME_TEST2",
908 Value: "test2-dapi-test-pod-name",
909 },
910 {
911 Name: "POD_NAME_TEST3",
912 Value: "test2-dapi-test-pod-name-3",
913 },
914 {
915 Name: "LITERAL_TEST",
916 Value: "literal-test-test-test",
917 },
918 {
919 Name: "OUT_OF_ORDER_TEST",
920 Value: "$(OUT_OF_ORDER_TARGET)",
921 },
922 {
923 Name: "OUT_OF_ORDER_TARGET",
924 Value: "FOO",
925 },
926 {
927 Name: "TEST_UNDEFINED",
928 Value: "$(UNDEFINED_VAR)",
929 },
930 {
931 Name: "EMPTY_VAR",
932 },
933 {
934 Name: "EMPTY_TEST",
935 Value: "foo-",
936 },
937 {
938 Name: "KUBERNETES_SERVICE_HOST",
939 Value: "1.2.3.1",
940 },
941 {
942 Name: "KUBERNETES_SERVICE_PORT",
943 Value: "8081",
944 },
945 {
946 Name: "KUBERNETES_PORT",
947 Value: "tcp://1.2.3.1:8081",
948 },
949 {
950 Name: "KUBERNETES_PORT_8081_TCP",
951 Value: "tcp://1.2.3.1:8081",
952 },
953 {
954 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
955 Value: "tcp",
956 },
957 {
958 Name: "KUBERNETES_PORT_8081_TCP_PORT",
959 Value: "8081",
960 },
961 {
962 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
963 Value: "1.2.3.1",
964 },
965 },
966 },
967 {
968 name: "env expansion, service env vars",
969 ns: "test1",
970 enableServiceLinks: &trueValue,
971 container: &v1.Container{
972 Env: []v1.EnvVar{
973 {
974 Name: "TEST_LITERAL",
975 Value: "test-test-test",
976 },
977 {
978 Name: "POD_NAME",
979 ValueFrom: &v1.EnvVarSource{
980 FieldRef: &v1.ObjectFieldSelector{
981 APIVersion: "v1",
982 FieldPath: "metadata.name",
983 },
984 },
985 },
986 {
987 Name: "OUT_OF_ORDER_TEST",
988 Value: "$(OUT_OF_ORDER_TARGET)",
989 },
990 {
991 Name: "OUT_OF_ORDER_TARGET",
992 Value: "FOO",
993 },
994 {
995 Name: "EMPTY_VAR",
996 },
997 {
998 Name: "EMPTY_TEST",
999 Value: "foo-$(EMPTY_VAR)",
1000 },
1001 {
1002 Name: "POD_NAME_TEST2",
1003 Value: "test2-$(POD_NAME)",
1004 },
1005 {
1006 Name: "POD_NAME_TEST3",
1007 Value: "$(POD_NAME_TEST2)-3",
1008 },
1009 {
1010 Name: "LITERAL_TEST",
1011 Value: "literal-$(TEST_LITERAL)",
1012 },
1013 {
1014 Name: "SERVICE_VAR_TEST",
1015 Value: "$(TEST_SERVICE_HOST):$(TEST_SERVICE_PORT)",
1016 },
1017 {
1018 Name: "TEST_UNDEFINED",
1019 Value: "$(UNDEFINED_VAR)",
1020 },
1021 },
1022 },
1023 nilLister: false,
1024 expectedEnvs: []kubecontainer.EnvVar{
1025 {
1026 Name: "TEST_LITERAL",
1027 Value: "test-test-test",
1028 },
1029 {
1030 Name: "POD_NAME",
1031 Value: "dapi-test-pod-name",
1032 },
1033 {
1034 Name: "POD_NAME_TEST2",
1035 Value: "test2-dapi-test-pod-name",
1036 },
1037 {
1038 Name: "POD_NAME_TEST3",
1039 Value: "test2-dapi-test-pod-name-3",
1040 },
1041 {
1042 Name: "LITERAL_TEST",
1043 Value: "literal-test-test-test",
1044 },
1045 {
1046 Name: "TEST_SERVICE_HOST",
1047 Value: "1.2.3.3",
1048 },
1049 {
1050 Name: "TEST_SERVICE_PORT",
1051 Value: "8083",
1052 },
1053 {
1054 Name: "TEST_PORT",
1055 Value: "tcp://1.2.3.3:8083",
1056 },
1057 {
1058 Name: "TEST_PORT_8083_TCP",
1059 Value: "tcp://1.2.3.3:8083",
1060 },
1061 {
1062 Name: "TEST_PORT_8083_TCP_PROTO",
1063 Value: "tcp",
1064 },
1065 {
1066 Name: "TEST_PORT_8083_TCP_PORT",
1067 Value: "8083",
1068 },
1069 {
1070 Name: "TEST_PORT_8083_TCP_ADDR",
1071 Value: "1.2.3.3",
1072 },
1073 {
1074 Name: "SERVICE_VAR_TEST",
1075 Value: "1.2.3.3:8083",
1076 },
1077 {
1078 Name: "OUT_OF_ORDER_TEST",
1079 Value: "$(OUT_OF_ORDER_TARGET)",
1080 },
1081 {
1082 Name: "OUT_OF_ORDER_TARGET",
1083 Value: "FOO",
1084 },
1085 {
1086 Name: "TEST_UNDEFINED",
1087 Value: "$(UNDEFINED_VAR)",
1088 },
1089 {
1090 Name: "EMPTY_VAR",
1091 },
1092 {
1093 Name: "EMPTY_TEST",
1094 Value: "foo-",
1095 },
1096 {
1097 Name: "KUBERNETES_SERVICE_HOST",
1098 Value: "1.2.3.1",
1099 },
1100 {
1101 Name: "KUBERNETES_SERVICE_PORT",
1102 Value: "8081",
1103 },
1104 {
1105 Name: "KUBERNETES_PORT",
1106 Value: "tcp://1.2.3.1:8081",
1107 },
1108 {
1109 Name: "KUBERNETES_PORT_8081_TCP",
1110 Value: "tcp://1.2.3.1:8081",
1111 },
1112 {
1113 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1114 Value: "tcp",
1115 },
1116 {
1117 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1118 Value: "8081",
1119 },
1120 {
1121 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1122 Value: "1.2.3.1",
1123 },
1124 },
1125 },
1126 {
1127 name: "configmapkeyref_missing_optional",
1128 ns: "test",
1129 enableServiceLinks: &falseValue,
1130 container: &v1.Container{
1131 Env: []v1.EnvVar{
1132 {
1133 Name: "POD_NAME",
1134 ValueFrom: &v1.EnvVarSource{
1135 ConfigMapKeyRef: &v1.ConfigMapKeySelector{
1136 LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"},
1137 Key: "key",
1138 Optional: &trueVal,
1139 },
1140 },
1141 },
1142 },
1143 },
1144 expectedEnvs: []kubecontainer.EnvVar{
1145 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
1146 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
1147 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
1148 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
1149 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
1150 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
1151 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
1152 },
1153 },
1154 {
1155 name: "configmapkeyref_missing_key_optional",
1156 ns: "test",
1157 enableServiceLinks: &falseValue,
1158 container: &v1.Container{
1159 Env: []v1.EnvVar{
1160 {
1161 Name: "POD_NAME",
1162 ValueFrom: &v1.EnvVarSource{
1163 ConfigMapKeyRef: &v1.ConfigMapKeySelector{
1164 LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"},
1165 Key: "key",
1166 Optional: &trueVal,
1167 },
1168 },
1169 },
1170 },
1171 },
1172 nilLister: true,
1173 configMap: &v1.ConfigMap{
1174 ObjectMeta: metav1.ObjectMeta{
1175 Namespace: "test1",
1176 Name: "test-configmap",
1177 },
1178 Data: map[string]string{
1179 "a": "b",
1180 },
1181 },
1182 expectedEnvs: nil,
1183 },
1184 {
1185 name: "secretkeyref_missing_optional",
1186 ns: "test",
1187 enableServiceLinks: &falseValue,
1188 container: &v1.Container{
1189 Env: []v1.EnvVar{
1190 {
1191 Name: "POD_NAME",
1192 ValueFrom: &v1.EnvVarSource{
1193 SecretKeyRef: &v1.SecretKeySelector{
1194 LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
1195 Key: "key",
1196 Optional: &trueVal,
1197 },
1198 },
1199 },
1200 },
1201 },
1202 expectedEnvs: []kubecontainer.EnvVar{
1203 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
1204 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
1205 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
1206 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
1207 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
1208 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
1209 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
1210 },
1211 },
1212 {
1213 name: "secretkeyref_missing_key_optional",
1214 ns: "test",
1215 enableServiceLinks: &falseValue,
1216 container: &v1.Container{
1217 Env: []v1.EnvVar{
1218 {
1219 Name: "POD_NAME",
1220 ValueFrom: &v1.EnvVarSource{
1221 SecretKeyRef: &v1.SecretKeySelector{
1222 LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"},
1223 Key: "key",
1224 Optional: &trueVal,
1225 },
1226 },
1227 },
1228 },
1229 },
1230 nilLister: true,
1231 secret: &v1.Secret{
1232 ObjectMeta: metav1.ObjectMeta{
1233 Namespace: "test1",
1234 Name: "test-secret",
1235 },
1236 Data: map[string][]byte{
1237 "a": []byte("b"),
1238 },
1239 },
1240 expectedEnvs: nil,
1241 },
1242 {
1243 name: "configmap",
1244 ns: "test1",
1245 enableServiceLinks: &falseValue,
1246 container: &v1.Container{
1247 EnvFrom: []v1.EnvFromSource{
1248 {
1249 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1250 },
1251 {
1252 Prefix: "p_",
1253 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1254 },
1255 },
1256 Env: []v1.EnvVar{
1257 {
1258 Name: "TEST_LITERAL",
1259 Value: "test-test-test",
1260 },
1261 {
1262 Name: "EXPANSION_TEST",
1263 Value: "$(REPLACE_ME)",
1264 },
1265 {
1266 Name: "DUPE_TEST",
1267 Value: "ENV_VAR",
1268 },
1269 },
1270 },
1271 nilLister: false,
1272 configMap: &v1.ConfigMap{
1273 ObjectMeta: metav1.ObjectMeta{
1274 Namespace: "test1",
1275 Name: "test-configmap",
1276 },
1277 Data: map[string]string{
1278 "REPLACE_ME": "FROM_CONFIG_MAP",
1279 "DUPE_TEST": "CONFIG_MAP",
1280 },
1281 },
1282 expectedEnvs: []kubecontainer.EnvVar{
1283 {
1284 Name: "TEST_LITERAL",
1285 Value: "test-test-test",
1286 },
1287 {
1288 Name: "REPLACE_ME",
1289 Value: "FROM_CONFIG_MAP",
1290 },
1291 {
1292 Name: "EXPANSION_TEST",
1293 Value: "FROM_CONFIG_MAP",
1294 },
1295 {
1296 Name: "DUPE_TEST",
1297 Value: "ENV_VAR",
1298 },
1299 {
1300 Name: "p_REPLACE_ME",
1301 Value: "FROM_CONFIG_MAP",
1302 },
1303 {
1304 Name: "p_DUPE_TEST",
1305 Value: "CONFIG_MAP",
1306 },
1307 {
1308 Name: "KUBERNETES_SERVICE_HOST",
1309 Value: "1.2.3.1",
1310 },
1311 {
1312 Name: "KUBERNETES_SERVICE_PORT",
1313 Value: "8081",
1314 },
1315 {
1316 Name: "KUBERNETES_PORT",
1317 Value: "tcp://1.2.3.1:8081",
1318 },
1319 {
1320 Name: "KUBERNETES_PORT_8081_TCP",
1321 Value: "tcp://1.2.3.1:8081",
1322 },
1323 {
1324 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1325 Value: "tcp",
1326 },
1327 {
1328 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1329 Value: "8081",
1330 },
1331 {
1332 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1333 Value: "1.2.3.1",
1334 },
1335 },
1336 },
1337 {
1338 name: "configmap allow prefix to start with a digit",
1339 ns: "test1",
1340 enableServiceLinks: &falseValue,
1341 enableRelaxedEnvironmentVariableValidation: true,
1342 container: &v1.Container{
1343 EnvFrom: []v1.EnvFromSource{
1344 {
1345 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1346 },
1347 {
1348 Prefix: "1_",
1349 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1350 },
1351 },
1352 Env: []v1.EnvVar{
1353 {
1354 Name: "TEST_LITERAL",
1355 Value: "test-test-test",
1356 },
1357 {
1358 Name: "EXPANSION_TEST",
1359 Value: "$(REPLACE_ME)",
1360 },
1361 {
1362 Name: "DUPE_TEST",
1363 Value: "ENV_VAR",
1364 },
1365 },
1366 },
1367 nilLister: false,
1368 configMap: &v1.ConfigMap{
1369 ObjectMeta: metav1.ObjectMeta{
1370 Namespace: "test1",
1371 Name: "test-configmap",
1372 },
1373 Data: map[string]string{
1374 "REPLACE_ME": "FROM_CONFIG_MAP",
1375 "DUPE_TEST": "CONFIG_MAP",
1376 },
1377 },
1378 expectedEnvs: []kubecontainer.EnvVar{
1379 {
1380 Name: "TEST_LITERAL",
1381 Value: "test-test-test",
1382 },
1383 {
1384 Name: "REPLACE_ME",
1385 Value: "FROM_CONFIG_MAP",
1386 },
1387 {
1388 Name: "EXPANSION_TEST",
1389 Value: "FROM_CONFIG_MAP",
1390 },
1391 {
1392 Name: "DUPE_TEST",
1393 Value: "ENV_VAR",
1394 },
1395 {
1396 Name: "1_REPLACE_ME",
1397 Value: "FROM_CONFIG_MAP",
1398 },
1399 {
1400 Name: "1_DUPE_TEST",
1401 Value: "CONFIG_MAP",
1402 },
1403 {
1404 Name: "KUBERNETES_SERVICE_HOST",
1405 Value: "1.2.3.1",
1406 },
1407 {
1408 Name: "KUBERNETES_SERVICE_PORT",
1409 Value: "8081",
1410 },
1411 {
1412 Name: "KUBERNETES_PORT",
1413 Value: "tcp://1.2.3.1:8081",
1414 },
1415 {
1416 Name: "KUBERNETES_PORT_8081_TCP",
1417 Value: "tcp://1.2.3.1:8081",
1418 },
1419 {
1420 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1421 Value: "tcp",
1422 },
1423 {
1424 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1425 Value: "8081",
1426 },
1427 {
1428 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1429 Value: "1.2.3.1",
1430 },
1431 },
1432 },
1433 {
1434 name: "configmap, service env vars",
1435 ns: "test1",
1436 enableServiceLinks: &trueValue,
1437 container: &v1.Container{
1438 EnvFrom: []v1.EnvFromSource{
1439 {
1440 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1441 },
1442 {
1443 Prefix: "p_",
1444 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1445 },
1446 },
1447 Env: []v1.EnvVar{
1448 {
1449 Name: "TEST_LITERAL",
1450 Value: "test-test-test",
1451 },
1452 {
1453 Name: "EXPANSION_TEST",
1454 Value: "$(REPLACE_ME)",
1455 },
1456 {
1457 Name: "DUPE_TEST",
1458 Value: "ENV_VAR",
1459 },
1460 },
1461 },
1462 nilLister: false,
1463 configMap: &v1.ConfigMap{
1464 ObjectMeta: metav1.ObjectMeta{
1465 Namespace: "test1",
1466 Name: "test-configmap",
1467 },
1468 Data: map[string]string{
1469 "REPLACE_ME": "FROM_CONFIG_MAP",
1470 "DUPE_TEST": "CONFIG_MAP",
1471 },
1472 },
1473 expectedEnvs: []kubecontainer.EnvVar{
1474 {
1475 Name: "TEST_LITERAL",
1476 Value: "test-test-test",
1477 },
1478 {
1479 Name: "TEST_SERVICE_HOST",
1480 Value: "1.2.3.3",
1481 },
1482 {
1483 Name: "TEST_SERVICE_PORT",
1484 Value: "8083",
1485 },
1486 {
1487 Name: "TEST_PORT",
1488 Value: "tcp://1.2.3.3:8083",
1489 },
1490 {
1491 Name: "TEST_PORT_8083_TCP",
1492 Value: "tcp://1.2.3.3:8083",
1493 },
1494 {
1495 Name: "TEST_PORT_8083_TCP_PROTO",
1496 Value: "tcp",
1497 },
1498 {
1499 Name: "TEST_PORT_8083_TCP_PORT",
1500 Value: "8083",
1501 },
1502 {
1503 Name: "TEST_PORT_8083_TCP_ADDR",
1504 Value: "1.2.3.3",
1505 },
1506 {
1507 Name: "REPLACE_ME",
1508 Value: "FROM_CONFIG_MAP",
1509 },
1510 {
1511 Name: "EXPANSION_TEST",
1512 Value: "FROM_CONFIG_MAP",
1513 },
1514 {
1515 Name: "DUPE_TEST",
1516 Value: "ENV_VAR",
1517 },
1518 {
1519 Name: "p_REPLACE_ME",
1520 Value: "FROM_CONFIG_MAP",
1521 },
1522 {
1523 Name: "p_DUPE_TEST",
1524 Value: "CONFIG_MAP",
1525 },
1526 {
1527 Name: "KUBERNETES_SERVICE_HOST",
1528 Value: "1.2.3.1",
1529 },
1530 {
1531 Name: "KUBERNETES_SERVICE_PORT",
1532 Value: "8081",
1533 },
1534 {
1535 Name: "KUBERNETES_PORT",
1536 Value: "tcp://1.2.3.1:8081",
1537 },
1538 {
1539 Name: "KUBERNETES_PORT_8081_TCP",
1540 Value: "tcp://1.2.3.1:8081",
1541 },
1542 {
1543 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1544 Value: "tcp",
1545 },
1546 {
1547 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1548 Value: "8081",
1549 },
1550 {
1551 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1552 Value: "1.2.3.1",
1553 },
1554 },
1555 },
1556 {
1557 name: "configmap_missing",
1558 ns: "test1",
1559 enableServiceLinks: &falseValue,
1560 container: &v1.Container{
1561 EnvFrom: []v1.EnvFromSource{
1562 {ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}}},
1563 },
1564 },
1565 expectedError: true,
1566 },
1567 {
1568 name: "configmap_missing_optional",
1569 ns: "test",
1570 enableServiceLinks: &falseValue,
1571 container: &v1.Container{
1572 EnvFrom: []v1.EnvFromSource{
1573 {ConfigMapRef: &v1.ConfigMapEnvSource{
1574 Optional: &trueVal,
1575 LocalObjectReference: v1.LocalObjectReference{Name: "missing-config-map"}}},
1576 },
1577 },
1578 expectedEnvs: []kubecontainer.EnvVar{
1579 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
1580 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
1581 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
1582 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
1583 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
1584 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
1585 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
1586 },
1587 },
1588 {
1589 name: "configmap_invalid_keys_valid",
1590 ns: "test",
1591 enableServiceLinks: &falseValue,
1592 container: &v1.Container{
1593 EnvFrom: []v1.EnvFromSource{
1594 {
1595 Prefix: "p_",
1596 ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-config-map"}},
1597 },
1598 },
1599 },
1600 configMap: &v1.ConfigMap{
1601 ObjectMeta: metav1.ObjectMeta{
1602 Namespace: "test1",
1603 Name: "test-configmap",
1604 },
1605 Data: map[string]string{
1606 "1234": "abc",
1607 },
1608 },
1609 expectedEnvs: []kubecontainer.EnvVar{
1610 {
1611 Name: "p_1234",
1612 Value: "abc",
1613 },
1614 {
1615 Name: "KUBERNETES_SERVICE_HOST",
1616 Value: "1.2.3.1",
1617 },
1618 {
1619 Name: "KUBERNETES_SERVICE_PORT",
1620 Value: "8081",
1621 },
1622 {
1623 Name: "KUBERNETES_PORT",
1624 Value: "tcp://1.2.3.1:8081",
1625 },
1626 {
1627 Name: "KUBERNETES_PORT_8081_TCP",
1628 Value: "tcp://1.2.3.1:8081",
1629 },
1630 {
1631 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1632 Value: "tcp",
1633 },
1634 {
1635 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1636 Value: "8081",
1637 },
1638 {
1639 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1640 Value: "1.2.3.1",
1641 },
1642 },
1643 },
1644 {
1645 name: "secret",
1646 ns: "test1",
1647 enableServiceLinks: &falseValue,
1648 container: &v1.Container{
1649 EnvFrom: []v1.EnvFromSource{
1650 {
1651 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1652 },
1653 {
1654 Prefix: "p_",
1655 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1656 },
1657 },
1658 Env: []v1.EnvVar{
1659 {
1660 Name: "TEST_LITERAL",
1661 Value: "test-test-test",
1662 },
1663 {
1664 Name: "EXPANSION_TEST",
1665 Value: "$(REPLACE_ME)",
1666 },
1667 {
1668 Name: "DUPE_TEST",
1669 Value: "ENV_VAR",
1670 },
1671 },
1672 },
1673 nilLister: false,
1674 secret: &v1.Secret{
1675 ObjectMeta: metav1.ObjectMeta{
1676 Namespace: "test1",
1677 Name: "test-secret",
1678 },
1679 Data: map[string][]byte{
1680 "REPLACE_ME": []byte("FROM_SECRET"),
1681 "DUPE_TEST": []byte("SECRET"),
1682 },
1683 },
1684 expectedEnvs: []kubecontainer.EnvVar{
1685 {
1686 Name: "TEST_LITERAL",
1687 Value: "test-test-test",
1688 },
1689 {
1690 Name: "REPLACE_ME",
1691 Value: "FROM_SECRET",
1692 },
1693 {
1694 Name: "EXPANSION_TEST",
1695 Value: "FROM_SECRET",
1696 },
1697 {
1698 Name: "DUPE_TEST",
1699 Value: "ENV_VAR",
1700 },
1701 {
1702 Name: "p_REPLACE_ME",
1703 Value: "FROM_SECRET",
1704 },
1705 {
1706 Name: "p_DUPE_TEST",
1707 Value: "SECRET",
1708 },
1709 {
1710 Name: "KUBERNETES_SERVICE_HOST",
1711 Value: "1.2.3.1",
1712 },
1713 {
1714 Name: "KUBERNETES_SERVICE_PORT",
1715 Value: "8081",
1716 },
1717 {
1718 Name: "KUBERNETES_PORT",
1719 Value: "tcp://1.2.3.1:8081",
1720 },
1721 {
1722 Name: "KUBERNETES_PORT_8081_TCP",
1723 Value: "tcp://1.2.3.1:8081",
1724 },
1725 {
1726 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1727 Value: "tcp",
1728 },
1729 {
1730 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1731 Value: "8081",
1732 },
1733 {
1734 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1735 Value: "1.2.3.1",
1736 },
1737 },
1738 },
1739 {
1740 name: "secret, service env vars",
1741 ns: "test1",
1742 enableServiceLinks: &trueValue,
1743 container: &v1.Container{
1744 EnvFrom: []v1.EnvFromSource{
1745 {
1746 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1747 },
1748 {
1749 Prefix: "p_",
1750 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1751 },
1752 },
1753 Env: []v1.EnvVar{
1754 {
1755 Name: "TEST_LITERAL",
1756 Value: "test-test-test",
1757 },
1758 {
1759 Name: "EXPANSION_TEST",
1760 Value: "$(REPLACE_ME)",
1761 },
1762 {
1763 Name: "DUPE_TEST",
1764 Value: "ENV_VAR",
1765 },
1766 },
1767 },
1768 nilLister: false,
1769 secret: &v1.Secret{
1770 ObjectMeta: metav1.ObjectMeta{
1771 Namespace: "test1",
1772 Name: "test-secret",
1773 },
1774 Data: map[string][]byte{
1775 "REPLACE_ME": []byte("FROM_SECRET"),
1776 "DUPE_TEST": []byte("SECRET"),
1777 },
1778 },
1779 expectedEnvs: []kubecontainer.EnvVar{
1780 {
1781 Name: "TEST_LITERAL",
1782 Value: "test-test-test",
1783 },
1784 {
1785 Name: "TEST_SERVICE_HOST",
1786 Value: "1.2.3.3",
1787 },
1788 {
1789 Name: "TEST_SERVICE_PORT",
1790 Value: "8083",
1791 },
1792 {
1793 Name: "TEST_PORT",
1794 Value: "tcp://1.2.3.3:8083",
1795 },
1796 {
1797 Name: "TEST_PORT_8083_TCP",
1798 Value: "tcp://1.2.3.3:8083",
1799 },
1800 {
1801 Name: "TEST_PORT_8083_TCP_PROTO",
1802 Value: "tcp",
1803 },
1804 {
1805 Name: "TEST_PORT_8083_TCP_PORT",
1806 Value: "8083",
1807 },
1808 {
1809 Name: "TEST_PORT_8083_TCP_ADDR",
1810 Value: "1.2.3.3",
1811 },
1812 {
1813 Name: "REPLACE_ME",
1814 Value: "FROM_SECRET",
1815 },
1816 {
1817 Name: "EXPANSION_TEST",
1818 Value: "FROM_SECRET",
1819 },
1820 {
1821 Name: "DUPE_TEST",
1822 Value: "ENV_VAR",
1823 },
1824 {
1825 Name: "p_REPLACE_ME",
1826 Value: "FROM_SECRET",
1827 },
1828 {
1829 Name: "p_DUPE_TEST",
1830 Value: "SECRET",
1831 },
1832 {
1833 Name: "KUBERNETES_SERVICE_HOST",
1834 Value: "1.2.3.1",
1835 },
1836 {
1837 Name: "KUBERNETES_SERVICE_PORT",
1838 Value: "8081",
1839 },
1840 {
1841 Name: "KUBERNETES_PORT",
1842 Value: "tcp://1.2.3.1:8081",
1843 },
1844 {
1845 Name: "KUBERNETES_PORT_8081_TCP",
1846 Value: "tcp://1.2.3.1:8081",
1847 },
1848 {
1849 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1850 Value: "tcp",
1851 },
1852 {
1853 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1854 Value: "8081",
1855 },
1856 {
1857 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1858 Value: "1.2.3.1",
1859 },
1860 },
1861 },
1862 {
1863 name: "secret_missing",
1864 ns: "test1",
1865 enableServiceLinks: &falseValue,
1866 container: &v1.Container{
1867 EnvFrom: []v1.EnvFromSource{
1868 {SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}}},
1869 },
1870 },
1871 expectedError: true,
1872 },
1873 {
1874 name: "secret_missing_optional",
1875 ns: "test",
1876 enableServiceLinks: &falseValue,
1877 container: &v1.Container{
1878 EnvFrom: []v1.EnvFromSource{
1879 {SecretRef: &v1.SecretEnvSource{
1880 LocalObjectReference: v1.LocalObjectReference{Name: "missing-secret"},
1881 Optional: &trueVal}},
1882 },
1883 },
1884 expectedEnvs: []kubecontainer.EnvVar{
1885 {Name: "KUBERNETES_SERVICE_HOST", Value: "1.2.3.1"},
1886 {Name: "KUBERNETES_SERVICE_PORT", Value: "8081"},
1887 {Name: "KUBERNETES_PORT", Value: "tcp://1.2.3.1:8081"},
1888 {Name: "KUBERNETES_PORT_8081_TCP", Value: "tcp://1.2.3.1:8081"},
1889 {Name: "KUBERNETES_PORT_8081_TCP_PROTO", Value: "tcp"},
1890 {Name: "KUBERNETES_PORT_8081_TCP_PORT", Value: "8081"},
1891 {Name: "KUBERNETES_PORT_8081_TCP_ADDR", Value: "1.2.3.1"},
1892 },
1893 },
1894 {
1895 name: "secret_invalid_keys_valid",
1896 ns: "test",
1897 enableServiceLinks: &falseValue,
1898 container: &v1.Container{
1899 EnvFrom: []v1.EnvFromSource{
1900 {
1901 Prefix: "p_",
1902 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1903 },
1904 },
1905 },
1906 secret: &v1.Secret{
1907 ObjectMeta: metav1.ObjectMeta{
1908 Namespace: "test1",
1909 Name: "test-secret",
1910 },
1911 Data: map[string][]byte{
1912 "1234.name": []byte("abc"),
1913 },
1914 },
1915 expectedEnvs: []kubecontainer.EnvVar{
1916 {
1917 Name: "p_1234.name",
1918 Value: "abc",
1919 },
1920 {
1921 Name: "KUBERNETES_SERVICE_HOST",
1922 Value: "1.2.3.1",
1923 },
1924 {
1925 Name: "KUBERNETES_SERVICE_PORT",
1926 Value: "8081",
1927 },
1928 {
1929 Name: "KUBERNETES_PORT",
1930 Value: "tcp://1.2.3.1:8081",
1931 },
1932 {
1933 Name: "KUBERNETES_PORT_8081_TCP",
1934 Value: "tcp://1.2.3.1:8081",
1935 },
1936 {
1937 Name: "KUBERNETES_PORT_8081_TCP_PROTO",
1938 Value: "tcp",
1939 },
1940 {
1941 Name: "KUBERNETES_PORT_8081_TCP_PORT",
1942 Value: "8081",
1943 },
1944 {
1945 Name: "KUBERNETES_PORT_8081_TCP_ADDR",
1946 Value: "1.2.3.1",
1947 },
1948 },
1949 },
1950 {
1951 name: "nil_enableServiceLinks",
1952 ns: "test",
1953 enableServiceLinks: nil,
1954 container: &v1.Container{
1955 EnvFrom: []v1.EnvFromSource{
1956 {
1957 Prefix: "p_",
1958 SecretRef: &v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: "test-secret"}},
1959 },
1960 },
1961 },
1962 secret: &v1.Secret{
1963 ObjectMeta: metav1.ObjectMeta{
1964 Namespace: "test1",
1965 Name: "test-secret",
1966 },
1967 Data: map[string][]byte{
1968 "1234.name": []byte("abc"),
1969 },
1970 },
1971 expectedError: true,
1972 },
1973 }
1974
1975 for _, tc := range testCases {
1976 t.Run(tc.name, func(t *testing.T) {
1977 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.RelaxedEnvironmentVariableValidation, tc.enableRelaxedEnvironmentVariableValidation)()
1978
1979 fakeRecorder := record.NewFakeRecorder(1)
1980 testKubelet := newTestKubelet(t, false)
1981 testKubelet.kubelet.recorder = fakeRecorder
1982 defer testKubelet.Cleanup()
1983 kl := testKubelet.kubelet
1984 if tc.nilLister {
1985 kl.serviceLister = nil
1986 } else if tc.unsyncedServices {
1987 kl.serviceLister = testServiceLister{}
1988 kl.serviceHasSynced = func() bool { return false }
1989 } else {
1990 kl.serviceLister = testServiceLister{services}
1991 kl.serviceHasSynced = func() bool { return true }
1992 }
1993
1994 testKubelet.fakeKubeClient.AddReactor("get", "configmaps", func(action core.Action) (bool, runtime.Object, error) {
1995 var err error
1996 if tc.configMap == nil {
1997 err = apierrors.NewNotFound(action.GetResource().GroupResource(), "configmap-name")
1998 }
1999 return true, tc.configMap, err
2000 })
2001 testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
2002 var err error
2003 if tc.secret == nil {
2004 err = apierrors.NewNotFound(action.GetResource().GroupResource(), "secret-name")
2005 }
2006 return true, tc.secret, err
2007 })
2008
2009 testKubelet.fakeKubeClient.AddReactor("get", "secrets", func(action core.Action) (bool, runtime.Object, error) {
2010 var err error
2011 if tc.secret == nil {
2012 err = errors.New("no secret defined")
2013 }
2014 return true, tc.secret, err
2015 })
2016
2017 testPod := &v1.Pod{
2018 ObjectMeta: metav1.ObjectMeta{
2019 Namespace: tc.ns,
2020 Name: "dapi-test-pod-name",
2021 Annotations: map[string]string{},
2022 },
2023 Spec: v1.PodSpec{
2024 ServiceAccountName: "special",
2025 NodeName: "node-name",
2026 EnableServiceLinks: tc.enableServiceLinks,
2027 },
2028 }
2029 podIP := ""
2030 if len(tc.podIPs) > 0 {
2031 podIP = tc.podIPs[0]
2032 }
2033 if tc.staticPod {
2034 testPod.Annotations[kubetypes.ConfigSourceAnnotationKey] = "file"
2035 }
2036
2037 result, err := kl.makeEnvironmentVariables(testPod, tc.container, podIP, tc.podIPs)
2038 select {
2039 case e := <-fakeRecorder.Events:
2040 assert.Equal(t, tc.expectedEvent, e)
2041 default:
2042 assert.Equal(t, "", tc.expectedEvent)
2043 }
2044 if tc.expectedError {
2045 assert.Error(t, err, tc.name)
2046 } else {
2047 assert.NoError(t, err, "[%s]", tc.name)
2048
2049 sort.Sort(envs(result))
2050 sort.Sort(envs(tc.expectedEnvs))
2051 assert.Equal(t, tc.expectedEnvs, result, "[%s] env entries", tc.name)
2052 }
2053 })
2054
2055 }
2056 }
2057
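// The helpers below construct v1.ContainerStatus fixtures in various states
// (waiting, running, terminated) for the pod phase and status tests.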
2058 func waitingState(cName string) v1.ContainerStatus {
2059 return waitingStateWithReason(cName, "")
2060 }
2061 func waitingStateWithReason(cName, reason string) v1.ContainerStatus {
2062 return v1.ContainerStatus{
2063 Name: cName,
2064 State: v1.ContainerState{
2065 Waiting: &v1.ContainerStateWaiting{Reason: reason},
2066 },
2067 }
2068 }
2069 func waitingStateWithLastTermination(cName string) v1.ContainerStatus {
2070 return v1.ContainerStatus{
2071 Name: cName,
2072 State: v1.ContainerState{
2073 Waiting: &v1.ContainerStateWaiting{},
2074 },
2075 LastTerminationState: v1.ContainerState{
2076 Terminated: &v1.ContainerStateTerminated{
2077 ExitCode: 0,
2078 },
2079 },
2080 }
2081 }
2082 func waitingStateWithNonZeroTermination(cName string) v1.ContainerStatus {
2083 return v1.ContainerStatus{
2084 Name: cName,
2085 State: v1.ContainerState{
2086 Waiting: &v1.ContainerStateWaiting{},
2087 },
2088 LastTerminationState: v1.ContainerState{
2089 Terminated: &v1.ContainerStateTerminated{
2090 ExitCode: -1,
2091 },
2092 },
2093 }
2094 }
2095 func runningState(cName string) v1.ContainerStatus {
2096 return v1.ContainerStatus{
2097 Name: cName,
2098 State: v1.ContainerState{
2099 Running: &v1.ContainerStateRunning{},
2100 },
2101 }
2102 }
2103 func startedState(cName string) v1.ContainerStatus {
2104 started := true
2105 return v1.ContainerStatus{
2106 Name: cName,
2107 State: v1.ContainerState{
2108 Running: &v1.ContainerStateRunning{},
2109 },
2110 Started: &started,
2111 }
2112 }
2113 func runningStateWithStartedAt(cName string, startedAt time.Time) v1.ContainerStatus {
2114 return v1.ContainerStatus{
2115 Name: cName,
2116 State: v1.ContainerState{
2117 Running: &v1.ContainerStateRunning{StartedAt: metav1.Time{Time: startedAt}},
2118 },
2119 }
2120 }
2121 func stoppedState(cName string) v1.ContainerStatus {
2122 return v1.ContainerStatus{
2123 Name: cName,
2124 State: v1.ContainerState{
2125 Terminated: &v1.ContainerStateTerminated{},
2126 },
2127 }
2128 }
2129 func succeededState(cName string) v1.ContainerStatus {
2130 return v1.ContainerStatus{
2131 Name: cName,
2132 State: v1.ContainerState{
2133 Terminated: &v1.ContainerStateTerminated{
2134 ExitCode: 0,
2135 },
2136 },
2137 }
2138 }
2139 func failedState(cName string) v1.ContainerStatus {
2140 return v1.ContainerStatus{
2141 Name: cName,
2142 State: v1.ContainerState{
2143 Terminated: &v1.ContainerStateTerminated{
2144 ExitCode: -1,
2145 },
2146 },
2147 }
2148 }
2149 func waitingWithLastTerminationUnknown(cName string, restartCount int32) v1.ContainerStatus {
2150 return v1.ContainerStatus{
2151 Name: cName,
2152 State: v1.ContainerState{
2153 Waiting: &v1.ContainerStateWaiting{Reason: "ContainerCreating"},
2154 },
2155 LastTerminationState: v1.ContainerState{
2156 Terminated: &v1.ContainerStateTerminated{
2157 Reason: "ContainerStatusUnknown",
2158 Message: "The container could not be located when the pod was deleted. The container used to be Running",
2159 ExitCode: 137,
2160 },
2161 },
2162 RestartCount: restartCount,
2163 }
2164 }
2165 func ready(status v1.ContainerStatus) v1.ContainerStatus {
2166 status.Ready = true
2167 return status
2168 }
2169 func withID(status v1.ContainerStatus, id string) v1.ContainerStatus {
2170 status.ContainerID = id
2171 return status
2172 }
2173
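// TestPodPhaseWithRestartAlways verifies getPhase for pods whose restart
// policy is Always, including pods that have become terminal.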
2174 func TestPodPhaseWithRestartAlways(t *testing.T) {
2175 desiredState := v1.PodSpec{
2176 NodeName: "machine",
2177 Containers: []v1.Container{
2178 {Name: "containerA"},
2179 {Name: "containerB"},
2180 },
2181 RestartPolicy: v1.RestartPolicyAlways,
2182 }
2183
2184 tests := []struct {
2185 pod *v1.Pod
2186 podIsTerminal bool
2187 status v1.PodPhase
2188 test string
2189 }{
2190 {
2191 &v1.Pod{Spec: desiredState, Status: v1.PodStatus{}},
2192 false,
2193 v1.PodPending,
2194 "waiting",
2195 },
2196 {
2197 &v1.Pod{
2198 Spec: desiredState,
2199 Status: v1.PodStatus{
2200 ContainerStatuses: []v1.ContainerStatus{
2201 runningState("containerA"),
2202 runningState("containerB"),
2203 },
2204 },
2205 },
2206 false,
2207 v1.PodRunning,
2208 "all running",
2209 },
2210 {
2211 &v1.Pod{
2212 Spec: desiredState,
2213 Status: v1.PodStatus{
2214 ContainerStatuses: []v1.ContainerStatus{
2215 stoppedState("containerA"),
2216 stoppedState("containerB"),
2217 },
2218 },
2219 },
2220 false,
2221 v1.PodRunning,
2222 "all stopped with restart always",
2223 },
2224 {
2225 &v1.Pod{
2226 Spec: desiredState,
2227 Status: v1.PodStatus{
2228 ContainerStatuses: []v1.ContainerStatus{
2229 succeededState("containerA"),
2230 succeededState("containerB"),
2231 },
2232 },
2233 },
2234 true,
2235 v1.PodSucceeded,
2236 "all succeeded with restart always, but the pod is terminal",
2237 },
2238 {
2239 &v1.Pod{
2240 Spec: desiredState,
2241 Status: v1.PodStatus{
2242 ContainerStatuses: []v1.ContainerStatus{
2243 succeededState("containerA"),
2244 failedState("containerB"),
2245 },
2246 },
2247 },
2248 true,
2249 v1.PodFailed,
2250 "all stopped with restart always, but the pod is terminal",
2251 },
2252 {
2253 &v1.Pod{
2254 Spec: desiredState,
2255 Status: v1.PodStatus{
2256 ContainerStatuses: []v1.ContainerStatus{
2257 runningState("containerA"),
2258 stoppedState("containerB"),
2259 },
2260 },
2261 },
2262 false,
2263 v1.PodRunning,
2264 "mixed state #1 with restart always",
2265 },
2266 {
2267 &v1.Pod{
2268 Spec: desiredState,
2269 Status: v1.PodStatus{
2270 ContainerStatuses: []v1.ContainerStatus{
2271 runningState("containerA"),
2272 },
2273 },
2274 },
2275 false,
2276 v1.PodPending,
2277 "mixed state #2 with restart always",
2278 },
2279 {
2280 &v1.Pod{
2281 Spec: desiredState,
2282 Status: v1.PodStatus{
2283 ContainerStatuses: []v1.ContainerStatus{
2284 runningState("containerA"),
2285 waitingState("containerB"),
2286 },
2287 },
2288 },
2289 false,
2290 v1.PodPending,
2291 "mixed state #3 with restart always",
2292 },
2293 {
2294 &v1.Pod{
2295 Spec: desiredState,
2296 Status: v1.PodStatus{
2297 ContainerStatuses: []v1.ContainerStatus{
2298 runningState("containerA"),
2299 waitingStateWithLastTermination("containerB"),
2300 },
2301 },
2302 },
2303 false,
2304 v1.PodRunning,
2305 "backoff crashloop container with restart always",
2306 },
2307 }
2308 for _, test := range tests {
2309 status := getPhase(test.pod, test.pod.Status.ContainerStatuses, test.podIsTerminal)
2310 assert.Equal(t, test.status, status, "[test %s]", test.test)
2311 }
2312 }
2313
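// TestPodPhaseWithRestartAlwaysInitContainers verifies that a pod with RestartPolicy=Always
// stays Pending until its init container has succeeded, even if the init container failed
// (it will be retried).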
2314 func TestPodPhaseWithRestartAlwaysInitContainers(t *testing.T) {
2315 desiredState := v1.PodSpec{
2316 NodeName: "machine",
2317 InitContainers: []v1.Container{
2318 {Name: "containerX"},
2319 },
2320 Containers: []v1.Container{
2321 {Name: "containerA"},
2322 {Name: "containerB"},
2323 },
2324 RestartPolicy: v1.RestartPolicyAlways,
2325 }
2326
2327 tests := []struct {
2328 pod *v1.Pod
2329 status v1.PodPhase
2330 test string
2331 }{
2332 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
2333 {
2334 &v1.Pod{
2335 Spec: desiredState,
2336 Status: v1.PodStatus{
2337 InitContainerStatuses: []v1.ContainerStatus{
2338 runningState("containerX"),
2339 },
2340 },
2341 },
2342 v1.PodPending,
2343 "init container running",
2344 },
2345 {
2346 &v1.Pod{
2347 Spec: desiredState,
2348 Status: v1.PodStatus{
2349 InitContainerStatuses: []v1.ContainerStatus{
2350 failedState("containerX"),
2351 },
2352 },
2353 },
2354 v1.PodPending,
2355 "init container terminated non-zero",
2356 },
2357 {
2358 &v1.Pod{
2359 Spec: desiredState,
2360 Status: v1.PodStatus{
2361 InitContainerStatuses: []v1.ContainerStatus{
2362 waitingStateWithLastTermination("containerX"),
2363 },
2364 },
2365 },
2366 v1.PodPending,
2367 "init container waiting, terminated zero",
2368 },
2369 {
2370 &v1.Pod{
2371 Spec: desiredState,
2372 Status: v1.PodStatus{
2373 InitContainerStatuses: []v1.ContainerStatus{
2374 waitingStateWithNonZeroTermination("containerX"),
2375 },
2376 },
2377 },
2378 v1.PodPending,
2379 "init container waiting, terminated non-zero",
2380 },
2381 {
2382 &v1.Pod{
2383 Spec: desiredState,
2384 Status: v1.PodStatus{
2385 InitContainerStatuses: []v1.ContainerStatus{
2386 waitingState("containerX"),
2387 },
2388 },
2389 },
2390 v1.PodPending,
2391 "init container waiting, not terminated",
2392 },
2393 {
2394 &v1.Pod{
2395 Spec: desiredState,
2396 Status: v1.PodStatus{
2397 InitContainerStatuses: []v1.ContainerStatus{
2398 succeededState("containerX"),
2399 },
2400 ContainerStatuses: []v1.ContainerStatus{
2401 runningState("containerA"),
2402 runningState("containerB"),
2403 },
2404 },
2405 },
2406 v1.PodRunning,
2407 "init container succeeded",
2408 },
2409 }
2410 for _, test := range tests {
2411 statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
2412 status := getPhase(test.pod, statusInfo, false)
2413 assert.Equal(t, test.status, status, "[test %s]", test.test)
2414 }
2415 }
2416
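// TestPodPhaseWithRestartAlwaysRestartableInitContainers covers getPhase with the
// SidecarContainers feature enabled: a restartable init container only needs to have started
// for the regular containers to drive the pod phase.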
2417 func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
2418 desiredState := v1.PodSpec{
2419 NodeName: "machine",
2420 InitContainers: []v1.Container{
2421 {Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
2422 },
2423 Containers: []v1.Container{
2424 {Name: "containerA"},
2425 {Name: "containerB"},
2426 },
2427 RestartPolicy: v1.RestartPolicyAlways,
2428 }
2429
2430 tests := []struct {
2431 pod *v1.Pod
2432 podIsTerminal bool
2433 status v1.PodPhase
2434 test string
2435 }{
2436 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, false, v1.PodPending, "empty, waiting"},
2437 {
2438 &v1.Pod{
2439 Spec: desiredState,
2440 Status: v1.PodStatus{
2441 InitContainerStatuses: []v1.ContainerStatus{
2442 runningState("containerX"),
2443 },
2444 },
2445 },
2446 false,
2447 v1.PodPending,
2448 "restartable init container running",
2449 },
2450 {
2451 &v1.Pod{
2452 Spec: desiredState,
2453 Status: v1.PodStatus{
2454 InitContainerStatuses: []v1.ContainerStatus{
2455 stoppedState("containerX"),
2456 },
2457 },
2458 },
2459 false,
2460 v1.PodPending,
2461 "restartable init container stopped",
2462 },
2463 {
2464 &v1.Pod{
2465 Spec: desiredState,
2466 Status: v1.PodStatus{
2467 InitContainerStatuses: []v1.ContainerStatus{
2468 waitingStateWithLastTermination("containerX"),
2469 },
2470 },
2471 },
2472 false,
2473 v1.PodPending,
2474 "restartable init container waiting, terminated zero",
2475 },
2476 {
2477 &v1.Pod{
2478 Spec: desiredState,
2479 Status: v1.PodStatus{
2480 InitContainerStatuses: []v1.ContainerStatus{
2481 waitingStateWithNonZeroTermination("containerX"),
2482 },
2483 },
2484 },
2485 false,
2486 v1.PodPending,
2487 "restartable init container waiting, terminated non-zero",
2488 },
2489 {
2490 &v1.Pod{
2491 Spec: desiredState,
2492 Status: v1.PodStatus{
2493 InitContainerStatuses: []v1.ContainerStatus{
2494 waitingState("containerX"),
2495 },
2496 },
2497 },
2498 false,
2499 v1.PodPending,
2500 "restartable init container waiting, not terminated",
2501 },
2502 {
2503 &v1.Pod{
2504 Spec: desiredState,
2505 Status: v1.PodStatus{
2506 InitContainerStatuses: []v1.ContainerStatus{
2507 startedState("containerX"),
2508 },
2509 ContainerStatuses: []v1.ContainerStatus{
2510 runningState("containerA"),
2511 },
2512 },
2513 },
2514 false,
2515 v1.PodPending,
2516 "restartable init container started, 1/2 regular container running",
2517 },
2518 {
2519 &v1.Pod{
2520 Spec: desiredState,
2521 Status: v1.PodStatus{
2522 InitContainerStatuses: []v1.ContainerStatus{
2523 startedState("containerX"),
2524 },
2525 ContainerStatuses: []v1.ContainerStatus{
2526 runningState("containerA"),
2527 runningState("containerB"),
2528 },
2529 },
2530 },
2531 false,
2532 v1.PodRunning,
2533 "restartable init container started, all regular containers running",
2534 },
2535 {
2536 &v1.Pod{
2537 Spec: desiredState,
2538 Status: v1.PodStatus{
2539 InitContainerStatuses: []v1.ContainerStatus{
2540 runningState("containerX"),
2541 },
2542 ContainerStatuses: []v1.ContainerStatus{
2543 runningState("containerA"),
2544 runningState("containerB"),
2545 },
2546 },
2547 },
2548 false,
2549 v1.PodRunning,
2550 "restartable init container running, all regular containers running",
2551 },
2552 {
2553 &v1.Pod{
2554 Spec: desiredState,
2555 Status: v1.PodStatus{
2556 InitContainerStatuses: []v1.ContainerStatus{
2557 stoppedState("containerX"),
2558 },
2559 ContainerStatuses: []v1.ContainerStatus{
2560 runningState("containerA"),
2561 runningState("containerB"),
2562 },
2563 },
2564 },
2565 false,
2566 v1.PodRunning,
2567 "restartable init container stopped, all regular containers running",
2568 },
2569 {
2570 &v1.Pod{
2571 Spec: desiredState,
2572 Status: v1.PodStatus{
2573 InitContainerStatuses: []v1.ContainerStatus{
2574 waitingStateWithLastTermination("containerX"),
2575 },
2576 ContainerStatuses: []v1.ContainerStatus{
2577 runningState("containerA"),
2578 runningState("containerB"),
2579 },
2580 },
2581 },
2582 false,
2583 v1.PodRunning,
2584 "backoff crashloop restartable init container, all regular containers running",
2585 },
2586 {
2587 &v1.Pod{
2588 Spec: desiredState,
2589 Status: v1.PodStatus{
2590 InitContainerStatuses: []v1.ContainerStatus{
2591 failedState("containerX"),
2592 },
2593 ContainerStatuses: []v1.ContainerStatus{
2594 succeededState("containerA"),
2595 succeededState("containerB"),
2596 },
2597 },
2598 },
2599 true,
2600 v1.PodSucceeded,
2601 "all regular containers succeeded and restartable init container failed with restart always, but the pod is terminal",
2602 },
2603 {
2604 &v1.Pod{
2605 Spec: desiredState,
2606 Status: v1.PodStatus{
2607 InitContainerStatuses: []v1.ContainerStatus{
2608 succeededState("containerX"),
2609 },
2610 ContainerStatuses: []v1.ContainerStatus{
2611 succeededState("containerA"),
2612 succeededState("containerB"),
2613 },
2614 },
2615 },
2616 true,
2617 v1.PodSucceeded,
2618 "all regular containers succeeded and restartable init container succeeded with restart always, but the pod is terminal",
2619 },
2620 }
2621 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)()
2622 for _, test := range tests {
2623 statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
2624 status := getPhase(test.pod, statusInfo, test.podIsTerminal)
2625 assert.Equal(t, test.status, status, "[test %s]", test.test)
2626 }
2627 }
2628
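// TestPodPhaseWithRestartNever exercises getPhase for pods with RestartPolicy=Never, where
// terminated containers map the pod directly to Succeeded or Failed.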
2629 func TestPodPhaseWithRestartNever(t *testing.T) {
2630 desiredState := v1.PodSpec{
2631 NodeName: "machine",
2632 Containers: []v1.Container{
2633 {Name: "containerA"},
2634 {Name: "containerB"},
2635 },
2636 RestartPolicy: v1.RestartPolicyNever,
2637 }
2638
2639 tests := []struct {
2640 pod *v1.Pod
2641 status v1.PodPhase
2642 test string
2643 }{
2644 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
2645 {
2646 &v1.Pod{
2647 Spec: desiredState,
2648 Status: v1.PodStatus{
2649 ContainerStatuses: []v1.ContainerStatus{
2650 runningState("containerA"),
2651 runningState("containerB"),
2652 },
2653 },
2654 },
2655 v1.PodRunning,
2656 "all running with restart never",
2657 },
2658 {
2659 &v1.Pod{
2660 Spec: desiredState,
2661 Status: v1.PodStatus{
2662 ContainerStatuses: []v1.ContainerStatus{
2663 succeededState("containerA"),
2664 succeededState("containerB"),
2665 },
2666 },
2667 },
2668 v1.PodSucceeded,
2669 "all succeeded with restart never",
2670 },
2671 {
2672 &v1.Pod{
2673 Spec: desiredState,
2674 Status: v1.PodStatus{
2675 ContainerStatuses: []v1.ContainerStatus{
2676 failedState("containerA"),
2677 failedState("containerB"),
2678 },
2679 },
2680 },
2681 v1.PodFailed,
2682 "all failed with restart never",
2683 },
2684 {
2685 &v1.Pod{
2686 Spec: desiredState,
2687 Status: v1.PodStatus{
2688 ContainerStatuses: []v1.ContainerStatus{
2689 runningState("containerA"),
2690 succeededState("containerB"),
2691 },
2692 },
2693 },
2694 v1.PodRunning,
2695 "mixed state #1 with restart never",
2696 },
2697 {
2698 &v1.Pod{
2699 Spec: desiredState,
2700 Status: v1.PodStatus{
2701 ContainerStatuses: []v1.ContainerStatus{
2702 runningState("containerA"),
2703 },
2704 },
2705 },
2706 v1.PodPending,
2707 "mixed state #2 with restart never",
2708 },
2709 {
2710 &v1.Pod{
2711 Spec: desiredState,
2712 Status: v1.PodStatus{
2713 ContainerStatuses: []v1.ContainerStatus{
2714 runningState("containerA"),
2715 waitingState("containerB"),
2716 },
2717 },
2718 },
2719 v1.PodPending,
2720 "mixed state #3 with restart never",
2721 },
2722 }
2723 for _, test := range tests {
2724 status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
2725 assert.Equal(t, test.status, status, "[test %s]", test.test)
2726 }
2727 }
2728
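// TestPodPhaseWithRestartNeverInitContainers verifies that with RestartPolicy=Never a failed
// init container marks the whole pod Failed.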
2729 func TestPodPhaseWithRestartNeverInitContainers(t *testing.T) {
2730 desiredState := v1.PodSpec{
2731 NodeName: "machine",
2732 InitContainers: []v1.Container{
2733 {Name: "containerX"},
2734 },
2735 Containers: []v1.Container{
2736 {Name: "containerA"},
2737 {Name: "containerB"},
2738 },
2739 RestartPolicy: v1.RestartPolicyNever,
2740 }
2741
2742 tests := []struct {
2743 pod *v1.Pod
2744 status v1.PodPhase
2745 test string
2746 }{
2747 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
2748 {
2749 &v1.Pod{
2750 Spec: desiredState,
2751 Status: v1.PodStatus{
2752 InitContainerStatuses: []v1.ContainerStatus{
2753 runningState("containerX"),
2754 },
2755 },
2756 },
2757 v1.PodPending,
2758 "init container running",
2759 },
2760 {
2761 &v1.Pod{
2762 Spec: desiredState,
2763 Status: v1.PodStatus{
2764 InitContainerStatuses: []v1.ContainerStatus{
2765 failedState("containerX"),
2766 },
2767 },
2768 },
2769 v1.PodFailed,
2770 "init container terminated non-zero",
2771 },
2772 {
2773 &v1.Pod{
2774 Spec: desiredState,
2775 Status: v1.PodStatus{
2776 InitContainerStatuses: []v1.ContainerStatus{
2777 waitingStateWithLastTermination("containerX"),
2778 },
2779 },
2780 },
2781 v1.PodPending,
2782 "init container waiting, terminated zero",
2783 },
2784 {
2785 &v1.Pod{
2786 Spec: desiredState,
2787 Status: v1.PodStatus{
2788 InitContainerStatuses: []v1.ContainerStatus{
2789 waitingStateWithNonZeroTermination("containerX"),
2790 },
2791 },
2792 },
2793 v1.PodFailed,
2794 "init container waiting, terminated non-zero",
2795 },
2796 {
2797 &v1.Pod{
2798 Spec: desiredState,
2799 Status: v1.PodStatus{
2800 InitContainerStatuses: []v1.ContainerStatus{
2801 waitingState("containerX"),
2802 },
2803 },
2804 },
2805 v1.PodPending,
2806 "init container waiting, not terminated",
2807 },
2808 {
2809 &v1.Pod{
2810 Spec: desiredState,
2811 Status: v1.PodStatus{
2812 InitContainerStatuses: []v1.ContainerStatus{
2813 succeededState("containerX"),
2814 },
2815 ContainerStatuses: []v1.ContainerStatus{
2816 runningState("containerA"),
2817 runningState("containerB"),
2818 },
2819 },
2820 },
2821 v1.PodRunning,
2822 "init container succeeded",
2823 },
2824 }
2825 for _, test := range tests {
2826 statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
2827 status := getPhase(test.pod, statusInfo, false)
2828 assert.Equal(t, test.status, status, "[test %s]", test.test)
2829 }
2830 }
2831
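// TestPodPhaseWithRestartNeverRestartableInitContainers covers RestartPolicy=Never with the
// SidecarContainers feature enabled: once all regular containers have succeeded, a terminated
// or crash-looping sidecar does not prevent the pod from reporting Succeeded, while a sidecar
// that is still running keeps the pod in the Running phase.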
2832 func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
2833 desiredState := v1.PodSpec{
2834 NodeName: "machine",
2835 InitContainers: []v1.Container{
2836 {Name: "containerX", RestartPolicy: &containerRestartPolicyAlways},
2837 },
2838 Containers: []v1.Container{
2839 {Name: "containerA"},
2840 {Name: "containerB"},
2841 },
2842 RestartPolicy: v1.RestartPolicyNever,
2843 }
2844
2845 tests := []struct {
2846 pod *v1.Pod
2847 status v1.PodPhase
2848 test string
2849 }{
2850 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "empty, waiting"},
2851 {
2852 &v1.Pod{
2853 Spec: desiredState,
2854 Status: v1.PodStatus{
2855 InitContainerStatuses: []v1.ContainerStatus{
2856 runningState("containerX"),
2857 },
2858 },
2859 },
2860 v1.PodPending,
2861 "restartable init container running",
2862 },
2863 {
2864 &v1.Pod{
2865 Spec: desiredState,
2866 Status: v1.PodStatus{
2867 InitContainerStatuses: []v1.ContainerStatus{
2868 stoppedState("containerX"),
2869 },
2870 },
2871 },
2872 v1.PodPending,
2873 "restartable init container stopped",
2874 },
2875 {
2876 &v1.Pod{
2877 Spec: desiredState,
2878 Status: v1.PodStatus{
2879 InitContainerStatuses: []v1.ContainerStatus{
2880 waitingStateWithLastTermination("containerX"),
2881 },
2882 },
2883 },
2884 v1.PodPending,
2885 "restartable init container waiting, terminated zero",
2886 },
2887 {
2888 &v1.Pod{
2889 Spec: desiredState,
2890 Status: v1.PodStatus{
2891 InitContainerStatuses: []v1.ContainerStatus{
2892 waitingStateWithNonZeroTermination("containerX"),
2893 },
2894 },
2895 },
2896 v1.PodPending,
2897 "restartable init container waiting, terminated non-zero",
2898 },
2899 {
2900 &v1.Pod{
2901 Spec: desiredState,
2902 Status: v1.PodStatus{
2903 InitContainerStatuses: []v1.ContainerStatus{
2904 waitingState("containerX"),
2905 },
2906 },
2907 },
2908 v1.PodPending,
2909 "restartable init container waiting, not terminated",
2910 },
2911 {
2912 &v1.Pod{
2913 Spec: desiredState,
2914 Status: v1.PodStatus{
2915 InitContainerStatuses: []v1.ContainerStatus{
2916 startedState("containerX"),
2917 },
2918 ContainerStatuses: []v1.ContainerStatus{
2919 runningState("containerA"),
2920 },
2921 },
2922 },
2923 v1.PodPending,
2924 "restartable init container started, one main container running",
2925 },
2926 {
2927 &v1.Pod{
2928 Spec: desiredState,
2929 Status: v1.PodStatus{
2930 InitContainerStatuses: []v1.ContainerStatus{
2931 startedState("containerX"),
2932 },
2933 ContainerStatuses: []v1.ContainerStatus{
2934 succeededState("containerA"),
2935 succeededState("containerB"),
2936 },
2937 },
2938 },
2939 v1.PodRunning,
2940 "restartable init container started, main containers succeeded",
2941 },
2942 {
2943 &v1.Pod{
2944 Spec: desiredState,
2945 Status: v1.PodStatus{
2946 InitContainerStatuses: []v1.ContainerStatus{
2947 runningState("containerX"),
2948 },
2949 ContainerStatuses: []v1.ContainerStatus{
2950 succeededState("containerA"),
2951 succeededState("containerB"),
2952 },
2953 },
2954 },
2955 v1.PodRunning,
2956 "restartable init container running, main containers succeeded",
2957 },
2958 {
2959 &v1.Pod{
2960 Spec: desiredState,
2961 Status: v1.PodStatus{
2962 InitContainerStatuses: []v1.ContainerStatus{
2963 succeededState("containerX"),
2964 },
2965 ContainerStatuses: []v1.ContainerStatus{
2966 succeededState("containerA"),
2967 succeededState("containerB"),
2968 },
2969 },
2970 },
2971 v1.PodSucceeded,
2972 "all containers succeeded",
2973 },
2974 {
2975 &v1.Pod{
2976 Spec: desiredState,
2977 Status: v1.PodStatus{
2978 InitContainerStatuses: []v1.ContainerStatus{
2979 failedState("containerX"),
2980 },
2981 ContainerStatuses: []v1.ContainerStatus{
2982 succeededState("containerA"),
2983 succeededState("containerB"),
2984 },
2985 },
2986 },
2987 v1.PodSucceeded,
2988 "restartable init container terminated non-zero, main containers succeeded",
2989 },
2990 {
2991 &v1.Pod{
2992 Spec: desiredState,
2993 Status: v1.PodStatus{
2994 InitContainerStatuses: []v1.ContainerStatus{
2995 waitingStateWithLastTermination("containerX"),
2996 },
2997 ContainerStatuses: []v1.ContainerStatus{
2998 succeededState("containerA"),
2999 succeededState("containerB"),
3000 },
3001 },
3002 },
3003 v1.PodSucceeded,
3004 "backoff crashloop restartable init container, main containers succeeded",
3005 },
3006 {
3007 &v1.Pod{
3008 Spec: desiredState,
3009 Status: v1.PodStatus{
3010 InitContainerStatuses: []v1.ContainerStatus{
3011 waitingStateWithNonZeroTermination("containerX"),
3012 },
3013 ContainerStatuses: []v1.ContainerStatus{
3014 succeededState("containerA"),
3015 succeededState("containerB"),
3016 },
3017 },
3018 },
3019 v1.PodSucceeded,
3020 "backoff crashloop with non-zero restartable init container, main containers succeeded",
3021 },
3022 }
3023 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)()
3024 for _, test := range tests {
3025 statusInfo := append(test.pod.Status.InitContainerStatuses[:], test.pod.Status.ContainerStatuses[:]...)
3026 status := getPhase(test.pod, statusInfo, false)
3027 assert.Equal(t, test.status, status, "[test %s]", test.test)
3028 }
3029 }
3030
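// TestPodPhaseWithRestartOnFailure exercises getPhase for pods with RestartPolicy=OnFailure,
// where failed containers are restarted and therefore keep the pod Running.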
3031 func TestPodPhaseWithRestartOnFailure(t *testing.T) {
3032 desiredState := v1.PodSpec{
3033 NodeName: "machine",
3034 Containers: []v1.Container{
3035 {Name: "containerA"},
3036 {Name: "containerB"},
3037 },
3038 RestartPolicy: v1.RestartPolicyOnFailure,
3039 }
3040
3041 tests := []struct {
3042 pod *v1.Pod
3043 status v1.PodPhase
3044 test string
3045 }{
3046 {&v1.Pod{Spec: desiredState, Status: v1.PodStatus{}}, v1.PodPending, "waiting"},
3047 {
3048 &v1.Pod{
3049 Spec: desiredState,
3050 Status: v1.PodStatus{
3051 ContainerStatuses: []v1.ContainerStatus{
3052 runningState("containerA"),
3053 runningState("containerB"),
3054 },
3055 },
3056 },
3057 v1.PodRunning,
3058 "all running with restart onfailure",
3059 },
3060 {
3061 &v1.Pod{
3062 Spec: desiredState,
3063 Status: v1.PodStatus{
3064 ContainerStatuses: []v1.ContainerStatus{
3065 succeededState("containerA"),
3066 succeededState("containerB"),
3067 },
3068 },
3069 },
3070 v1.PodSucceeded,
3071 "all succeeded with restart onfailure",
3072 },
3073 {
3074 &v1.Pod{
3075 Spec: desiredState,
3076 Status: v1.PodStatus{
3077 ContainerStatuses: []v1.ContainerStatus{
3078 failedState("containerA"),
3079 failedState("containerB"),
3080 },
3081 },
3082 },
3083 v1.PodRunning,
3084 "all failed with restart never",
3085 },
3086 {
3087 &v1.Pod{
3088 Spec: desiredState,
3089 Status: v1.PodStatus{
3090 ContainerStatuses: []v1.ContainerStatus{
3091 runningState("containerA"),
3092 succeededState("containerB"),
3093 },
3094 },
3095 },
3096 v1.PodRunning,
3097 "mixed state #1 with restart onfailure",
3098 },
3099 {
3100 &v1.Pod{
3101 Spec: desiredState,
3102 Status: v1.PodStatus{
3103 ContainerStatuses: []v1.ContainerStatus{
3104 runningState("containerA"),
3105 },
3106 },
3107 },
3108 v1.PodPending,
3109 "mixed state #2 with restart onfailure",
3110 },
3111 {
3112 &v1.Pod{
3113 Spec: desiredState,
3114 Status: v1.PodStatus{
3115 ContainerStatuses: []v1.ContainerStatus{
3116 runningState("containerA"),
3117 waitingState("containerB"),
3118 },
3119 },
3120 },
3121 v1.PodPending,
3122 "mixed state #3 with restart onfailure",
3123 },
3124 {
3125 &v1.Pod{
3126 Spec: desiredState,
3127 Status: v1.PodStatus{
3128 ContainerStatuses: []v1.ContainerStatus{
3129 runningState("containerA"),
3130 waitingStateWithLastTermination("containerB"),
3131 },
3132 },
3133 },
3134 v1.PodRunning,
3135 "backoff crashloop container with restart onfailure",
3136 },
3137 }
3138 for _, test := range tests {
3139 status := getPhase(test.pod, test.pod.Status.ContainerStatuses, false)
3140 assert.Equal(t, test.status, status, "[test %s]", test.test)
3141 }
3142 }
3143
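// TestConvertToAPIContainerStatuses checks that containers without a current runtime status are
// reported as waiting with a ContainerStatusUnknown last termination, and that the restart count
// is only bumped when the pod is not being deleted.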
3148 func TestConvertToAPIContainerStatuses(t *testing.T) {
3149 desiredState := v1.PodSpec{
3150 NodeName: "machine",
3151 Containers: []v1.Container{
3152 {Name: "containerA"},
3153 {Name: "containerB"},
3154 },
3155 RestartPolicy: v1.RestartPolicyAlways,
3156 }
3157 now := metav1.Now()
3158
3159 tests := []struct {
3160 name string
3161 pod *v1.Pod
3162 currentStatus *kubecontainer.PodStatus
3163 previousStatus []v1.ContainerStatus
3164 containers []v1.Container
3165 hasInitContainers bool
3166 isInitContainer bool
3167 expected []v1.ContainerStatus
3168 }{
3169 {
3170 name: "no current status, with previous statuses and deletion",
3171 pod: &v1.Pod{
3172 Spec: desiredState,
3173 Status: v1.PodStatus{
3174 ContainerStatuses: []v1.ContainerStatus{
3175 runningState("containerA"),
3176 runningState("containerB"),
3177 },
3178 },
3179 ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
3180 },
3181 currentStatus: &kubecontainer.PodStatus{},
3182 previousStatus: []v1.ContainerStatus{
3183 runningState("containerA"),
3184 runningState("containerB"),
3185 },
3186 containers: desiredState.Containers,
3189 expected: []v1.ContainerStatus{
3190 waitingWithLastTerminationUnknown("containerA", 0),
3191 waitingWithLastTerminationUnknown("containerB", 0),
3192 },
3193 },
3194 {
3195 name: "no current status, with previous statuses and no deletion",
3196 pod: &v1.Pod{
3197 Spec: desiredState,
3198 Status: v1.PodStatus{
3199 ContainerStatuses: []v1.ContainerStatus{
3200 runningState("containerA"),
3201 runningState("containerB"),
3202 },
3203 },
3204 },
3205 currentStatus: &kubecontainer.PodStatus{},
3206 previousStatus: []v1.ContainerStatus{
3207 runningState("containerA"),
3208 runningState("containerB"),
3209 },
3210 containers: desiredState.Containers,
3213 expected: []v1.ContainerStatus{
3214 waitingWithLastTerminationUnknown("containerA", 1),
3215 waitingWithLastTerminationUnknown("containerB", 1),
3216 },
3217 },
3218 }
3219 for _, test := range tests {
3220 t.Run(test.name, func(t *testing.T) {
3221 testKubelet := newTestKubelet(t, false)
3222 defer testKubelet.Cleanup()
3223 kl := testKubelet.kubelet
3224 containerStatuses := kl.convertToAPIContainerStatuses(
3225 test.pod,
3226 test.currentStatus,
3227 test.previousStatus,
3228 test.containers,
3229 test.hasInitContainers,
3230 test.isInitContainer,
3231 )
3232 for i, status := range containerStatuses {
3233 assert.Equal(t, test.expected[i], status, "[test %s]", test.name)
3234 }
3235 })
3236 }
3237 }
3238
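// Test_generateAPIPodStatus covers how generateAPIPodStatus merges the previous API status, the
// runtime status, and pod conditions: terminal phases never regress, reason/message are kept only
// while the phase is unchanged, and the feature-gated DisruptionTarget and
// PodReadyToStartContainers conditions are prepended when their gates are enabled.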
3239 func Test_generateAPIPodStatus(t *testing.T) {
3240 desiredState := v1.PodSpec{
3241 NodeName: "machine",
3242 Containers: []v1.Container{
3243 {Name: "containerA"},
3244 {Name: "containerB"},
3245 },
3246 RestartPolicy: v1.RestartPolicyAlways,
3247 }
3248 sandboxReadyStatus := &kubecontainer.PodStatus{
3249 SandboxStatuses: []*runtimeapi.PodSandboxStatus{
3250 {
3251 Network: &runtimeapi.PodSandboxNetworkStatus{
3252 Ip: "10.0.0.10",
3253 },
3254 Metadata: &runtimeapi.PodSandboxMetadata{Attempt: uint32(0)},
3255 State: runtimeapi.PodSandboxState_SANDBOX_READY,
3256 },
3257 },
3258 }
3259
3260 now := metav1.Now()
3261 normalized_now := now.Rfc3339Copy()
3262
3263 tests := []struct {
3264 name string
3265 pod *v1.Pod
3266 currentStatus *kubecontainer.PodStatus
3267 unreadyContainer []string
3268 previousStatus v1.PodStatus
3269 isPodTerminal bool
3270 enablePodDisruptionConditions bool
3271 expected v1.PodStatus
3272 expectedPodDisruptionCondition v1.PodCondition
3273 expectedPodReadyToStartContainersCondition v1.PodCondition
3274 }{
3275 {
3276 name: "pod disruption condition is copied over and the phase is set to failed when deleted; PodDisruptionConditions enabled",
3277 pod: &v1.Pod{
3278 Spec: desiredState,
3279 Status: v1.PodStatus{
3280 ContainerStatuses: []v1.ContainerStatus{
3281 runningState("containerA"),
3282 runningState("containerB"),
3283 },
3284 Conditions: []v1.PodCondition{{
3285 Type: v1.DisruptionTarget,
3286 Status: v1.ConditionTrue,
3287 LastTransitionTime: normalized_now,
3288 }},
3289 },
3290 ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
3291 },
3292 currentStatus: sandboxReadyStatus,
3293 previousStatus: v1.PodStatus{
3294 ContainerStatuses: []v1.ContainerStatus{
3295 runningState("containerA"),
3296 runningState("containerB"),
3297 },
3298 Conditions: []v1.PodCondition{{
3299 Type: v1.DisruptionTarget,
3300 Status: v1.ConditionTrue,
3301 LastTransitionTime: normalized_now,
3302 }},
3303 },
3304 isPodTerminal: true,
3305 enablePodDisruptionConditions: true,
3306 expected: v1.PodStatus{
3307 Phase: v1.PodFailed,
3308 HostIP: "127.0.0.1",
3309 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3310 QOSClass: v1.PodQOSBestEffort,
3311 Conditions: []v1.PodCondition{
3312 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3313 {Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
3314 {Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodFailed"},
3315 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3316 },
3317 ContainerStatuses: []v1.ContainerStatus{
3318 ready(waitingWithLastTerminationUnknown("containerA", 0)),
3319 ready(waitingWithLastTerminationUnknown("containerB", 0)),
3320 },
3321 },
3322 expectedPodDisruptionCondition: v1.PodCondition{
3323 Type: v1.DisruptionTarget,
3324 Status: v1.ConditionTrue,
3325 LastTransitionTime: normalized_now,
3326 },
3327 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3328 Type: v1.PodReadyToStartContainers,
3329 Status: v1.ConditionTrue,
3330 },
3331 },
3332 {
3333 name: "current status ready, with previous statuses and deletion",
3334 pod: &v1.Pod{
3335 Spec: desiredState,
3336 Status: v1.PodStatus{
3337 ContainerStatuses: []v1.ContainerStatus{
3338 runningState("containerA"),
3339 runningState("containerB"),
3340 },
3341 },
3342 ObjectMeta: metav1.ObjectMeta{Name: "my-pod", DeletionTimestamp: &now},
3343 },
3344 currentStatus: sandboxReadyStatus,
3345 previousStatus: v1.PodStatus{
3346 ContainerStatuses: []v1.ContainerStatus{
3347 runningState("containerA"),
3348 runningState("containerB"),
3349 },
3350 },
3351 expected: v1.PodStatus{
3352 Phase: v1.PodRunning,
3353 HostIP: "127.0.0.1",
3354 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3355 QOSClass: v1.PodQOSBestEffort,
3356 Conditions: []v1.PodCondition{
3357 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3358 {Type: v1.PodReady, Status: v1.ConditionTrue},
3359 {Type: v1.ContainersReady, Status: v1.ConditionTrue},
3360 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3361 },
3362 ContainerStatuses: []v1.ContainerStatus{
3363 ready(waitingWithLastTerminationUnknown("containerA", 0)),
3364 ready(waitingWithLastTerminationUnknown("containerB", 0)),
3365 },
3366 },
3367 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3368 Type: v1.PodReadyToStartContainers,
3369 Status: v1.ConditionTrue,
3370 },
3371 },
3372 {
3373 name: "current status ready, with previous statuses and no deletion",
3374 pod: &v1.Pod{
3375 Spec: desiredState,
3376 Status: v1.PodStatus{
3377 ContainerStatuses: []v1.ContainerStatus{
3378 runningState("containerA"),
3379 runningState("containerB"),
3380 },
3381 },
3382 },
3383 currentStatus: sandboxReadyStatus,
3384 previousStatus: v1.PodStatus{
3385 ContainerStatuses: []v1.ContainerStatus{
3386 runningState("containerA"),
3387 runningState("containerB"),
3388 },
3389 },
3390 expected: v1.PodStatus{
3391 Phase: v1.PodRunning,
3392 HostIP: "127.0.0.1",
3393 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3394 QOSClass: v1.PodQOSBestEffort,
3395 Conditions: []v1.PodCondition{
3396 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3397 {Type: v1.PodReady, Status: v1.ConditionTrue},
3398 {Type: v1.ContainersReady, Status: v1.ConditionTrue},
3399 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3400 },
3401 ContainerStatuses: []v1.ContainerStatus{
3402 ready(waitingWithLastTerminationUnknown("containerA", 1)),
3403 ready(waitingWithLastTerminationUnknown("containerB", 1)),
3404 },
3405 },
3406 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3407 Type: v1.PodReadyToStartContainers,
3408 Status: v1.ConditionTrue,
3409 },
3410 },
3411 {
3412 name: "terminal phase cannot be changed (apiserver previous is succeeded)",
3413 pod: &v1.Pod{
3414 Spec: desiredState,
3415 Status: v1.PodStatus{
3416 Phase: v1.PodSucceeded,
3417 ContainerStatuses: []v1.ContainerStatus{
3418 runningState("containerA"),
3419 runningState("containerB"),
3420 },
3421 },
3422 },
3423 currentStatus: &kubecontainer.PodStatus{},
3424 previousStatus: v1.PodStatus{
3425 ContainerStatuses: []v1.ContainerStatus{
3426 runningState("containerA"),
3427 runningState("containerB"),
3428 },
3429 },
3430 expected: v1.PodStatus{
3431 Phase: v1.PodSucceeded,
3432 HostIP: "127.0.0.1",
3433 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3434 QOSClass: v1.PodQOSBestEffort,
3435 Conditions: []v1.PodCondition{
3436 {Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
3437 {Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3438 {Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3439 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3440 },
3441 ContainerStatuses: []v1.ContainerStatus{
3442 ready(waitingWithLastTerminationUnknown("containerA", 1)),
3443 ready(waitingWithLastTerminationUnknown("containerB", 1)),
3444 },
3445 },
3446 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3447 Type: v1.PodReadyToStartContainers,
3448 Status: v1.ConditionFalse,
3449 },
3450 },
3451 {
3452 name: "terminal phase from previous status must remain terminal, restartAlways",
3453 pod: &v1.Pod{
3454 Spec: desiredState,
3455 Status: v1.PodStatus{
3456 Phase: v1.PodRunning,
3457 ContainerStatuses: []v1.ContainerStatus{
3458 runningState("containerA"),
3459 runningState("containerB"),
3460 },
3461 },
3462 },
3463 currentStatus: &kubecontainer.PodStatus{},
3464 previousStatus: v1.PodStatus{
3465 Phase: v1.PodSucceeded,
3466 ContainerStatuses: []v1.ContainerStatus{
3467 runningState("containerA"),
3468 runningState("containerB"),
3469 },
3470
3471 Reason: "Test",
3472 Message: "test",
3473 },
3474 expected: v1.PodStatus{
3475 Phase: v1.PodSucceeded,
3476 HostIP: "127.0.0.1",
3477 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3478 QOSClass: v1.PodQOSBestEffort,
3479 Conditions: []v1.PodCondition{
3480 {Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
3481 {Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3482 {Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3483 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3484 },
3485 ContainerStatuses: []v1.ContainerStatus{
3486 ready(waitingWithLastTerminationUnknown("containerA", 1)),
3487 ready(waitingWithLastTerminationUnknown("containerB", 1)),
3488 },
3489 Reason: "Test",
3490 Message: "test",
3491 },
3492 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3493 Type: v1.PodReadyToStartContainers,
3494 Status: v1.ConditionFalse,
3495 },
3496 },
3497 {
3498 name: "terminal phase from previous status must remain terminal, restartNever",
3499 pod: &v1.Pod{
3500 Spec: v1.PodSpec{
3501 NodeName: "machine",
3502 Containers: []v1.Container{
3503 {Name: "containerA"},
3504 {Name: "containerB"},
3505 },
3506 RestartPolicy: v1.RestartPolicyNever,
3507 },
3508 Status: v1.PodStatus{
3509 Phase: v1.PodRunning,
3510 ContainerStatuses: []v1.ContainerStatus{
3511 runningState("containerA"),
3512 runningState("containerB"),
3513 },
3514 },
3515 },
3516 currentStatus: &kubecontainer.PodStatus{},
3517 previousStatus: v1.PodStatus{
3518 Phase: v1.PodSucceeded,
3519 ContainerStatuses: []v1.ContainerStatus{
3520 succeededState("containerA"),
3521 succeededState("containerB"),
3522 },
3523
3524 Reason: "Test",
3525 Message: "test",
3526 },
3527 expected: v1.PodStatus{
3528 Phase: v1.PodSucceeded,
3529 HostIP: "127.0.0.1",
3530 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3531 QOSClass: v1.PodQOSBestEffort,
3532 Conditions: []v1.PodCondition{
3533 {Type: v1.PodInitialized, Status: v1.ConditionTrue, Reason: "PodCompleted"},
3534 {Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3535 {Type: v1.ContainersReady, Status: v1.ConditionFalse, Reason: "PodCompleted"},
3536 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3537 },
3538 ContainerStatuses: []v1.ContainerStatus{
3539 ready(succeededState("containerA")),
3540 ready(succeededState("containerB")),
3541 },
3542 Reason: "Test",
3543 Message: "test",
3544 },
3545 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3546 Type: v1.PodReadyToStartContainers,
3547 Status: v1.ConditionFalse,
3548 },
3549 },
3550 {
3551 name: "running can revert to pending",
3552 pod: &v1.Pod{
3553 Spec: desiredState,
3554 Status: v1.PodStatus{
3555 Phase: v1.PodRunning,
3556 ContainerStatuses: []v1.ContainerStatus{
3557 runningState("containerA"),
3558 runningState("containerB"),
3559 },
3560 },
3561 },
3562 currentStatus: sandboxReadyStatus,
3563 previousStatus: v1.PodStatus{
3564 ContainerStatuses: []v1.ContainerStatus{
3565 waitingState("containerA"),
3566 waitingState("containerB"),
3567 },
3568 },
3569 expected: v1.PodStatus{
3570 Phase: v1.PodPending,
3571 HostIP: "127.0.0.1",
3572 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3573 QOSClass: v1.PodQOSBestEffort,
3574 Conditions: []v1.PodCondition{
3575 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3576 {Type: v1.PodReady, Status: v1.ConditionTrue},
3577 {Type: v1.ContainersReady, Status: v1.ConditionTrue},
3578 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3579 },
3580 ContainerStatuses: []v1.ContainerStatus{
3581 ready(waitingStateWithReason("containerA", "ContainerCreating")),
3582 ready(waitingStateWithReason("containerB", "ContainerCreating")),
3583 },
3584 },
3585 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3586 Type: v1.PodReadyToStartContainers,
3587 Status: v1.ConditionTrue,
3588 },
3589 },
3590 {
3591 name: "reason and message are preserved when phase doesn't change",
3592 pod: &v1.Pod{
3593 Spec: desiredState,
3594 Status: v1.PodStatus{
3595 Phase: v1.PodRunning,
3596 ContainerStatuses: []v1.ContainerStatus{
3597 waitingState("containerA"),
3598 waitingState("containerB"),
3599 },
3600 },
3601 },
3602 currentStatus: &kubecontainer.PodStatus{
3603 SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
3604 ContainerStatuses: []*kubecontainer.Status{
3605 {
3606 ID: kubecontainer.ContainerID{ID: "foo"},
3607 Name: "containerB",
3608 StartedAt: time.Unix(1, 0).UTC(),
3609 State: kubecontainer.ContainerStateRunning,
3610 },
3611 },
3612 },
3613 previousStatus: v1.PodStatus{
3614 Phase: v1.PodPending,
3615 Reason: "Test",
3616 Message: "test",
3617 ContainerStatuses: []v1.ContainerStatus{
3618 waitingState("containerA"),
3619 runningState("containerB"),
3620 },
3621 },
3622 expected: v1.PodStatus{
3623 Phase: v1.PodPending,
3624 Reason: "Test",
3625 Message: "test",
3626 HostIP: "127.0.0.1",
3627 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3628 QOSClass: v1.PodQOSBestEffort,
3629 Conditions: []v1.PodCondition{
3630 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3631 {Type: v1.PodReady, Status: v1.ConditionTrue},
3632 {Type: v1.ContainersReady, Status: v1.ConditionTrue},
3633 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3634 },
3635 ContainerStatuses: []v1.ContainerStatus{
3636 ready(waitingStateWithReason("containerA", "ContainerCreating")),
3637 ready(withID(runningStateWithStartedAt("containerB", time.Unix(1, 0).UTC()), "://foo")),
3638 },
3639 },
3640 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3641 Type: v1.PodReadyToStartContainers,
3642 Status: v1.ConditionTrue,
3643 },
3644 },
3645 {
3646 name: "reason and message are cleared when phase changes",
3647 pod: &v1.Pod{
3648 Spec: desiredState,
3649 Status: v1.PodStatus{
3650 Phase: v1.PodPending,
3651 ContainerStatuses: []v1.ContainerStatus{
3652 waitingState("containerA"),
3653 waitingState("containerB"),
3654 },
3655 },
3656 },
3657 currentStatus: &kubecontainer.PodStatus{
3658 SandboxStatuses: sandboxReadyStatus.SandboxStatuses,
3659 ContainerStatuses: []*kubecontainer.Status{
3660 {
3661 ID: kubecontainer.ContainerID{ID: "c1"},
3662 Name: "containerA",
3663 StartedAt: time.Unix(1, 0).UTC(),
3664 State: kubecontainer.ContainerStateRunning,
3665 },
3666 {
3667 ID: kubecontainer.ContainerID{ID: "c2"},
3668 Name: "containerB",
3669 StartedAt: time.Unix(2, 0).UTC(),
3670 State: kubecontainer.ContainerStateRunning,
3671 },
3672 },
3673 },
3674 previousStatus: v1.PodStatus{
3675 Phase: v1.PodPending,
3676 Reason: "Test",
3677 Message: "test",
3678 ContainerStatuses: []v1.ContainerStatus{
3679 runningState("containerA"),
3680 runningState("containerB"),
3681 },
3682 },
3683 expected: v1.PodStatus{
3684 Phase: v1.PodRunning,
3685 HostIP: "127.0.0.1",
3686 HostIPs: []v1.HostIP{{IP: "127.0.0.1"}, {IP: "::1"}},
3687 QOSClass: v1.PodQOSBestEffort,
3688 Conditions: []v1.PodCondition{
3689 {Type: v1.PodInitialized, Status: v1.ConditionTrue},
3690 {Type: v1.PodReady, Status: v1.ConditionTrue},
3691 {Type: v1.ContainersReady, Status: v1.ConditionTrue},
3692 {Type: v1.PodScheduled, Status: v1.ConditionTrue},
3693 },
3694 ContainerStatuses: []v1.ContainerStatus{
3695 ready(withID(runningStateWithStartedAt("containerA", time.Unix(1, 0).UTC()), "://c1")),
3696 ready(withID(runningStateWithStartedAt("containerB", time.Unix(2, 0).UTC()), "://c2")),
3697 },
3698 },
3699 expectedPodReadyToStartContainersCondition: v1.PodCondition{
3700 Type: v1.PodReadyToStartContainers,
3701 Status: v1.ConditionTrue,
3702 },
3703 },
3704 }
3705 for _, test := range tests {
3706 for _, enablePodReadyToStartContainersCondition := range []bool{false, true} {
3707 t.Run(test.name, func(t *testing.T) {
3708 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodDisruptionConditions, test.enablePodDisruptionConditions)()
3709 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodReadyToStartContainersCondition, enablePodReadyToStartContainersCondition)()
3710 testKubelet := newTestKubelet(t, false)
3711 defer testKubelet.Cleanup()
3712 kl := testKubelet.kubelet
3713 kl.statusManager.SetPodStatus(test.pod, test.previousStatus)
3714 for _, name := range test.unreadyContainer {
3715 kl.readinessManager.Set(kubecontainer.BuildContainerID("", findContainerStatusByName(test.expected, name).ContainerID), results.Failure, test.pod)
3716 }
3717 expected := test.expected.DeepCopy()
3718 actual := kl.generateAPIPodStatus(test.pod, test.currentStatus, test.isPodTerminal)
3719 if enablePodReadyToStartContainersCondition {
3720 expected.Conditions = append([]v1.PodCondition{test.expectedPodReadyToStartContainersCondition}, expected.Conditions...)
3721 }
3722 if test.enablePodDisruptionConditions {
3723 expected.Conditions = append([]v1.PodCondition{test.expectedPodDisruptionCondition}, expected.Conditions...)
3724 }
3725 if !apiequality.Semantic.DeepEqual(*expected, actual) {
3726 t.Fatalf("Unexpected status: %s", cmp.Diff(*expected, actual))
3727 }
3728 })
3729 }
3730 }
3731 }
3732
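// Test_generateAPIPodStatusForInPlaceVPAEnabled verifies that, with InPlacePodVerticalScaling
// enabled, generateAPIPodStatus reports an empty Resize status for these pods even though their
// stored status still says "InProgress".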
3733 func Test_generateAPIPodStatusForInPlaceVPAEnabled(t *testing.T) {
3734 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
3735 testContainerName := "ctr0"
3736 testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
3737
3738 CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
3739 CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
3740 CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
3741 CPU1AndMem1GAndStorage2GAndCustomResource := CPU1AndMem1GAndStorage2G.DeepCopy()
3742 CPU1AndMem1GAndStorage2GAndCustomResource["unknown-resource"] = resource.MustParse("1")
3743
3744 testKubecontainerPodStatus := kubecontainer.PodStatus{
3745 ContainerStatuses: []*kubecontainer.Status{
3746 {
3747 ID: testContainerID,
3748 Name: testContainerName,
3749 Resources: &kubecontainer.ContainerResources{
3750 CPURequest: CPU1AndMem1G.Cpu(),
3751 MemoryRequest: CPU1AndMem1G.Memory(),
3752 CPULimit: CPU1AndMem1G.Cpu(),
3753 MemoryLimit: CPU1AndMem1G.Memory(),
3754 },
3755 },
3756 },
3757 }
3758
3759 tests := []struct {
3760 name string
3761 pod *v1.Pod
3762 oldStatus *v1.PodStatus
3763 }{
3764 {
3765 name: "custom resource in ResourcesAllocated, resize should be null",
3766 pod: &v1.Pod{
3767 ObjectMeta: metav1.ObjectMeta{
3768 UID: "1234560",
3769 Name: "foo0",
3770 Namespace: "bar0",
3771 },
3772 Spec: v1.PodSpec{
3773 NodeName: "machine",
3774 Containers: []v1.Container{
3775 {
3776 Name: testContainerName,
3777 Image: "img",
3778 Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2GAndCustomResource, Requests: CPU1AndMem1GAndStorage2GAndCustomResource},
3779 },
3780 },
3781 RestartPolicy: v1.RestartPolicyAlways,
3782 },
3783 Status: v1.PodStatus{
3784 ContainerStatuses: []v1.ContainerStatus{
3785 {
3786 Name: testContainerName,
3787 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
3788 AllocatedResources: CPU1AndMem1GAndStorage2GAndCustomResource,
3789 },
3790 },
3791 Resize: "InProgress",
3792 },
3793 },
3794 },
3795 {
3796 name: "cpu/memory resource in ResourcesAllocated, resize should be null",
3797 pod: &v1.Pod{
3798 ObjectMeta: metav1.ObjectMeta{
3799 UID: "1234560",
3800 Name: "foo0",
3801 Namespace: "bar0",
3802 },
3803 Spec: v1.PodSpec{
3804 NodeName: "machine",
3805 Containers: []v1.Container{
3806 {
3807 Name: testContainerName,
3808 Image: "img",
3809 Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
3810 },
3811 },
3812 RestartPolicy: v1.RestartPolicyAlways,
3813 },
3814 Status: v1.PodStatus{
3815 ContainerStatuses: []v1.ContainerStatus{
3816 {
3817 Name: testContainerName,
3818 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
3819 AllocatedResources: CPU1AndMem1GAndStorage2G,
3820 },
3821 },
3822 Resize: "InProgress",
3823 },
3824 },
3825 },
3826 }
3827 for _, test := range tests {
3828 t.Run(test.name, func(t *testing.T) {
3829 testKubelet := newTestKubelet(t, false)
3830 defer testKubelet.Cleanup()
3831 kl := testKubelet.kubelet
3832
3833 oldStatus := test.pod.Status
3834 kl.statusManager = status.NewFakeManager()
3835 kl.statusManager.SetPodStatus(test.pod, oldStatus)
3836 actual := kl.generateAPIPodStatus(test.pod, &testKubecontainerPodStatus, false)
3837
3838 if actual.Resize != "" {
3839 t.Fatalf("Unexpected Resize status: %s", actual.Resize)
3840 }
3841 })
3842 }
3843 }
3844
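// findContainerStatusByName looks up a container status by name across the init, regular, and
// ephemeral container statuses of a pod status.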
3845 func findContainerStatusByName(status v1.PodStatus, name string) *v1.ContainerStatus {
3846 for i, c := range status.InitContainerStatuses {
3847 if c.Name == name {
3848 return &status.InitContainerStatuses[i]
3849 }
3850 }
3851 for i, c := range status.ContainerStatuses {
3852 if c.Name == name {
3853 return &status.ContainerStatuses[i]
3854 }
3855 }
3856 for i, c := range status.EphemeralContainerStatuses {
3857 if c.Name == name {
3858 return &status.EphemeralContainerStatuses[i]
3859 }
3860 }
3861 return nil
3862 }
3863
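// TestGetExec verifies that GetExec returns a streaming redirect from the fake streaming runtime
// for a known pod and container, and an error for unknown pods or containers.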
3864 func TestGetExec(t *testing.T) {
3865 const (
3866 podName = "podFoo"
3867 podNamespace = "nsFoo"
3868 podUID types.UID = "12345678"
3869 containerID = "containerFoo"
3870 tty = true
3871 )
3872 var (
3873 podFullName = kubecontainer.GetPodFullName(podWithUIDNameNs(podUID, podName, podNamespace))
3874 )
3875
3876 testcases := []struct {
3877 description string
3878 podFullName string
3879 container string
3880 command []string
3881 expectError bool
3882 }{{
3883 description: "success case",
3884 podFullName: podFullName,
3885 container: containerID,
3886 command: []string{"ls"},
3887 expectError: false,
3888 }, {
3889 description: "no such pod",
3890 podFullName: "bar" + podFullName,
3891 container: containerID,
3892 command: []string{"ls"},
3893 expectError: true,
3894 }, {
3895 description: "no such container",
3896 podFullName: podFullName,
3897 container: "containerBar",
3898 command: []string{"ls"},
3899 expectError: true,
3900 }, {
3901 description: "null exec command",
3902 podFullName: podFullName,
3903 container: containerID,
3904 expectError: false,
3905 }, {
3906 description: "multi exec commands",
3907 podFullName: podFullName,
3908 container: containerID,
3909 command: []string{"bash", "-c", "ls"},
3910 expectError: false,
3911 }}
3912
3913 for _, tc := range testcases {
3914 t.Run(tc.description, func(t *testing.T) {
3915 ctx := context.Background()
3916 testKubelet := newTestKubelet(t, false)
3917 defer testKubelet.Cleanup()
3918 kubelet := testKubelet.kubelet
3919 testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
3920 {Pod: &kubecontainer.Pod{
3921 ID: podUID,
3922 Name: podName,
3923 Namespace: podNamespace,
3924 Containers: []*kubecontainer.Container{
3925 {Name: containerID,
3926 ID: kubecontainer.ContainerID{Type: "test", ID: containerID},
3927 },
3928 },
3929 }},
3930 }
3931
3932 description := "streaming - " + tc.description
3933 fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
3934 kubelet.containerRuntime = fakeRuntime
3935 kubelet.streamingRuntime = fakeRuntime
3936
3937 redirect, err := kubelet.GetExec(ctx, tc.podFullName, podUID, tc.container, tc.command, remotecommand.Options{})
3938 if tc.expectError {
3939 assert.Error(t, err, description)
3940 } else {
3941 assert.NoError(t, err, description)
3942 assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
3943 }
3944 })
3945 }
3946 }
3947
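// TestGetPortForward verifies that GetPortForward returns a streaming redirect for a known pod
// and an error for an unknown one.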
3948 func TestGetPortForward(t *testing.T) {
3949 const (
3950 podName = "podFoo"
3951 podNamespace = "nsFoo"
3952 podUID types.UID = "12345678"
3953 port int32 = 5000
3954 )
3955
3956 testcases := []struct {
3957 description string
3958 podName string
3959 expectError bool
3960 }{{
3961 description: "success case",
3962 podName: podName,
3963 }, {
3964 description: "no such pod",
3965 podName: "bar",
3966 expectError: true,
3967 }}
3968
3969 for _, tc := range testcases {
3970 ctx := context.Background()
3971 testKubelet := newTestKubelet(t, false)
3972 defer testKubelet.Cleanup()
3973 kubelet := testKubelet.kubelet
3974 testKubelet.fakeRuntime.PodList = []*containertest.FakePod{
3975 {Pod: &kubecontainer.Pod{
3976 ID: podUID,
3977 Name: podName,
3978 Namespace: podNamespace,
3979 Containers: []*kubecontainer.Container{
3980 {Name: "foo",
3981 ID: kubecontainer.ContainerID{Type: "test", ID: "foo"},
3982 },
3983 },
3984 }},
3985 }
3986
3987 description := "streaming - " + tc.description
3988 fakeRuntime := &containertest.FakeStreamingRuntime{FakeRuntime: testKubelet.fakeRuntime}
3989 kubelet.containerRuntime = fakeRuntime
3990 kubelet.streamingRuntime = fakeRuntime
3991
3992 redirect, err := kubelet.GetPortForward(ctx, tc.podName, podNamespace, podUID, portforward.V4Options{})
3993 if tc.expectError {
3994 assert.Error(t, err, description)
3995 } else {
3996 assert.NoError(t, err, description)
3997 assert.Equal(t, containertest.FakeHost, redirect.Host, description+": redirect")
3998 }
3999 }
4000 }
4001
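// TestTruncatePodHostname checks that hostnames longer than 63 characters are truncated and that
// a trailing '.' or '-' left over from the truncation is trimmed.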
4002 func TestTruncatePodHostname(t *testing.T) {
4003 for c, test := range map[string]struct {
4004 input string
4005 output string
4006 }{
4007 "valid hostname": {
4008 input: "test.pod.hostname",
4009 output: "test.pod.hostname",
4010 },
4011 "too long hostname": {
4012 input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567.",
4013 output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.1234567",
4014 },
4015 "hostname end with .": {
4016 input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456.1234567.",
4017 output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456",
4018 },
4019 "hostname end with -": {
4020 input: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456-1234567.",
4021 output: "1234567.1234567.1234567.1234567.1234567.1234567.1234567.123456",
4022 },
4023 } {
4024 t.Logf("TestCase: %q", c)
4025 output, err := truncatePodHostnameIfNeeded("test-pod", test.input)
4026 assert.NoError(t, err)
4027 assert.Equal(t, test.output, output)
4028 }
4029 }
4030
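// TestGenerateAPIPodStatusHostNetworkPodIPs verifies that host-network pods take their pod IPs
// from the CRI status when available and otherwise fall back to the node's addresses, preferring
// InternalIP and keeping the IPv4 address first in dual-stack results.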
4031 func TestGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
4032 testcases := []struct {
4033 name string
4034 nodeAddresses []v1.NodeAddress
4035 criPodIPs []string
4036 podIPs []v1.PodIP
4037 }{
4038 {
4039 name: "Simple",
4040 nodeAddresses: []v1.NodeAddress{
4041 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4042 },
4043 podIPs: []v1.PodIP{
4044 {IP: "10.0.0.1"},
4045 },
4046 },
4047 {
4048 name: "InternalIP is preferred over ExternalIP",
4049 nodeAddresses: []v1.NodeAddress{
4050 {Type: v1.NodeExternalIP, Address: "192.168.0.1"},
4051 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4052 },
4053 podIPs: []v1.PodIP{
4054 {IP: "10.0.0.1"},
4055 },
4056 },
4057 {
4058 name: "Single-stack addresses in dual-stack cluster",
4059 nodeAddresses: []v1.NodeAddress{
4060 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4061 },
4062 podIPs: []v1.PodIP{
4063 {IP: "10.0.0.1"},
4064 },
4065 },
4066 {
4067 name: "Multiple single-stack addresses in dual-stack cluster",
4068 nodeAddresses: []v1.NodeAddress{
4069 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4070 {Type: v1.NodeInternalIP, Address: "10.0.0.2"},
4071 {Type: v1.NodeExternalIP, Address: "192.168.0.1"},
4072 },
4073 podIPs: []v1.PodIP{
4074 {IP: "10.0.0.1"},
4075 },
4076 },
4077 {
4078 name: "Dual-stack addresses in dual-stack cluster",
4079 nodeAddresses: []v1.NodeAddress{
4080 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4081 {Type: v1.NodeInternalIP, Address: "fd01::1234"},
4082 },
4083 podIPs: []v1.PodIP{
4084 {IP: "10.0.0.1"},
4085 {IP: "fd01::1234"},
4086 },
4087 },
4088 {
4089 name: "CRI PodIPs override NodeAddresses",
4090 nodeAddresses: []v1.NodeAddress{
4091 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4092 {Type: v1.NodeInternalIP, Address: "fd01::1234"},
4093 },
4094 criPodIPs: []string{"192.168.0.1"},
4095 podIPs: []v1.PodIP{
4096 {IP: "192.168.0.1"},
4097 {IP: "fd01::1234"},
4098 },
4099 },
4100 {
4101 name: "CRI dual-stack PodIPs override NodeAddresses",
4102 nodeAddresses: []v1.NodeAddress{
4103 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4104 {Type: v1.NodeInternalIP, Address: "fd01::1234"},
4105 },
4106 criPodIPs: []string{"192.168.0.1", "2001:db8::2"},
4107 podIPs: []v1.PodIP{
4108 {IP: "192.168.0.1"},
4109 {IP: "2001:db8::2"},
4110 },
4111 },
4112 {
4113
4114 name: "CRI dual-stack PodIPs override NodeAddresses prefer IPv4",
4115 nodeAddresses: []v1.NodeAddress{
4116 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4117 {Type: v1.NodeInternalIP, Address: "fd01::1234"},
4118 },
4119 criPodIPs: []string{"2001:db8::2", "192.168.0.1"},
4120 podIPs: []v1.PodIP{
4121 {IP: "192.168.0.1"},
4122 {IP: "2001:db8::2"},
4123 },
4124 },
4125 }
4126
4127 for _, tc := range testcases {
4128 t.Run(tc.name, func(t *testing.T) {
4129 testKubelet := newTestKubelet(t, false)
4130 defer testKubelet.Cleanup()
4131 kl := testKubelet.kubelet
4132
4133 kl.nodeLister = testNodeLister{nodes: []*v1.Node{
4134 {
4135 ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
4136 Status: v1.NodeStatus{
4137 Addresses: tc.nodeAddresses,
4138 },
4139 },
4140 }}
4141
4142 pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
4143 pod.Spec.HostNetwork = true
4144
4145 criStatus := &kubecontainer.PodStatus{
4146 ID: pod.UID,
4147 Name: pod.Name,
4148 Namespace: pod.Namespace,
4149 IPs: tc.criPodIPs,
4150 }
4151
4152 status := kl.generateAPIPodStatus(pod, criStatus, false)
4153 if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
4154 t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
4155 }
4156 if tc.criPodIPs == nil && status.HostIP != status.PodIPs[0].IP {
4157 t.Fatalf("Expected HostIP %q to equal PodIPs[0].IP %q", status.HostIP, status.PodIPs[0].IP)
4158 }
4159 })
4160 }
4161 }
4162
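// TestNodeAddressUpdatesGenerateAPIPodStatusHostNetworkPodIPs verifies that the IPs already
// reported for a running host-network pod stay stable when the node object's addresses change,
// although a secondary IP from a newly added family may still be picked up.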
4163 func TestNodeAddressUpdatesGenerateAPIPodStatusHostNetworkPodIPs(t *testing.T) {
4164 testcases := []struct {
4165 name string
4166 nodeIPs []string
4167 nodeAddresses []v1.NodeAddress
4168 expectedPodIPs []v1.PodIP
4169 }{
4170
4171 {
4172 name: "Immutable after update node addresses single-stack",
4173 nodeIPs: []string{"10.0.0.1"},
4174 nodeAddresses: []v1.NodeAddress{
4175 {Type: v1.NodeInternalIP, Address: "1.1.1.1"},
4176 },
4177 expectedPodIPs: []v1.PodIP{
4178 {IP: "10.0.0.1"},
4179 },
4180 },
4181 {
4182 name: "Immutable after update node addresses dual-stack - primary address",
4183 nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
4184 nodeAddresses: []v1.NodeAddress{
4185 {Type: v1.NodeInternalIP, Address: "1.1.1.1"},
4186 {Type: v1.NodeInternalIP, Address: "2001:db8::2"},
4187 },
4188 expectedPodIPs: []v1.PodIP{
4189 {IP: "10.0.0.1"},
4190 {IP: "2001:db8::2"},
4191 },
4192 },
4193 {
4194 name: "Immutable after update node addresses dual-stack - secondary address",
4195 nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
4196 nodeAddresses: []v1.NodeAddress{
4197 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4198 {Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
4199 },
4200 expectedPodIPs: []v1.PodIP{
4201 {IP: "10.0.0.1"},
4202 {IP: "2001:db8::2"},
4203 },
4204 },
4205 {
4206 name: "Immutable after update node addresses dual-stack - primary and secondary address",
4207 nodeIPs: []string{"10.0.0.1", "2001:db8::2"},
4208 nodeAddresses: []v1.NodeAddress{
4209 {Type: v1.NodeInternalIP, Address: "1.1.1.1"},
4210 {Type: v1.NodeInternalIP, Address: "2001:db8:1:2:3::2"},
4211 },
4212 expectedPodIPs: []v1.PodIP{
4213 {IP: "10.0.0.1"},
4214 {IP: "2001:db8::2"},
4215 },
4216 },
4217 {
4218 name: "Update secondary after new secondary address dual-stack",
4219 nodeIPs: []string{"10.0.0.1"},
4220 nodeAddresses: []v1.NodeAddress{
4221 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4222 {Type: v1.NodeInternalIP, Address: "2001:db8::2"},
4223 },
4224 expectedPodIPs: []v1.PodIP{
4225 {IP: "10.0.0.1"},
4226 {IP: "2001:db8::2"},
4227 },
4228 },
4229 {
4230 name: "Update secondary after new secondary address dual-stack - reverse order",
4231 nodeIPs: []string{"2001:db8::2"},
4232 nodeAddresses: []v1.NodeAddress{
4233 {Type: v1.NodeInternalIP, Address: "10.0.0.1"},
4234 {Type: v1.NodeInternalIP, Address: "2001:db8::2"},
4235 },
4236 expectedPodIPs: []v1.PodIP{
4237 {IP: "2001:db8::2"},
4238 },
4239 },
4240 }
4241
4242 for _, tc := range testcases {
4243 t.Run(tc.name, func(t *testing.T) {
4244 testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
4245 defer testKubelet.Cleanup()
4246 kl := testKubelet.kubelet
4247 for _, ip := range tc.nodeIPs {
4248 kl.nodeIPs = append(kl.nodeIPs, netutils.ParseIPSloppy(ip))
4249 }
4250 kl.nodeLister = testNodeLister{nodes: []*v1.Node{
4251 {
4252 ObjectMeta: metav1.ObjectMeta{Name: string(kl.nodeName)},
4253 Status: v1.NodeStatus{
4254 Addresses: tc.nodeAddresses,
4255 },
4256 },
4257 }}
4258
4259 pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
4260 pod.Spec.HostNetwork = true
4261 for _, ip := range tc.nodeIPs {
4262 pod.Status.PodIPs = append(pod.Status.PodIPs, v1.PodIP{IP: ip})
4263 }
4264 if len(pod.Status.PodIPs) > 0 {
4265 pod.Status.PodIP = pod.Status.PodIPs[0].IP
4266 }
4267
4268 // the runtime still reports the pod's original IPs; the node lister above now has the updated addresses
4269 podStatus := &kubecontainer.PodStatus{
4270 ID: pod.UID,
4271 Name: pod.Name,
4272 Namespace: pod.Namespace,
4273 }
4274 podStatus.IPs = tc.nodeIPs
4275
4276 status := kl.generateAPIPodStatus(pod, podStatus, false)
4277 if !reflect.DeepEqual(status.PodIPs, tc.expectedPodIPs) {
4278 t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedPodIPs, status.PodIPs)
4279 }
4280 if kl.nodeIPs[0].String() != status.PodIPs[0].IP {
4281 t.Fatalf("Expected PodIPs[0].IP %q to equal the node IP %q", status.PodIPs[0].IP, kl.nodeIPs[0].String())
4282 }
4283 })
4284 }
4285 }
4286
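// TestGenerateAPIPodStatusPodIPs verifies that pod IPs reported by the CRI are reordered so that
// the IP matching the node's primary family comes first and at most one IP per family is reported.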
4287 func TestGenerateAPIPodStatusPodIPs(t *testing.T) {
4288 testcases := []struct {
4289 name string
4290 nodeIP string
4291 criPodIPs []string
4292 podIPs []v1.PodIP
4293 }{
4294 {
4295 name: "Simple",
4296 nodeIP: "",
4297 criPodIPs: []string{"10.0.0.1"},
4298 podIPs: []v1.PodIP{
4299 {IP: "10.0.0.1"},
4300 },
4301 },
4302 {
4303 name: "Dual-stack",
4304 nodeIP: "",
4305 criPodIPs: []string{"10.0.0.1", "fd01::1234"},
4306 podIPs: []v1.PodIP{
4307 {IP: "10.0.0.1"},
4308 {IP: "fd01::1234"},
4309 },
4310 },
4311 {
4312 name: "Dual-stack with explicit node IP",
4313 nodeIP: "192.168.1.1",
4314 criPodIPs: []string{"10.0.0.1", "fd01::1234"},
4315 podIPs: []v1.PodIP{
4316 {IP: "10.0.0.1"},
4317 {IP: "fd01::1234"},
4318 },
4319 },
4320 {
4321 name: "Dual-stack with CRI returning wrong family first",
4322 nodeIP: "",
4323 criPodIPs: []string{"fd01::1234", "10.0.0.1"},
4324 podIPs: []v1.PodIP{
4325 {IP: "10.0.0.1"},
4326 {IP: "fd01::1234"},
4327 },
4328 },
4329 {
4330 name: "Dual-stack with explicit node IP with CRI returning wrong family first",
4331 nodeIP: "192.168.1.1",
4332 criPodIPs: []string{"fd01::1234", "10.0.0.1"},
4333 podIPs: []v1.PodIP{
4334 {IP: "10.0.0.1"},
4335 {IP: "fd01::1234"},
4336 },
4337 },
4338 {
4339 name: "Dual-stack with IPv6 node IP",
4340 nodeIP: "fd00::5678",
4341 criPodIPs: []string{"10.0.0.1", "fd01::1234"},
4342 podIPs: []v1.PodIP{
4343 {IP: "fd01::1234"},
4344 {IP: "10.0.0.1"},
4345 },
4346 },
4347 {
4348 name: "Dual-stack with IPv6 node IP, other CRI order",
4349 nodeIP: "fd00::5678",
4350 criPodIPs: []string{"fd01::1234", "10.0.0.1"},
4351 podIPs: []v1.PodIP{
4352 {IP: "fd01::1234"},
4353 {IP: "10.0.0.1"},
4354 },
4355 },
4356 {
4357 name: "No Pod IP matching Node IP",
4358 nodeIP: "fd00::5678",
4359 criPodIPs: []string{"10.0.0.1"},
4360 podIPs: []v1.PodIP{
4361 {IP: "10.0.0.1"},
4362 },
4363 },
4364 {
4365 name: "No Pod IP matching (unspecified) Node IP",
4366 nodeIP: "",
4367 criPodIPs: []string{"fd01::1234"},
4368 podIPs: []v1.PodIP{
4369 {IP: "fd01::1234"},
4370 },
4371 },
4372 {
4373 name: "Multiple IPv4 IPs",
4374 nodeIP: "",
4375 criPodIPs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
4376 podIPs: []v1.PodIP{
4377 {IP: "10.0.0.1"},
4378 },
4379 },
4380 {
4381 name: "Multiple Dual-Stack IPs",
4382 nodeIP: "",
4383 criPodIPs: []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
4384 podIPs: []v1.PodIP{
4385 {IP: "10.0.0.1"},
4386 {IP: "fd01::1234"},
4387 },
4388 },
4389 }
4390
4391 for _, tc := range testcases {
4392 t.Run(tc.name, func(t *testing.T) {
4393 testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
4394 defer testKubelet.Cleanup()
4395 kl := testKubelet.kubelet
4396 if tc.nodeIP != "" {
4397 kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
4398 }
4399
4400 pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
4401
4402 criStatus := &kubecontainer.PodStatus{
4403 ID: pod.UID,
4404 Name: pod.Name,
4405 Namespace: pod.Namespace,
4406 IPs: tc.criPodIPs,
4407 }
4408
4409 status := kl.generateAPIPodStatus(pod, criStatus, false)
4410 if !reflect.DeepEqual(status.PodIPs, tc.podIPs) {
4411 t.Fatalf("Expected PodIPs %#v, got %#v", tc.podIPs, status.PodIPs)
4412 }
4413 if status.PodIP != status.PodIPs[0].IP {
4414 t.Fatalf("Expected PodIP %q to equal PodIPs[0].IP %q", status.PodIP, status.PodIPs[0].IP)
4415 }
4416 })
4417 }
4418 }
4419
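// TestSortPodIPs exercises the IP-ordering helper directly: the returned slice contains at most
// one IP per family, with the family of the node IP (defaulting to IPv4) listed first.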
4420 func TestSortPodIPs(t *testing.T) {
4421 testcases := []struct {
4422 name string
4423 nodeIP string
4424 podIPs []string
4425 expectedIPs []string
4426 }{
4427 {
4428 name: "Simple",
4429 nodeIP: "",
4430 podIPs: []string{"10.0.0.1"},
4431 expectedIPs: []string{"10.0.0.1"},
4432 },
4433 {
4434 name: "Dual-stack",
4435 nodeIP: "",
4436 podIPs: []string{"10.0.0.1", "fd01::1234"},
4437 expectedIPs: []string{"10.0.0.1", "fd01::1234"},
4438 },
4439 {
4440 name: "Dual-stack with explicit node IP",
4441 nodeIP: "192.168.1.1",
4442 podIPs: []string{"10.0.0.1", "fd01::1234"},
4443 expectedIPs: []string{"10.0.0.1", "fd01::1234"},
4444 },
4445 {
4446 name: "Dual-stack with CRI returning wrong family first",
4447 nodeIP: "",
4448 podIPs: []string{"fd01::1234", "10.0.0.1"},
4449 expectedIPs: []string{"10.0.0.1", "fd01::1234"},
4450 },
4451 {
4452 name: "Dual-stack with explicit node IP with CRI returning wrong family first",
4453 nodeIP: "192.168.1.1",
4454 podIPs: []string{"fd01::1234", "10.0.0.1"},
4455 expectedIPs: []string{"10.0.0.1", "fd01::1234"},
4456 },
4457 {
4458 name: "Dual-stack with IPv6 node IP",
4459 nodeIP: "fd00::5678",
4460 podIPs: []string{"10.0.0.1", "fd01::1234"},
4461 expectedIPs: []string{"fd01::1234", "10.0.0.1"},
4462 },
4463 {
4464 name: "Dual-stack with IPv6 node IP, other CRI order",
4465 nodeIP: "fd00::5678",
4466 podIPs: []string{"fd01::1234", "10.0.0.1"},
4467 expectedIPs: []string{"fd01::1234", "10.0.0.1"},
4468 },
4469 {
4470 name: "No Pod IP matching Node IP",
4471 nodeIP: "fd00::5678",
4472 podIPs: []string{"10.0.0.1"},
4473 expectedIPs: []string{"10.0.0.1"},
4474 },
4475 {
4476 name: "No Pod IP matching (unspecified) Node IP",
4477 nodeIP: "",
4478 podIPs: []string{"fd01::1234"},
4479 expectedIPs: []string{"fd01::1234"},
4480 },
4481 {
4482 name: "Multiple IPv4 IPs",
4483 nodeIP: "",
4484 podIPs: []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"},
4485 expectedIPs: []string{"10.0.0.1"},
4486 },
4487 {
4488 name: "Multiple Dual-Stack IPs",
4489 nodeIP: "",
4490 podIPs: []string{"10.0.0.1", "10.0.0.2", "fd01::1234", "10.0.0.3", "fd01::5678"},
4491 expectedIPs: []string{"10.0.0.1", "fd01::1234"},
4492 },
4493 }
4494
4495 for _, tc := range testcases {
4496 t.Run(tc.name, func(t *testing.T) {
4497 testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
4498 defer testKubelet.Cleanup()
4499 kl := testKubelet.kubelet
4500 if tc.nodeIP != "" {
4501 kl.nodeIPs = []net.IP{netutils.ParseIPSloppy(tc.nodeIP)}
4502 }
4503
4504 podIPs := kl.sortPodIPs(tc.podIPs)
4505 if !reflect.DeepEqual(podIPs, tc.expectedIPs) {
4506 t.Fatalf("Expected PodIPs %#v, got %#v", tc.expectedIPs, podIPs)
4507 }
4508 })
4509 }
4510 }
4511
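// TestConvertToAPIContainerStatusesDataRace calls convertToAPIContainerStatuses concurrently from
// two goroutines; the test only fails if the race detector (go test -race) reports a data race.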
4517 func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
4518 pod := podWithUIDNameNs("12345", "test-pod", "test-namespace")
4519
4520 testTimestamp := time.Unix(123456789, 987654321)
4521
4522 criStatus := &kubecontainer.PodStatus{
4523 ID: pod.UID,
4524 Name: pod.Name,
4525 Namespace: pod.Namespace,
4526 ContainerStatuses: []*kubecontainer.Status{
4527 {Name: "containerA", CreatedAt: testTimestamp},
4528 {Name: "containerB", CreatedAt: testTimestamp.Add(1)},
4529 },
4530 }
4531
4532 testKubelet := newTestKubelet(t, false)
4533 defer testKubelet.Cleanup()
4534 kl := testKubelet.kubelet
4535
4536 // convertToAPIContainerStatuses must be safe for concurrent use; run it from two goroutines
4537 // so the race detector can catch any unsynchronized access to shared state.
4540 for i := 0; i < 2; i++ {
4541 go func() {
4542 kl.convertToAPIContainerStatuses(pod, criStatus, []v1.ContainerStatus{}, []v1.Container{}, false, false)
4543 }()
4544 }
4545 }
4546
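// TestConvertToAPIContainerStatusesForResources verifies that, with InPlacePodVerticalScaling
// enabled, running containers report AllocatedResources and Resources derived from the pod's
// recorded allocation and the CRI-reported resources rather than from the previous API status.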
4547 func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
4548 defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)()
4549 nowTime := time.Now()
4550 testContainerName := "ctr0"
4551 testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
4552 testContainer := v1.Container{
4553 Name: testContainerName,
4554 Image: "img",
4555 }
4556 testContainerStatus := v1.ContainerStatus{
4557 Name: testContainerName,
4558 }
4559 testPod := &v1.Pod{
4560 ObjectMeta: metav1.ObjectMeta{
4561 UID: "123456",
4562 Name: "foo",
4563 Namespace: "bar",
4564 },
4565 Spec: v1.PodSpec{
4566 Containers: []v1.Container{testContainer},
4567 },
4568 Status: v1.PodStatus{
4569 ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
4570 },
4571 }
4572 testKubeContainerStatus := kubecontainer.Status{
4573 Name: testContainerName,
4574 ID: testContainerID,
4575 Image: "img",
4576 ImageID: "1234",
4577 ImageRef: "img1234",
4578 State: kubecontainer.ContainerStateRunning,
4579 StartedAt: nowTime,
4580 }
4581 testPodStatus := &kubecontainer.PodStatus{
4582 ID: testPod.UID,
4583 Name: testPod.Name,
4584 Namespace: testPod.Namespace,
4585 ContainerStatuses: []*kubecontainer.Status{&testKubeContainerStatus},
4586 }
4587 CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
4588 CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
4589 CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
4590 CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
4591 CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
4592 CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
4593
4594 testKubelet := newTestKubelet(t, false)
4595 defer testKubelet.Cleanup()
4596 kubelet := testKubelet.kubelet
4597 kubelet.statusManager = status.NewFakeManager()
4598
4599 idx := 0
4600 for tdesc, tc := range map[string]struct {
4601 Resources []v1.ResourceRequirements
4602 OldStatus []v1.ContainerStatus
4603 Expected []v1.ContainerStatus
4604 }{
4605 "GuaranteedQoSPod with CPU and memory CRI status": {
4606 Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
4607 OldStatus: []v1.ContainerStatus{
4608 {
4609 Name: testContainerName,
4610 Image: "img",
4611 ImageID: "img1234",
4612 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4613 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
4614 },
4615 },
4616 Expected: []v1.ContainerStatus{
4617 {
4618 Name: testContainerName,
4619 ContainerID: testContainerID.String(),
4620 Image: "img",
4621 ImageID: "img1234",
4622 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4623 AllocatedResources: CPU1AndMem1G,
4624 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
4625 },
4626 },
4627 },
4628 "BurstableQoSPod with CPU and memory CRI status": {
4629 Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
4630 OldStatus: []v1.ContainerStatus{
4631 {
4632 Name: testContainerName,
4633 Image: "img",
4634 ImageID: "img1234",
4635 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4636 Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
4637 },
4638 },
4639 Expected: []v1.ContainerStatus{
4640 {
4641 Name: testContainerName,
4642 ContainerID: testContainerID.String(),
4643 Image: "img",
4644 ImageID: "img1234",
4645 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4646 AllocatedResources: CPU1AndMem1G,
4647 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
4648 },
4649 },
4650 },
4651 "GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
4652 Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
4653 OldStatus: []v1.ContainerStatus{
4654 {
4655 Name: testContainerName,
4656 Image: "img",
4657 ImageID: "img1234",
4658 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4659 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
4660 },
4661 },
4662 Expected: []v1.ContainerStatus{
4663 {
4664 Name: testContainerName,
4665 ContainerID: testContainerID.String(),
4666 Image: "img",
4667 ImageID: "img1234",
4668 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4669 AllocatedResources: CPU1AndMem1GAndStorage2G,
4670 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
4671 },
4672 },
4673 },
4674 "BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
4675 Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
4676 OldStatus: []v1.ContainerStatus{
4677 {
4678 Name: testContainerName,
4679 Image: "img",
4680 ImageID: "img1234",
4681 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4682 Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
4683 },
4684 },
4685 Expected: []v1.ContainerStatus{
4686 {
4687 Name: testContainerName,
4688 ContainerID: testContainerID.String(),
4689 Image: "img",
4690 ImageID: "img1234",
4691 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4692 AllocatedResources: CPU1AndMem1GAndStorage2G,
4693 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
4694 },
4695 },
4696 },
4697 "BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
4698 Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
4699 OldStatus: []v1.ContainerStatus{
4700 {
4701 Name: testContainerName,
4702 Image: "img",
4703 ImageID: "img1234",
4704 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4705 },
4706 },
4707 Expected: []v1.ContainerStatus{
4708 {
4709 Name: testContainerName,
4710 ContainerID: testContainerID.String(),
4711 Image: "img",
4712 ImageID: "img1234",
4713 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4714 AllocatedResources: CPU1AndMem1GAndStorage2G,
4715 Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
4716 },
4717 },
4718 },
4719 "BestEffortQoSPod": {
4720 OldStatus: []v1.ContainerStatus{
4721 {
4722 Name: testContainerName,
4723 Image: "img",
4724 ImageID: "img1234",
4725 State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
4726 Resources: &v1.ResourceRequirements{},
4727 },
4728 },
4729 Expected: []v1.ContainerStatus{
4730 {
4731 Name: testContainerName,
4732 ContainerID: testContainerID.String(),
4733 Image: "img",
4734 ImageID: "img1234",
4735 State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
4736 Resources: &v1.ResourceRequirements{},
4737 },
4738 },
4739 },
4740 } {
4741 tPod := testPod.DeepCopy()
4742 tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
4743 for i := range tPod.Spec.Containers {
4744 if tc.Resources != nil {
4745 tPod.Spec.Containers[i].Resources = tc.Resources[i]
4746 }
4747 kubelet.statusManager.SetPodAllocation(tPod)
4748 if tc.Resources != nil {
4749 tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
4750 testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
4751 MemoryLimit: tc.Resources[i].Limits.Memory(),
4752 CPULimit: tc.Resources[i].Limits.Cpu(),
4753 CPURequest: tc.Resources[i].Requests.Cpu(),
4754 }
4755 }
4756 }
4757
4758 t.Logf("TestCase: %q", tdesc)
4759 cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
4760 assert.Equal(t, tc.Expected, cStatuses)
4761 }
4762 }
4763
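// TestKubelet_HandlePodCleanups drives HandlePodCleanups against combinations of config pods,
// runtime pods, and pre-seeded pod worker state, then asserts the resulting worker state, the
// pod sync records, and the kubelet pod-count metrics (including after a retry where relevant).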
4764 func TestKubelet_HandlePodCleanups(t *testing.T) {
4765 one := int64(1)
4766 two := int64(2)
4767 deleted := metav1.NewTime(time.Unix(2, 0).UTC())
4768 type rejectedPod struct {
4769 uid types.UID
4770 reason string
4771 message string
4772 }
4773 simplePod := func() *v1.Pod {
4774 return &v1.Pod{
4775 ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
4776 Spec: v1.PodSpec{
4777 Containers: []v1.Container{
4778 {Name: "container-1"},
4779 },
4780 },
4781 }
4782 }
4783 withPhase := func(pod *v1.Pod, phase v1.PodPhase) *v1.Pod {
4784 pod.Status.Phase = phase
4785 return pod
4786 }
4787 staticPod := func() *v1.Pod {
4788 return &v1.Pod{
4789 ObjectMeta: metav1.ObjectMeta{
4790 Name: "pod1",
4791 Namespace: "ns1",
4792 UID: types.UID("1"),
4793 Annotations: map[string]string{
4794 kubetypes.ConfigSourceAnnotationKey: kubetypes.FileSource,
4795 },
4796 },
4797 Spec: v1.PodSpec{
4798 Containers: []v1.Container{
4799 {Name: "container-1"},
4800 },
4801 },
4802 }
4803 }
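// runtimePod builds the kubecontainer.Pod the fake runtime reports for the given API pod,
// with a seeded container-1 entry plus one entry per spec container.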
4804 runtimePod := func(pod *v1.Pod) *kubecontainer.Pod {
4805 runningPod := &kubecontainer.Pod{
4806 ID: types.UID(pod.UID),
4807 Name: pod.Name,
4808 Namespace: pod.Namespace,
4809 Containers: []*kubecontainer.Container{
4810 {Name: "container-1", ID: kubecontainer.ContainerID{Type: "test", ID: "c1"}},
4811 },
4812 }
4813 for i, container := range pod.Spec.Containers {
4814 runningPod.Containers = append(runningPod.Containers, &kubecontainer.Container{
4815 Name: container.Name,
4816 ID: kubecontainer.ContainerID{Type: "test", ID: fmt.Sprintf("c%d", i)},
4817 })
4818 }
4819 return runningPod
4820 }
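// mirrorPod returns a copy of pod annotated as a mirror pod and owned (as controller) by the given node.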
4821 mirrorPod := func(pod *v1.Pod, nodeName string, nodeUID types.UID) *v1.Pod {
4822 copied := pod.DeepCopy()
4823 if copied.Annotations == nil {
4824 copied.Annotations = make(map[string]string)
4825 }
4826 copied.Annotations[kubetypes.ConfigMirrorAnnotationKey] = pod.Annotations[kubetypes.ConfigHashAnnotationKey]
4827 isTrue := true
4828 copied.OwnerReferences = append(copied.OwnerReferences, metav1.OwnerReference{
4829 APIVersion: v1.SchemeGroupVersion.String(),
4830 Kind: "Node",
4831 Name: nodeName,
4832 UID: nodeUID,
4833 Controller: &isTrue,
4834 })
4835 return copied
4836 }
4837
4838 tests := []struct {
4839 name string
4840 pods []*v1.Pod
4841 runtimePods []*containertest.FakePod
4842 rejectedPods []rejectedPod
4843 terminatingErr error
4844 prepareWorker func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
4845 wantWorker func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
4846 wantWorkerAfterRetry func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord)
4847 wantErr bool
4848 expectMetrics map[string]string
4849 expectMetricsAfterRetry map[string]string
4850 }{
4851 {
4852 name: "missing pod is requested for termination with short grace period",
4853 wantErr: false,
4854 runtimePods: []*containertest.FakePod{
4855 {
4856 Pod: runtimePod(staticPod()),
4857 },
4858 },
4859 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
4860 drainAllWorkers(w)
4861 uid := types.UID("1")
4862
4863 // the orphaned runtime pod is killed and the worker retains no sync status for it
4864 if len(w.podSyncStatuses) != 0 {
4865 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
4866 }
4867 r, ok := records[uid]
4868 if !ok || len(r) != 1 || r[0].updateType != kubetypes.SyncPodKill || r[0].terminated || r[0].runningPod == nil || r[0].gracePeriod != nil {
4869 t.Fatalf("unexpected pod sync records: %#v", r)
4870 }
4871 },
4872 expectMetrics: map[string]string{
4873 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
4874 # TYPE kubelet_orphaned_runtime_pods_total counter
4875 kubelet_orphaned_runtime_pods_total 1
4876 `,
4877 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
4878 # TYPE kubelet_working_pods gauge
4879 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
4880 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
4881 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
4882 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
4883 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
4884 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
4885 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
4886 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
4887 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
4888 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
4889 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
4890 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
4891 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
4892 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
4893 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 1
4894 `,
4895 },
4896 },
4897 {
4898 name: "terminating pod that errored and is not in config is notified by the cleanup",
4899 wantErr: false,
4900 runtimePods: []*containertest.FakePod{
4901 {
4902 Pod: runtimePod(simplePod()),
4903 },
4904 },
4905 terminatingErr: errors.New("unable to terminate"),
4906 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
4907 // seed the pod worker with a running simple pod
4908 pod := simplePod()
4909 w.UpdatePod(UpdatePodOptions{
4910 UpdateType: kubetypes.SyncPodCreate,
4911 StartTime: time.Unix(1, 0).UTC(),
4912 Pod: pod,
4913 })
4914 drainAllWorkers(w)
4915
4916 // request termination of the now-deleted pod with a 2s grace period
4917 two := int64(2)
4918 deleted := metav1.NewTime(time.Unix(2, 0).UTC())
4919 updatedPod := &v1.Pod{
4920 ObjectMeta: metav1.ObjectMeta{
4921 Name: "pod1",
4922 Namespace: "ns1",
4923 UID: types.UID("1"),
4924 DeletionGracePeriodSeconds: &two,
4925 DeletionTimestamp: &deleted,
4926 },
4927 Spec: v1.PodSpec{
4928 TerminationGracePeriodSeconds: &two,
4929 Containers: []v1.Container{
4930 {Name: "container-1"},
4931 },
4932 },
4933 }
4934 w.UpdatePod(UpdatePodOptions{
4935 UpdateType: kubetypes.SyncPodKill,
4936 StartTime: time.Unix(3, 0).UTC(),
4937 Pod: updatedPod,
4938 })
4939 drainAllWorkers(w)
4940 r, ok := records[updatedPod.UID]
4941 if !ok || len(r) != 2 || r[1].gracePeriod == nil || *r[1].gracePeriod != 2 {
4942 t.Fatalf("unexpected records: %#v", records)
4943 }
4944
4945 },
4946 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
4947 uid := types.UID("1")
4948 if len(w.podSyncStatuses) != 1 {
4949 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
4950 }
4951 s, ok := w.podSyncStatuses[uid]
4952 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
4953 t.Fatalf("unexpected requested pod termination: %#v", s)
4954 }
4955
4956 // the cleanup re-sends the kill for the orphaned terminating pod, so two kill records with the 2s grace period are expected
4957 if actual, expected := records[uid], []syncPodRecord{
4958 {name: "pod1", updateType: kubetypes.SyncPodCreate},
4959 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
4960 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
4961 }; !reflect.DeepEqual(expected, actual) {
4962 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
4963 }
4964 },
4965 expectMetrics: map[string]string{
4966 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
4967 # TYPE kubelet_desired_pods gauge
4968 kubelet_desired_pods{static=""} 0
4969 kubelet_desired_pods{static="true"} 0
4970 `,
4971 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
4972 # TYPE kubelet_active_pods gauge
4973 kubelet_active_pods{static=""} 0
4974 kubelet_active_pods{static="true"} 0
4975 `,
4976 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
4977 # TYPE kubelet_orphaned_runtime_pods_total counter
4978 kubelet_orphaned_runtime_pods_total 0
4979 `,
4980 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
4981 # TYPE kubelet_working_pods gauge
4982 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
4983 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
4984 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
4985 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
4986 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
4987 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
4988 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
4989 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
4990 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
4991 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
4992 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 1
4993 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
4994 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
4995 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
4996 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
4997 `,
4998 },
4999 wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5000 uid := types.UID("1")
5001 if len(w.podSyncStatuses) != 1 {
5002 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5003 }
5004 s, ok := w.podSyncStatuses[uid]
5005 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5006 t.Fatalf("unexpected requested pod termination: %#v", s)
5007 }
5008
5009 // the retry issues one more kill and then the pod worker records the completed termination
5010 if actual, expected := records[uid], []syncPodRecord{
5011 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5012 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
5013 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
5014 // the next cleanup pass retries the kill
5015 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
5016 // and termination finally completes
5017 {name: "pod1", terminated: true},
5018 }; !reflect.DeepEqual(expected, actual) {
5019 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5020 }
5021 },
5022 },
5023 {
5024 name: "terminating pod that errored and is not in config or worker is force killed by the cleanup",
5025 wantErr: false,
5026 runtimePods: []*containertest.FakePod{
5027 {
5028 Pod: runtimePod(simplePod()),
5029 },
5030 },
5031 terminatingErr: errors.New("unable to terminate"),
5032 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5033 uid := types.UID("1")
5034 if len(w.podSyncStatuses) != 1 {
5035 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5036 }
5037 s, ok := w.podSyncStatuses[uid]
5038 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5039 t.Fatalf("unexpected requested pod termination: %#v", s)
5040 }
5041
5042 // the cleanup should record an active update that kills the runtime-only pod with the 1s grace period override
5043 expectedRunningPod := runtimePod(simplePod())
5044 if actual, expected := s.activeUpdate, (&UpdatePodOptions{
5045 RunningPod: expectedRunningPod,
5046 KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
5047 }); !reflect.DeepEqual(expected, actual) {
5048 t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
5049 }
5050
5051 // a single kill record against the running pod is expected
5052 if actual, expected := records[uid], []syncPodRecord{
5053 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5054 }; !reflect.DeepEqual(expected, actual) {
5055 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5056 }
5057 },
5058 wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5059 uid := types.UID("1")
5060 if len(w.podSyncStatuses) != 0 {
5061 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5062 }
5063
5064 // after the retry the worker has no remaining state; two forced kill attempts are on record
5065 expectedRunningPod := runtimePod(simplePod())
5066 if actual, expected := records[uid], []syncPodRecord{
5067 // forced kill from the first cleanup pass
5068 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5069 // forced kill from the retried cleanup pass
5070 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5073 }; !reflect.DeepEqual(expected, actual) {
5074 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5075 }
5076 },
5077 },
5078 {
5079 name: "pod is added to worker by sync method",
5080 wantErr: false,
5081 pods: []*v1.Pod{
5082 simplePod(),
5083 },
5084 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5085 uid := types.UID("1")
5086 if len(w.podSyncStatuses) != 1 {
5087 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5088 }
5089 s, ok := w.podSyncStatuses[uid]
5090 if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
5091 t.Fatalf("unexpected requested pod termination: %#v", s)
5092 }
5093
5094 // the pod is synced into the worker, producing a single create record
5095 if actual, expected := records[uid], []syncPodRecord{
5096 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5097 }; !reflect.DeepEqual(expected, actual) {
5098 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5099 }
5100 },
5101 expectMetrics: map[string]string{
5102 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5103 # TYPE kubelet_desired_pods gauge
5104 kubelet_desired_pods{static=""} 1
5105 kubelet_desired_pods{static="true"} 0
5106 `,
5107 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
5108 # TYPE kubelet_active_pods gauge
5109 kubelet_active_pods{static=""} 1
5110 kubelet_active_pods{static="true"} 0
5111 `,
5112 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
5113 # TYPE kubelet_orphaned_runtime_pods_total counter
5114 kubelet_orphaned_runtime_pods_total 0
5115 `,
5122 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5123 # TYPE kubelet_restarted_pods_total counter
5124 kubelet_restarted_pods_total{static=""} 1
5125 kubelet_restarted_pods_total{static="true"} 0
5126 `,
5127 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5128 # TYPE kubelet_working_pods gauge
5129 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
5130 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
5131 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5132 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5133 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5134 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
5135 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5136 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5137 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5138 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5139 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5140 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5141 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5142 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5143 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5144 `,
5145 },
5146 },
5147 {
5148 name: "pod is not added to worker by sync method because it is in a terminal phase",
5149 wantErr: false,
5150 pods: []*v1.Pod{
5151 withPhase(simplePod(), v1.PodFailed),
5152 },
5153 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5154 uid := types.UID("1")
5155 if len(w.podSyncStatuses) != 0 {
5156 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5157 }
5158 // no sync records are expected for a pod already in a terminal phase
5159 if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
5160 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5161 }
5162 },
5163 expectMetrics: map[string]string{
5164 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5165 # TYPE kubelet_desired_pods gauge
5166 kubelet_desired_pods{static=""} 1
5167 kubelet_desired_pods{static="true"} 0
5168 `,
5169 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
5170 # TYPE kubelet_active_pods gauge
5171 kubelet_active_pods{static=""} 0
5172 kubelet_active_pods{static="true"} 0
5173 `,
5174 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
5175 # TYPE kubelet_orphaned_runtime_pods_total counter
5176 kubelet_orphaned_runtime_pods_total 0
5177 `,
5184 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5185 # TYPE kubelet_restarted_pods_total counter
5186 kubelet_restarted_pods_total{static=""} 0
5187 kubelet_restarted_pods_total{static="true"} 0
5188 `,
5189 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5190 # TYPE kubelet_working_pods gauge
5191 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
5192 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
5193 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5194 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5195 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5196 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
5197 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5198 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5199 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5200 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5201 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5202 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5203 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5204 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5205 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5206 `,
5207 },
5208 },
5209 {
5210 name: "pod is not added to worker by sync method because it has been rejected",
5211 wantErr: false,
5212 pods: []*v1.Pod{
5213 simplePod(),
5214 },
5215 rejectedPods: []rejectedPod{
5216 {uid: "1", reason: "Test", message: "rejected"},
5217 },
5218 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5219 uid := types.UID("1")
5220 if len(w.podSyncStatuses) != 0 {
5221 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5222 }
5223 // no sync records are expected for a rejected pod
5224 if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
5225 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5226 }
5227 },
5228 expectMetrics: map[string]string{
5229 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5230 # TYPE kubelet_desired_pods gauge
5231 kubelet_desired_pods{static=""} 1
5232 kubelet_desired_pods{static="true"} 0
5233 `,
5234 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
5235 # TYPE kubelet_active_pods gauge
5236 kubelet_active_pods{static=""} 0
5237 kubelet_active_pods{static="true"} 0
5238 `,
5239 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
5240 # TYPE kubelet_orphaned_runtime_pods_total counter
5241 kubelet_orphaned_runtime_pods_total 0
5242 `,
5249 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5250 # TYPE kubelet_restarted_pods_total counter
5251 kubelet_restarted_pods_total{static=""} 0
5252 kubelet_restarted_pods_total{static="true"} 0
5253 `,
5254 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5255 # TYPE kubelet_working_pods gauge
5256 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
5257 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
5258 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5259 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5260 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5261 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
5262 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5263 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5264 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5265 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5266 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5267 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5268 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5269 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5270 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5271 `,
5272 },
5273 },
5274 {
5275 name: "terminating pod that is known to the config gets no update during pod cleanup",
5276 wantErr: false,
5277 pods: []*v1.Pod{
5278 {
5279 ObjectMeta: metav1.ObjectMeta{
5280 Name: "pod1",
5281 Namespace: "ns1",
5282 UID: types.UID("1"),
5283 DeletionGracePeriodSeconds: &two,
5284 DeletionTimestamp: &deleted,
5285 },
5286 Spec: v1.PodSpec{
5287 TerminationGracePeriodSeconds: &two,
5288 Containers: []v1.Container{
5289 {Name: "container-1"},
5290 },
5291 },
5292 },
5293 },
5294 runtimePods: []*containertest.FakePod{
5295 {
5296 Pod: runtimePod(simplePod()),
5297 },
5298 },
5299 terminatingErr: errors.New("unable to terminate"),
5300 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5301 // seed the worker with the pod before it is marked for deletion
5302 pod := &v1.Pod{
5303 ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "ns1", UID: types.UID("1")},
5304 Spec: v1.PodSpec{
5305 Containers: []v1.Container{
5306 {Name: "container-1"},
5307 },
5308 },
5309 }
5310 w.UpdatePod(UpdatePodOptions{
5311 UpdateType: kubetypes.SyncPodCreate,
5312 StartTime: time.Unix(1, 0).UTC(),
5313 Pod: pod,
5314 })
5315 drainAllWorkers(w)
5316
5317 // request termination of the now-deleted pod with a 2s grace period
5318 updatedPod := &v1.Pod{
5319 ObjectMeta: metav1.ObjectMeta{
5320 Name: "pod1",
5321 Namespace: "ns1",
5322 UID: types.UID("1"),
5323 DeletionGracePeriodSeconds: &two,
5324 DeletionTimestamp: &deleted,
5325 },
5326 Spec: v1.PodSpec{
5327 TerminationGracePeriodSeconds: &two,
5328 Containers: []v1.Container{
5329 {Name: "container-1"},
5330 },
5331 },
5332 }
5333 w.UpdatePod(UpdatePodOptions{
5334 UpdateType: kubetypes.SyncPodKill,
5335 StartTime: time.Unix(3, 0).UTC(),
5336 Pod: updatedPod,
5337 })
5338 drainAllWorkers(w)
5339
5340 // the worker should have recorded the create and the kill with the 2s grace period
5341 if actual, expected := records[updatedPod.UID], []syncPodRecord{
5342 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5343 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
5344 }; !reflect.DeepEqual(expected, actual) {
5345 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5346 }
5347 },
5348 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5349 uid := types.UID("1")
5350 if len(w.podSyncStatuses) != 1 {
5351 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5352 }
5353 s, ok := w.podSyncStatuses[uid]
5354 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5355 t.Fatalf("unexpected requested pod termination: %#v", s)
5356 }
5357
5358 // pod cleanup must not send another update for a terminating pod that is still in config
5359 if actual, expected := records[uid], []syncPodRecord{
5360 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5361 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &two},
5362 }; !reflect.DeepEqual(expected, actual) {
5363 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5364 }
5365 },
5366 },
5367 {
5368 name: "pod that could not start and is not in config is force terminated during pod cleanup",
5369 wantErr: false,
5370 runtimePods: []*containertest.FakePod{
5371 {
5372 Pod: runtimePod(simplePod()),
5373 },
5374 },
5375 terminatingErr: errors.New("unable to terminate"),
5376 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5377
5378 pod := staticPod()
5379 // claim the static pod's full name for a different UID so this pod can never start
5380 w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
5381
5382 w.UpdatePod(UpdatePodOptions{
5383 UpdateType: kubetypes.SyncPodCreate,
5384 StartTime: time.Unix(1, 0).UTC(),
5385 Pod: pod,
5386 })
5387 drainAllWorkers(w)
5388 // the pod never starts, so no sync records should have been produced
5389 if _, ok := records[pod.UID]; ok {
5390 t.Fatalf("unexpected records: %#v", records)
5391 }
5392
5393 },
5394 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5395
5396 // cleanup force-terminates the orphaned runtime pod, replacing the never-started pod's pending update
5397
5398 uid := types.UID("1")
5399 if len(w.podSyncStatuses) != 1 {
5400 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5401 }
5402
5403 s, ok := w.podSyncStatuses[uid]
5404 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5405 t.Errorf("unexpected requested pod termination: %#v", s)
5406 }
5407
5408 // the active update should be a kill of the running pod with the 1s grace period override
5409 expectedRunningPod := runtimePod(simplePod())
5410 if actual, expected := s.activeUpdate, (&UpdatePodOptions{
5411 RunningPod: expectedRunningPod,
5412 KillPodOptions: &KillPodOptions{PodTerminationGracePeriodSecondsOverride: &one},
5413 }); !reflect.DeepEqual(expected, actual) {
5414 t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
5415 }
5416
5417 // only the forced kill of the running pod should be recorded
5418 if actual, expected := records[uid], []syncPodRecord{
5419 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5421 }; !reflect.DeepEqual(expected, actual) {
5422 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5423 }
5424 },
5425 wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5426 uid := types.UID("1")
5427 if len(w.podSyncStatuses) != 0 {
5428 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5429 }
5430
5431 // after the retry the pod is removed from the worker, leaving two forced kill records
5432 expectedRunningPod := runtimePod(simplePod())
5433 if actual, expected := records[uid], []syncPodRecord{
5434 // forced kill from the first cleanup pass
5435 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5436 // forced kill from the retried cleanup pass
5437 {name: "pod1", updateType: kubetypes.SyncPodKill, runningPod: expectedRunningPod},
5438 }; !reflect.DeepEqual(expected, actual) {
5439 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5440 }
5441 },
5442 },
5443 {
5444 name: "pod that could not start still has a pending update and is tracked in metrics",
5445 wantErr: false,
5446 pods: []*v1.Pod{
5447 staticPod(),
5448 },
5449 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5450
5451 pod := staticPod()
5452 // mark the static pod's full name as owned by another UID so this pod cannot start
5453 w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
5454
5455 w.UpdatePod(UpdatePodOptions{
5456 UpdateType: kubetypes.SyncPodCreate,
5457 StartTime: time.Unix(1, 0).UTC(),
5458 Pod: pod,
5459 })
5460 drainAllWorkers(w)
5461 // the pod never started, so no sync records should exist yet
5462 if _, ok := records[pod.UID]; ok {
5463 t.Fatalf("unexpected records: %#v", records)
5464 }
5465
5466 },
5467 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5468 uid := types.UID("1")
5469 if len(w.podSyncStatuses) != 1 {
5470 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5471 }
5472 s, ok := w.podSyncStatuses[uid]
5473 if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || s.restartRequested || s.activeUpdate != nil || s.pendingUpdate == nil {
5474 t.Errorf("unexpected requested pod termination: %#v", s)
5475 }
5476
5477 // the pod stays pending in the worker (pendingUpdate set, no activeUpdate) and has produced no records
5478 if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
5479 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5480 }
5481 },
5482 expectMetrics: map[string]string{
5483 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5484 # TYPE kubelet_desired_pods gauge
5485 kubelet_desired_pods{static=""} 0
5486 kubelet_desired_pods{static="true"} 1
5487 `,
5488 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5489 # TYPE kubelet_working_pods gauge
5490 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
5491 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 1
5492 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5493 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5494 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5495 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
5496 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5497 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5498 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5499 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5500 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5501 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5502 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5503 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5504 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5505 `,
5506 },
5507 },
5508 {
5509 name: "pod that could not start and is not in config is force terminated without runtime during pod cleanup",
5510 wantErr: false,
5511 terminatingErr: errors.New("unable to terminate"),
5512 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5513
5514 pod := staticPod()
5515
5516 w.startedStaticPodsByFullname[kubecontainer.GetPodFullName(pod)] = types.UID("2")
5517
5518 w.UpdatePod(UpdatePodOptions{
5519 UpdateType: kubetypes.SyncPodCreate,
5520 StartTime: time.Unix(1, 0).UTC(),
5521 Pod: pod,
5522 })
5523 drainAllWorkers(w)
5524
5525 if _, ok := records[pod.UID]; ok {
5526 t.Fatalf("unexpected records: %#v", records)
5527 }
5528
5529 },
5530 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5531 uid := types.UID("1")
5532 if len(w.podSyncStatuses) != 0 {
5533 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5534 }
5535
5536 // with no runtime pod to kill, the worker drops the never-started pod entirely and records nothing
5537 if actual, expected := records[uid], []syncPodRecord(nil); !reflect.DeepEqual(expected, actual) {
5538 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5539 }
5540 },
5541 },
5542 {
5543 name: "pod that is terminating is recreated by config with the same UID",
5544 wantErr: false,
5545 pods: []*v1.Pod{
5546 func() *v1.Pod {
5547 pod := staticPod()
5548 pod.Annotations["version"] = "2"
5549 return pod
5550 }(),
5551 },
5552 // the runtime still reports the original static pod as running
5553 runtimePods: []*containertest.FakePod{
5554 {
5555 Pod: runtimePod(staticPod()),
5556 },
5557 },
5558 terminatingErr: errors.New("unable to terminate"),
5559 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5560
5561 pod := staticPod()
5562
5563 w.UpdatePod(UpdatePodOptions{
5564 UpdateType: kubetypes.SyncPodCreate,
5565 StartTime: time.Unix(1, 0).UTC(),
5566 Pod: pod,
5567 })
5568 drainAllWorkers(w)
5569
5570 // terminate the pod, then recreate it with the same UID and a new version annotation before termination completes
5571 w.UpdatePod(UpdatePodOptions{
5572 UpdateType: kubetypes.SyncPodKill,
5573 StartTime: time.Unix(2, 0).UTC(),
5574 Pod: pod,
5575 })
5576 pod = staticPod()
5577 pod.Annotations["version"] = "2"
5578 w.UpdatePod(UpdatePodOptions{
5579 UpdateType: kubetypes.SyncPodCreate,
5580 StartTime: time.Unix(3, 0).UTC(),
5581 Pod: pod,
5582 })
5583 drainAllWorkers(w)
5584
5585 // only the create and the kill are recorded; the recreate waits for the old pod to finish terminating
5586 if actual, expected := records[pod.UID], []syncPodRecord{
5587 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5588 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
5589 }; !reflect.DeepEqual(expected, actual) {
5590 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5591 }
5592
5593 },
5594 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5595 uid := types.UID("1")
5596 if len(w.podSyncStatuses) != 1 {
5597 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5598 }
5599 s, ok := w.podSyncStatuses[uid]
5600 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() || !s.restartRequested {
5601 t.Errorf("unexpected requested pod termination: %#v", s)
5602 }
5603
5604 // restartRequested is set on the worker, but no new sync records appear until the old pod terminates
5605 if actual, expected := records[uid], []syncPodRecord{
5606 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5607 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
5608 }; !reflect.DeepEqual(expected, actual) {
5609 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5610 }
5611 },
5612 expectMetrics: map[string]string{
5613 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5614 # TYPE kubelet_desired_pods gauge
5615 kubelet_desired_pods{static=""} 0
5616 kubelet_desired_pods{static="true"} 1
5617 `,
5618 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
5619 # TYPE kubelet_active_pods gauge
5620 kubelet_active_pods{static=""} 0
5621 kubelet_active_pods{static="true"} 1
5622 `,
5623 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
5624 # TYPE kubelet_orphaned_runtime_pods_total counter
5625 kubelet_orphaned_runtime_pods_total 0
5626 `,
5627 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5628 # TYPE kubelet_restarted_pods_total counter
5629 kubelet_restarted_pods_total{static=""} 0
5630 kubelet_restarted_pods_total{static="true"} 0
5631 `,
5632 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5633 # TYPE kubelet_working_pods gauge
5634 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 0
5635 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
5636 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5637 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5638 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5639 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 1
5640 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5641 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5642 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5643 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5644 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5645 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5646 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5647 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5648 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5649 `,
5650 },
5651 expectMetricsAfterRetry: map[string]string{
5652 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5653 # TYPE kubelet_restarted_pods_total counter
5654 kubelet_restarted_pods_total{static=""} 0
5655 kubelet_restarted_pods_total{static="true"} 1
5656 `,
5657 },
5658 },
5659 {
5660 name: "started pod that is not in config is force terminated during pod cleanup",
5661 wantErr: false,
5662 runtimePods: []*containertest.FakePod{
5663 {
5664 Pod: runtimePod(simplePod()),
5665 },
5666 },
5667 terminatingErr: errors.New("unable to terminate"),
5668 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5669 // send a create for the static pod to the pod worker
5670 pod := staticPod()
5671
5672 w.UpdatePod(UpdatePodOptions{
5673 UpdateType: kubetypes.SyncPodCreate,
5674 StartTime: time.Unix(1, 0).UTC(),
5675 Pod: pod,
5676 })
5677 drainAllWorkers(w)
5678
5679 // only the create should have been recorded so far
5680 if actual, expected := records[pod.UID], []syncPodRecord{
5681 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5682 }; !reflect.DeepEqual(expected, actual) {
5683 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5684 }
5685
5686 },
5687 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5688 uid := types.UID("1")
5689 if len(w.podSyncStatuses) != 1 {
5690 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5691 }
5692 s, ok := w.podSyncStatuses[uid]
5693 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5694 t.Errorf("unexpected requested pod termination: %#v", s)
5695 }
5696
5697 // cleanup should have requested termination, appending a kill record
5698 if actual, expected := records[uid], []syncPodRecord{
5699 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5700 {name: "pod1", updateType: kubetypes.SyncPodKill},
5701 }; !reflect.DeepEqual(expected, actual) {
5702 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5703 }
5704 },
5705 },
5706 {
5707 name: "started pod that is not in config or runtime is force terminated during pod cleanup",
5708 wantErr: false,
5709 runtimePods: []*containertest.FakePod{},
5710 terminatingErr: errors.New("unable to terminate"),
5711 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5712 // send a create for the static pod, including its mirror pod, to the pod worker
5713 pod := staticPod()
5714
5715 w.UpdatePod(UpdatePodOptions{
5716 UpdateType: kubetypes.SyncPodCreate,
5717 StartTime: time.Unix(1, 0).UTC(),
5718 Pod: pod,
5719 MirrorPod: mirrorPod(pod, "node-1", "node-uid-1"),
5720 })
5721 drainAllWorkers(w)
5722
5723 // only the create should have been recorded so far
5724 if actual, expected := records[pod.UID], []syncPodRecord{
5725 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5726 }; !reflect.DeepEqual(expected, actual) {
5727 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5728 }
5729
5730 },
5731 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5732 uid := types.UID("1")
5733 if len(w.podSyncStatuses) != 1 {
5734 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5735 }
5736 s, ok := w.podSyncStatuses[uid]
5737 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5738 t.Errorf("unexpected requested pod termination: %#v", s)
5739 }
5740
5741 // the worker should retain the original pod and mirror pod in its active update
5742 expectedPod := staticPod()
5743 if actual, expected := s.activeUpdate, (&UpdatePodOptions{
5744 Pod: expectedPod,
5745 MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
5746 }); !reflect.DeepEqual(expected, actual) {
5747 t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
5748 }
5749
5750 // cleanup should have requested termination, appending a kill record
5751 if actual, expected := records[uid], []syncPodRecord{
5752 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5753 {name: "pod1", updateType: kubetypes.SyncPodKill},
5754 }; !reflect.DeepEqual(expected, actual) {
5755 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5756 }
5757 },
5758 wantWorkerAfterRetry: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5759 uid := types.UID("1")
5760 if len(w.podSyncStatuses) != 1 {
5761 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5762 }
5763 s, ok := w.podSyncStatuses[uid]
5764 if !ok || !s.IsTerminationRequested() || !s.IsTerminationStarted() || !s.IsFinished() || s.IsWorking() || !s.IsDeleted() {
5765 t.Errorf("unexpected requested pod termination: %#v", s)
5766 }
5767
5768 // the active update should be unchanged after the retry
5769 expectedPod := staticPod()
5770 if actual, expected := s.activeUpdate, (&UpdatePodOptions{
5771 Pod: expectedPod,
5772 MirrorPod: mirrorPod(expectedPod, "node-1", "node-uid-1"),
5773 }); !reflect.DeepEqual(expected, actual) {
5774 t.Fatalf("unexpected pod activeUpdate: %s", cmp.Diff(expected, actual))
5775 }
5776
5777 // the retry should complete termination of the pod
5778 if actual, expected := records[uid], []syncPodRecord{
5779 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5780 {name: "pod1", updateType: kubetypes.SyncPodKill},
5781 // records appended by the successful retry
5782 {name: "pod1", updateType: kubetypes.SyncPodKill},
5783 {name: "pod1", terminated: true},
5784 }; !reflect.DeepEqual(expected, actual) {
5785 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5786 }
5787 },
5788 },
5789 {
5790 name: "terminated pod is restarted in the same invocation that it is detected",
5791 wantErr: false,
5792 pods: []*v1.Pod{
5793 func() *v1.Pod {
5794 pod := staticPod()
5795 pod.Annotations = map[string]string{"version": "2"}
5796 return pod
5797 }(),
5798 },
5799 prepareWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5800 // create a pod, send a kill, then recreate it with an updated version annotation
5801 pod := simplePod()
5802 w.UpdatePod(UpdatePodOptions{
5803 UpdateType: kubetypes.SyncPodCreate,
5804 StartTime: time.Unix(1, 0).UTC(),
5805 Pod: pod,
5806 })
5807 drainAllWorkers(w)
5808 w.UpdatePod(UpdatePodOptions{
5809 UpdateType: kubetypes.SyncPodKill,
5810 Pod: pod,
5811 })
5812 pod2 := simplePod()
5813 pod2.Annotations = map[string]string{"version": "2"}
5814 w.UpdatePod(UpdatePodOptions{
5815 UpdateType: kubetypes.SyncPodCreate,
5816 Pod: pod2,
5817 })
5818 drainAllWorkers(w)
5819 },
5820 wantWorker: func(t *testing.T, w *podWorkers, records map[types.UID][]syncPodRecord) {
5821 uid := types.UID("1")
5822 if len(w.podSyncStatuses) != 1 {
5823 t.Fatalf("unexpected sync statuses: %#v", w.podSyncStatuses)
5824 }
5825 s, ok := w.podSyncStatuses[uid]
5826 if !ok || s.IsTerminationRequested() || s.IsTerminationStarted() || s.IsFinished() || s.IsWorking() || s.IsDeleted() {
5827 t.Fatalf("unexpected requested pod termination: %#v", s)
5828 }
5829 if s.pendingUpdate != nil || s.activeUpdate == nil || s.activeUpdate.Pod == nil || s.activeUpdate.Pod.Annotations["version"] != "2" {
5830 t.Fatalf("unexpected restarted pod: %#v", s.activeUpdate.Pod)
5831 }
5832
5833 // the records should show the pod being torn down and then recreated with the same UID
5834 if actual, expected := records[uid], []syncPodRecord{
5835 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5836 {name: "pod1", updateType: kubetypes.SyncPodKill, gracePeriod: &one},
5837 {name: "pod1", terminated: true},
5838 {name: "pod1", updateType: kubetypes.SyncPodCreate},
5839 }; !reflect.DeepEqual(expected, actual) {
5840 t.Fatalf("unexpected pod sync records: %s", cmp.Diff(expected, actual, cmp.AllowUnexported(syncPodRecord{})))
5841 }
5842 },
5843 expectMetrics: map[string]string{
5844 metrics.DesiredPodCount.FQName(): `# HELP kubelet_desired_pods [ALPHA] The number of pods the kubelet is being instructed to run. static is true if the pod is not from the apiserver.
5845 # TYPE kubelet_desired_pods gauge
5846 kubelet_desired_pods{static=""} 1
5847 kubelet_desired_pods{static="true"} 0
5848 `,
5849 metrics.ActivePodCount.FQName(): `# HELP kubelet_active_pods [ALPHA] The number of pods the kubelet considers active and which are being considered when admitting new pods. static is true if the pod is not from the apiserver.
5850 # TYPE kubelet_active_pods gauge
5851 kubelet_active_pods{static=""} 1
5852 kubelet_active_pods{static="true"} 0
5853 `,
5854 metrics.OrphanedRuntimePodTotal.FQName(): `# HELP kubelet_orphaned_runtime_pods_total [ALPHA] Number of pods that have been detected in the container runtime without being already known to the pod worker. This typically indicates the kubelet was restarted while a pod was force deleted in the API or in the local configuration, which is unusual.
5855 # TYPE kubelet_orphaned_runtime_pods_total counter
5856 kubelet_orphaned_runtime_pods_total 0
5857 `,
5858 metrics.RestartedPodTotal.FQName(): `# HELP kubelet_restarted_pods_total [ALPHA] Number of pods that have been restarted because they were deleted and recreated with the same UID while the kubelet was watching them (common for static pods, extremely uncommon for API pods)
5859 # TYPE kubelet_restarted_pods_total counter
5860 kubelet_restarted_pods_total{static=""} 1
5861 kubelet_restarted_pods_total{static="true"} 0
5862 `,
5863 metrics.WorkingPodCount.FQName(): `# HELP kubelet_working_pods [ALPHA] Number of pods the kubelet is actually running, broken down by lifecycle phase, whether the pod is desired, orphaned, or runtime only (also orphaned), and whether the pod is static. An orphaned pod has been removed from local configuration or force deleted in the API and consumes resources that are not otherwise visible.
5864 # TYPE kubelet_working_pods gauge
5865 kubelet_working_pods{config="desired",lifecycle="sync",static=""} 1
5866 kubelet_working_pods{config="desired",lifecycle="sync",static="true"} 0
5867 kubelet_working_pods{config="desired",lifecycle="terminated",static=""} 0
5868 kubelet_working_pods{config="desired",lifecycle="terminated",static="true"} 0
5869 kubelet_working_pods{config="desired",lifecycle="terminating",static=""} 0
5870 kubelet_working_pods{config="desired",lifecycle="terminating",static="true"} 0
5871 kubelet_working_pods{config="orphan",lifecycle="sync",static=""} 0
5872 kubelet_working_pods{config="orphan",lifecycle="sync",static="true"} 0
5873 kubelet_working_pods{config="orphan",lifecycle="terminated",static=""} 0
5874 kubelet_working_pods{config="orphan",lifecycle="terminated",static="true"} 0
5875 kubelet_working_pods{config="orphan",lifecycle="terminating",static=""} 0
5876 kubelet_working_pods{config="orphan",lifecycle="terminating",static="true"} 0
5877 kubelet_working_pods{config="runtime_only",lifecycle="sync",static="unknown"} 0
5878 kubelet_working_pods{config="runtime_only",lifecycle="terminated",static="unknown"} 0
5879 kubelet_working_pods{config="runtime_only",lifecycle="terminating",static="unknown"} 0
5880 `,
5881 },
5882 },
5883 }
5884 for _, tt := range tests {
5885 t.Run(tt.name, func(t *testing.T) {
5886 // register and reset the kubelet metrics so each case starts from a clean slate
5887 metrics.Register()
5888 for _, metric := range []interface{ Reset() }{
5889 metrics.DesiredPodCount,
5890 metrics.ActivePodCount,
5891 metrics.RestartedPodTotal,
5892 metrics.OrphanedRuntimePodTotal,
5893 metrics.WorkingPodCount,
5894 } {
5895 metric.Reset()
5896 }
5897 metrics.MirrorPodCount.Set(0)
5898
5899 testKubelet := newTestKubelet(t, false)
5900 defer testKubelet.Cleanup()
5901 kl := testKubelet.kubelet
5902
5903 podWorkers, _, processed := createPodWorkers()
5904 kl.podWorkers = podWorkers
5905 originalPodSyncer := podWorkers.podSyncer
5906 syncFuncs := newPodSyncerFuncs(originalPodSyncer)
5907 podWorkers.podSyncer = &syncFuncs
5908 if tt.terminatingErr != nil {
5909 syncFuncs.syncTerminatingPod = func(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus, gracePeriod *int64, podStatusFn func(*v1.PodStatus)) error {
5910 t.Logf("called syncTerminatingPod")
5911 if err := originalPodSyncer.SyncTerminatingPod(ctx, pod, podStatus, gracePeriod, podStatusFn); err != nil {
5912 t.Fatalf("unexpected error in syncTerminatingPodFn: %v", err)
5913 }
5914 return tt.terminatingErr
5915 }
5916 syncFuncs.syncTerminatingRuntimePod = func(ctx context.Context, runningPod *kubecontainer.Pod) error {
5917 if err := originalPodSyncer.SyncTerminatingRuntimePod(ctx, runningPod); err != nil {
5918 t.Fatalf("unexpected error in syncTerminatingRuntimePodFn: %v", err)
5919 }
5920 return tt.terminatingErr
5921 }
5922 }
5923 if tt.prepareWorker != nil {
5924 tt.prepareWorker(t, podWorkers, processed)
5925 }
5926
5927 testKubelet.fakeRuntime.PodList = tt.runtimePods
5928 kl.podManager.SetPods(tt.pods)
5929
5930 for _, reject := range tt.rejectedPods {
5931 pod, ok := kl.podManager.GetPodByUID(reject.uid)
5932 if !ok {
5933 t.Fatalf("unable to reject pod by UID %v", reject.uid)
5934 }
5935 kl.rejectPod(pod, reject.reason, reject.message)
5936 }
5937
5938 if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
5939 t.Errorf("Kubelet.HandlePodCleanups() error = %v, wantErr %v", err, tt.wantErr)
5940 }
5941 drainAllWorkers(podWorkers)
5942 if tt.wantWorker != nil {
5943 tt.wantWorker(t, podWorkers, processed)
5944 }
5945
5946 for k, v := range tt.expectMetrics {
5947 testMetric(t, k, v)
5948 }
5949
5950 // when requested, restore the original syncer and run cleanup again to verify recovery
5951 if tt.wantWorkerAfterRetry != nil {
5952 podWorkers.podSyncer = originalPodSyncer
5953 if err := kl.HandlePodCleanups(context.Background()); (err != nil) != tt.wantErr {
5954 t.Errorf("Kubelet.HandlePodCleanups() second error = %v, wantErr %v", err, tt.wantErr)
5955 }
5956 drainAllWorkers(podWorkers)
5957 tt.wantWorkerAfterRetry(t, podWorkers, processed)
5958
5959 for k, v := range tt.expectMetricsAfterRetry {
5960 testMetric(t, k, v)
5961 }
5962 }
5963 })
5964 }
5965 }
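// Rough sketch of how each table entry above flows through the harness
// (condensing the loop body; names match the fields and variables used there):
// prepareWorker seeds the pod worker, pods/runtimePods describe the config and
// runtime view handed to the kubelet, HandlePodCleanups runs once, and
// wantWorker plus expectMetrics assert the resulting worker state, sync
// records, and metrics. When wantWorkerAfterRetry is set, cleanup runs a
// second time with the original (non-failing) syncer to verify recovery.
//
//	tt.prepareWorker(t, podWorkers, processed)
//	testKubelet.fakeRuntime.PodList = tt.runtimePods
//	kl.podManager.SetPods(tt.pods)
//	_ = kl.HandlePodCleanups(context.Background())
//	drainAllWorkers(podWorkers)
//	tt.wantWorker(t, podWorkers, processed)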
5966
5967 func testMetric(t *testing.T, metricName string, expectedMetric string) {
5968 t.Helper()
5969 err := testutil.GatherAndCompare(metrics.GetGather(), strings.NewReader(expectedMetric), metricName)
5970 if err != nil {
5971 t.Error(err)
5972 }
5973 }
5974
5975 func TestGetNonExistentImagePullSecret(t *testing.T) {
5976 secrets := make([]*v1.Secret, 0)
5977 fakeRecorder := record.NewFakeRecorder(1)
5978 testKubelet := newTestKubelet(t, false)
5979 testKubelet.kubelet.recorder = fakeRecorder
5980 testKubelet.kubelet.secretManager = secret.NewFakeManagerWithSecrets(secrets)
5981 defer testKubelet.Cleanup()
5982
5983 expectedEvent := "Warning FailedToRetrieveImagePullSecret Unable to retrieve some image pull secrets (secretFoo); attempting to pull the image may not succeed."
5984
5985 testPod := &v1.Pod{
5986 ObjectMeta: metav1.ObjectMeta{
5987 Namespace: "nsFoo",
5988 Name: "podFoo",
5989 Annotations: map[string]string{},
5990 },
5991 Spec: v1.PodSpec{
5992 ImagePullSecrets: []v1.LocalObjectReference{
5993 {Name: "secretFoo"},
5994 },
5995 },
5996 }
5997
5998 pullSecrets := testKubelet.kubelet.getPullSecretsForPod(testPod)
5999 assert.Equal(t, 0, len(pullSecrets))
6000
6001 assert.Equal(t, 1, len(fakeRecorder.Events))
6002 event := <-fakeRecorder.Events
6003 assert.Equal(t, expectedEvent, event)
6004 }
6005
6006 func TestParseGetSubIdsOutput(t *testing.T) {
6007 tests := []struct {
6008 name string
6009 input string
6010 wantFirstID uint32
6011 wantRangeLen uint32
6012 wantErr bool
6013 }{
6014 {
6015 name: "valid",
6016 input: "0: kubelet 65536 2147483648",
6017 wantFirstID: 65536,
6018 wantRangeLen: 2147483648,
6019 },
6020 {
6021 name: "multiple lines",
6022 input: "0: kubelet 1 2\n1: kubelet 3 4\n",
6023 wantErr: true,
6024 },
6025 {
6026 name: "wrong format",
6027 input: "0: kubelet 65536",
6028 wantErr: true,
6029 },
6030 {
6031 name: "non numeric 1",
6032 input: "0: kubelet Foo 65536",
6033 wantErr: true,
6034 },
6035 {
6036 name: "non numeric 2",
6037 input: "0: kubelet 0 Bar",
6038 wantErr: true,
6039 },
6040 {
6041 name: "overflow 1",
6042 input: "0: kubelet 4294967296 2147483648",
6043 wantErr: true,
6044 },
6045 {
6046 name: "overflow 2",
6047 input: "0: kubelet 65536 4294967296",
6048 wantErr: true,
6049 },
6050 {
6051 name: "negative value 1",
6052 input: "0: kubelet -1 2147483648",
6053 wantErr: true,
6054 },
6055 {
6056 name: "negative value 2",
6057 input: "0: kubelet 65536 -1",
6058 wantErr: true,
6059 },
6060 }
6061 for _, tc := range tests {
6062 t.Run(tc.name, func(t *testing.T) {
6063 gotFirstID, gotRangeLen, err := parseGetSubIdsOutput(tc.input)
6064 if tc.wantErr {
6065 if err == nil {
6066 t.Errorf("%s: expected error, got nil", tc.name)
6067 }
6068 } else {
6069 if err != nil {
6070 t.Errorf("%s: unexpected error: %v", tc.name, err)
6071 }
6072 if gotFirstID != tc.wantFirstID || gotRangeLen != tc.wantRangeLen {
6073 t.Errorf("%s: got (%d, %d), want (%d, %d)", tc.name, gotFirstID, gotRangeLen, tc.wantFirstID, tc.wantRangeLen)
6074 }
6075 }
6076 })
6077 }
6078 }
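// The inputs above mirror the single-line output format of the "getsubids"
// tool, "<index>: <user> <firstID> <rangeLength>", which the kubelet parses
// to discover the subordinate ID range available for user namespaces. A
// minimal caller sketch (illustrative only; error handling is up to the
// caller):
//
//	firstID, rangeLen, err := parseGetSubIdsOutput("0: kubelet 65536 2147483648")
//	if err != nil {
//		// the range could not be determined; treat the configuration as unusable
//	}
//	_ = firstID  // 65536
//	_ = rangeLen // 2147483648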
6079
6080 func TestResolveRecursiveReadOnly(t *testing.T) {
6081 testCases := []struct {
6082 m v1.VolumeMount
6083 runtimeSupportsRRO bool
6084 expected bool
6085 expectedErr string
6086 }{
6087 {
6088 m: v1.VolumeMount{Name: "rw"},
6089 runtimeSupportsRRO: true,
6090 expected: false,
6091 expectedErr: "",
6092 },
6093 {
6094 m: v1.VolumeMount{Name: "ro", ReadOnly: true},
6095 runtimeSupportsRRO: true,
6096 expected: false,
6097 expectedErr: "",
6098 },
6099 {
6100 m: v1.VolumeMount{Name: "ro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyDisabled)},
6101 runtimeSupportsRRO: true,
6102 expected: false,
6103 expectedErr: "",
6104 },
6105 {
6106 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
6107 runtimeSupportsRRO: true,
6108 expected: true,
6109 expectedErr: "",
6110 },
6111 {
6112 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
6113 MountPropagation: ptr.To(v1.MountPropagationNone)},
6114 runtimeSupportsRRO: true,
6115 expected: true,
6116 expectedErr: "",
6117 },
6118 {
6119 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
6120 MountPropagation: ptr.To(v1.MountPropagationHostToContainer)},
6121 runtimeSupportsRRO: true,
6122 expected: false,
6123 expectedErr: "not compatible with propagation",
6124 },
6125 {
6126 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible),
6127 MountPropagation: ptr.To(v1.MountPropagationBidirectional)},
6128 runtimeSupportsRRO: true,
6129 expected: false,
6130 expectedErr: "not compatible with propagation",
6131 },
6132 {
6133 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: false, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
6134 runtimeSupportsRRO: true,
6135 expected: false,
6136 expectedErr: "not read-only",
6137 },
6138 {
6139 m: v1.VolumeMount{Name: "rro-if-possible", ReadOnly: false, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyIfPossible)},
6140 runtimeSupportsRRO: false,
6141 expected: false,
6142 expectedErr: "not read-only",
6143 },
6144 {
6145 m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
6146 runtimeSupportsRRO: true,
6147 expected: true,
6148 expectedErr: "",
6149 },
6150 {
6151 m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
6152 MountPropagation: ptr.To(v1.MountPropagationNone)},
6153 runtimeSupportsRRO: true,
6154 expected: true,
6155 expectedErr: "",
6156 },
6157 {
6158 m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
6159 MountPropagation: ptr.To(v1.MountPropagationHostToContainer)},
6160 runtimeSupportsRRO: true,
6161 expected: false,
6162 expectedErr: "not compatible with propagation",
6163 },
6164 {
6165 m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
6166 MountPropagation: ptr.To(v1.MountPropagationBidirectional)},
6167 runtimeSupportsRRO: true,
6168 expected: false,
6169 expectedErr: "not compatible with propagation",
6170 },
6171 {
6172 m: v1.VolumeMount{Name: "rro", RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
6173 runtimeSupportsRRO: true,
6174 expected: false,
6175 expectedErr: "not read-only",
6176 },
6177 {
6178 m: v1.VolumeMount{Name: "rro", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled)},
6179 runtimeSupportsRRO: false,
6180 expected: false,
6181 expectedErr: "not supported by the runtime",
6182 },
6183 {
6184 m: v1.VolumeMount{Name: "invalid", ReadOnly: true, RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyMode("foo"))},
6185 runtimeSupportsRRO: true,
6186 expected: false,
6187 expectedErr: "unknown recursive read-only mode",
6188 },
6189 }
6190
6191 for _, tc := range testCases {
6192 got, err := resolveRecursiveReadOnly(tc.m, tc.runtimeSupportsRRO)
6193 t.Logf("resolveRecursiveReadOnly(%+v, %v) = (%v, %v)", tc.m, tc.runtimeSupportsRRO, got, err)
6194 if tc.expectedErr == "" {
6195 assert.Equal(t, tc.expected, got)
6196 assert.NoError(t, err)
6197 } else {
6198 assert.ErrorContains(t, err, tc.expectedErr)
6199 }
6200 }
6201 }
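// Taken together, the cases above encode the resolution rules: recursive
// read-only (RRO) resolves to true only when the mount is read-only, its
// propagation is unset or None, and the runtime supports RRO; incompatible
// propagation, a writable mount, a runtime without support (for the Enabled
// mode), or an unknown mode yields an error, while the disabled or unset
// modes resolve to false. A hypothetical call mirroring one of the rows
// (the mount name is illustrative):
//
//	rro, err := resolveRecursiveReadOnly(v1.VolumeMount{
//		Name:              "data",
//		ReadOnly:          true,
//		RecursiveReadOnly: ptr.To(v1.RecursiveReadOnlyEnabled),
//	}, false /* runtimeSupportsRRO */)
//	// rro == false and err mentions "not supported by the runtime"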
6202