package node

import (
	"context"
	"fmt"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/nodefeature"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	"k8s.io/utils/ptr"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var (
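	// nonRootTestUserID is a non-zero UID used by tests that need to run as a non-root user.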
	nonRootTestUserID = int64(1000)
)

var _ = SIGDescribe("Security Context", func() {
	f := framework.NewDefaultFramework("security-context-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	var podClient *e2epod.PodClient
	ginkgo.BeforeEach(func() {
		podClient = e2epod.NewPodClient(f)
	})

	ginkgo.Context("When creating a pod with HostUsers", func() {
		containerName := "userns-test"
		makePod := func(hostUsers bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "userns-" + string(uuid.NewUUID()),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:    containerName,
							Image:   imageutils.GetE2EImage(imageutils.BusyBox),
							Command: []string{"cat", "/proc/self/uid_map"},
						},
					},
					RestartPolicy: v1.RestartPolicyNever,
					HostUsers:     &hostUsers,
				},
			}
		}

		f.It("must create the user namespace if set to false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
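			// With hostUsers=false each pod must run in its own, newly created user namespace.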
			podClient := e2epod.PodClientNS(f, f.Namespace.Name)

			createdPod1 := podClient.Create(ctx, makePod(false))
			createdPod2 := podClient.Create(ctx, makePod(false))
			ginkgo.DeferCleanup(func(ctx context.Context) {
				ginkgo.By("delete the pods")
				podClient.DeleteSync(ctx, createdPod1.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
				podClient.DeleteSync(ctx, createdPod2.Name, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			})
			getLogs := func(pod *v1.Pod) (string, error) {
				err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStart)
				if err != nil {
					return "", err
				}
				podStatus, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
				if err != nil {
					return "", err
				}
				return e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podStatus.Name, containerName)
			}

			logs1, err := getLogs(createdPod1)
			framework.ExpectNoError(err)
			logs2, err := getLogs(createdPod2)
			framework.ExpectNoError(err)
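			// 65536 is the mapping length used for a pod user namespace, so it
			// must appear in /proc/self/uid_map of both pods, and the two
			// mappings must differ from each other.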
			if !strings.Contains(logs1, "65536") || !strings.Contains(logs2, "65536") {
				framework.Failf("user namespace not created")
			}
			if logs1 == logs2 {
				framework.Failf("two different pods are running with the same user namespace configuration")
			}
		})

		f.It("must not create the user namespace if set to true [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
			pod := makePod(true)
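			// In the host user namespace /proc/self/uid_map is the identity
			// mapping "0 0 4294967295"; the full 32-bit length 4294967295 only
			// shows up when no new user namespace was created.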
			e2epodoutput.TestContainerOutput(ctx, f, "read namespace", pod, 0, []string{
				"4294967295",
			})
		})

		f.It("should mount all volumes with proper permissions with hostUsers=false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
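			// Create one volume of every type exercised here: configMap,
			// secret, downwardAPI and projected.

			// Create the configMap.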
			name := "userns-volumes-test-" + string(uuid.NewUUID())
			configMap := newConfigMap(f, name)
			ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
			var err error
			if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
				framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
			}
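			// Create the secret.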
			secret := secretForTest(f.Namespace.Name, name)
			ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
			if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
				framework.Failf("unable to create test secret %s: %v", secret.Name, err)
			}
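			// downwardAPI definition, shared with the projected volume below.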
			downwardVolSource := &v1.DownwardAPIVolumeSource{
				Items: []v1.DownwardAPIVolumeFile{
					{
						Path: "name",
						FieldRef: &v1.ObjectFieldSelector{
							APIVersion: "v1",
							FieldPath:  "metadata.name",
						},
					},
				},
			}
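			// Create a pod that mounts all of the volumes created above.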
			falseVar := false
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-userns-volumes-" + string(uuid.NewUUID()),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "userns-file-permissions",
							Image: imageutils.GetE2EImage(imageutils.BusyBox),
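							// Print the numeric GID of every regular file under /vol/;
							// the "=" in the format string makes each output line "=<gid>".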
							Command: []string{"sh", "-c", "stat -c='%g' $(find /vol/ -type f)"},
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "cfg",
									MountPath: "/vol/cfg/",
								},
								{
									Name:      "secret",
									MountPath: "/vol/secret/",
								},
								{
									Name:      "downward",
									MountPath: "/vol/downward/",
								},
								{
									Name:      "projected",
									MountPath: "/vol/projected/",
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: "cfg",
							VolumeSource: v1.VolumeSource{
								ConfigMap: &v1.ConfigMapVolumeSource{
									LocalObjectReference: v1.LocalObjectReference{Name: configMap.Name},
								},
							},
						},
						{
							Name: "secret",
							VolumeSource: v1.VolumeSource{
								Secret: &v1.SecretVolumeSource{
									SecretName: secret.Name,
								},
							},
						},
						{
							Name: "downward",
							VolumeSource: v1.VolumeSource{
								DownwardAPI: downwardVolSource,
							},
						},
						{
							Name: "projected",
							VolumeSource: v1.VolumeSource{
								Projected: &v1.ProjectedVolumeSource{
									Sources: []v1.VolumeProjection{
										{
											DownwardAPI: &v1.DownwardAPIProjection{
												Items: downwardVolSource.Items,
											},
										},
										{
											Secret: &v1.SecretProjection{
												LocalObjectReference: v1.LocalObjectReference{Name: secret.Name},
											},
										},
									},
								},
							},
						},
					},
					HostUsers:     &falseVar,
					RestartPolicy: v1.RestartPolicyNever,
				},
			}
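			// Expect one "=0" line per file: every mounted file must be owned
			// by GID 0, i.e. root inside the pod's user namespace.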
			downwardAPIVolFiles := 1
			projectedFiles := len(secret.Data) + downwardAPIVolFiles
			e2epodoutput.TestContainerOutput(ctx, f, "check file permissions", pod, 0, []string{
				strings.Repeat("=0\n", len(secret.Data)+len(configMap.Data)+downwardAPIVolFiles+projectedFiles),
			})
		})

		f.It("should set FSGroup to user inside the container with hostUsers=false [LinuxOnly]", feature.UserNamespacesSupport, func(ctx context.Context) {
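			// Create the configMap the pod will mount.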
			name := "userns-volumes-test-" + string(uuid.NewUUID())
			configMap := newConfigMap(f, name)
			ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
			var err error
			if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
				framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
			}
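			// Create a pod that mounts the configMap with an FSGroup set.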
			falseVar := false
			fsGroup := int64(200)
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod-userns-fsgroup-" + string(uuid.NewUUID()),
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:  "userns-fsgroup",
							Image: imageutils.GetE2EImage(imageutils.BusyBox),
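							// Print the numeric GID of every regular file under /vol/,
							// one "=<gid>" line per file.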
							Command: []string{"sh", "-c", "stat -c='%g' $(find /vol/ -type f)"},
							VolumeMounts: []v1.VolumeMount{
								{
									Name:      "cfg",
									MountPath: "/vol/cfg/",
								},
							},
						},
					},
					Volumes: []v1.Volume{
						{
							Name: "cfg",
							VolumeSource: v1.VolumeSource{
								ConfigMap: &v1.ConfigMapVolumeSource{
									LocalObjectReference: v1.LocalObjectReference{Name: configMap.Name},
								},
							},
						},
					},
					HostUsers:     &falseVar,
					RestartPolicy: v1.RestartPolicyNever,
					SecurityContext: &v1.PodSecurityContext{
						FSGroup: &fsGroup,
					},
				},
			}
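			// Expect one "=200" line per configMap file: inside the pod's user
			// namespace the mounted files must appear owned by the FSGroup GID.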
			e2epodoutput.TestContainerOutput(ctx, f, "check FSGroup is mapped correctly", pod, 0, []string{
				strings.Repeat(fmt.Sprintf("=%v\n", fsGroup), len(configMap.Data)),
			})
		})
	})

	ginkgo.Context("When creating a container with runAsUser", func() {
		makeUserPod := func(podName, image string, command []string, userid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								RunAsUser: &userid,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(ctx context.Context, userid int64) {
			podName := fmt.Sprintf("busybox-user-%d-%s", userid, uuid.NewUUID())
			podClient.Create(ctx, makeUserPod(podName,
				framework.BusyBoxImage,
				[]string{"sh", "-c", fmt.Sprintf("test $(id -u) -eq %d", userid)},
				userid,
			))

			podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
		}

		framework.ConformanceIt("should run the container with uid 65534 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			createAndWaitUserPod(ctx, 65534)
		})

		f.It("should run the container with uid 0 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			createAndWaitUserPod(ctx, 0)
		})
	})

	ginkgo.Context("When creating a container with runAsNonRoot", func() {
		rootImage := imageutils.GetE2EImage(imageutils.BusyBox)
		nonRootImage := imageutils.GetE2EImage(imageutils.NonRoot)
		makeNonRootPod := func(podName, image string, userid *int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: []string{"id", "-u"},
							SecurityContext: &v1.SecurityContext{
								RunAsNonRoot: ptr.To(true),
								RunAsUser:    userid,
							},
						},
					},
				},
			}
		}

		ginkgo.It("should run with an explicit non-root user ID [LinuxOnly]", func(ctx context.Context) {
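			// The pod sets RunAsUser, which is not supported on Windows.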
			e2eskipper.SkipIfNodeOSDistroIs("windows")
			name := "explicit-nonroot-uid"
			pod := makeNonRootPod(name, rootImage, ptr.To(nonRootTestUserID))
			podClient.Create(ctx, pod)

			podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
			framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1000"))
		})
		ginkgo.It("should not run with an explicit root user ID [LinuxOnly]", func(ctx context.Context) {
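			// The pod sets RunAsUser, which is not supported on Windows.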
			e2eskipper.SkipIfNodeOSDistroIs("windows")
			name := "explicit-root-uid"
			pod := makeNonRootPod(name, nonRootImage, ptr.To(int64(0)))
			pod = podClient.Create(ctx, pod)

			ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
			framework.ExpectNoError(err)
			gomega.Expect(ev).NotTo(gomega.BeNil())
			gomega.Expect(ev.Reason).To(gomega.Equal(events.FailedToCreateContainer))
		})
		ginkgo.It("should run with an image specified user ID", func(ctx context.Context) {
			name := "implicit-nonroot-uid"
			pod := makeNonRootPod(name, nonRootImage, nil)
			podClient.Create(ctx, pod)

			podClient.WaitForSuccess(ctx, name, framework.PodStartTimeout)
			framework.ExpectNoError(podClient.MatchContainerOutput(ctx, name, name, "1234"))
		})
		ginkgo.It("should not run without a specified user ID", func(ctx context.Context) {
			name := "implicit-root-uid"
			pod := makeNonRootPod(name, rootImage, nil)
			pod = podClient.Create(ctx, pod)

			ev, err := podClient.WaitForErrorEventOrSuccess(ctx, pod)
			framework.ExpectNoError(err)
			gomega.Expect(ev).NotTo(gomega.BeNil())
			gomega.Expect(ev.Reason).To(gomega.Equal(events.FailedToCreateContainer))
		})
	})

	ginkgo.Context("When creating a pod with readOnlyRootFilesystem", func() {
		makeUserPod := func(podName, image string, command []string, readOnlyRootFilesystem bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								ReadOnlyRootFilesystem: &readOnlyRootFilesystem,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(ctx context.Context, readOnlyRootFilesystem bool) string {
			podName := fmt.Sprintf("busybox-readonly-%v-%s", readOnlyRootFilesystem, uuid.NewUUID())
			podClient.Create(ctx, makeUserPod(podName,
				framework.BusyBoxImage,
				[]string{"sh", "-c", "touch checkfile"},
				readOnlyRootFilesystem,
			))

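			// Writing to the root filesystem must fail exactly when it is read-only.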
			if readOnlyRootFilesystem {
				waitForFailure(ctx, f, podName, framework.PodStartTimeout)
			} else {
				podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
			}

			return podName
		}

		f.It("should run the container with readonly rootfs when readOnlyRootFilesystem=true [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			createAndWaitUserPod(ctx, true)
		})

		framework.ConformanceIt("should run the container with writable rootfs when readOnlyRootFilesystem=false", f.WithNodeConformance(), func(ctx context.Context) {
			createAndWaitUserPod(ctx, false)
		})
	})

	ginkgo.Context("When creating a pod with privileged", func() {
		makeUserPod := func(podName, image string, command []string, privileged bool) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
							Image:   image,
							Name:    podName,
							Command: command,
							SecurityContext: &v1.SecurityContext{
								Privileged: &privileged,
							},
						},
					},
				},
			}
		}
		createAndWaitUserPod := func(ctx context.Context, privileged bool) string {
			podName := fmt.Sprintf("busybox-privileged-%v-%s", privileged, uuid.NewUUID())
			podClient.Create(ctx, makeUserPod(podName,
				framework.BusyBoxImage,
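				// Creating a dummy network interface requires CAP_NET_ADMIN, which
				// only a privileged container has; "|| true" keeps the pod from
				// failing so the test can inspect the logs instead.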
				[]string{"sh", "-c", "ip link add dummy0 type dummy || true"},
				privileged,
			))
			podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
			return podName
		}

		framework.ConformanceIt("should run the container as unprivileged when false [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			podName := createAndWaitUserPod(ctx, false)
			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if !strings.Contains(logs, "Operation not permitted") {
				framework.Failf("unprivileged container shouldn't be able to create dummy device")
			}
		})

		f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, func(ctx context.Context) {
			podName := createAndWaitUserPod(ctx, true)
			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
			if err != nil {
				framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
			}

			framework.Logf("Got logs for pod %q: %q", podName, logs)
			if strings.Contains(logs, "Operation not permitted") {
				framework.Failf("privileged container should be able to create dummy device")
			}
		})
	})

	ginkgo.Context("when creating containers with AllowPrivilegeEscalation", func() {
		makeAllowPrivilegeEscalationPod := func(podName string, allowPrivilegeEscalation *bool, uid int64) *v1.Pod {
			return &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: podName,
				},
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{
						{
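							// The Nonewprivs image runs a setuid binary that reports
							// its effective uid, so whether privilege escalation
							// happened is visible in the container output.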
							Image: imageutils.GetE2EImage(imageutils.Nonewprivs),
							Name:  podName,
							SecurityContext: &v1.SecurityContext{
								AllowPrivilegeEscalation: allowPrivilegeEscalation,
								RunAsUser:                &uid,
							},
						},
					},
				},
			}
		}
		createAndMatchOutput := func(ctx context.Context, podName, output string, allowPrivilegeEscalation *bool, uid int64) error {
			podClient.Create(ctx, makeAllowPrivilegeEscalationPod(podName,
				allowPrivilegeEscalation,
				uid,
			))
			podClient.WaitForSuccess(ctx, podName, framework.PodStartTimeout)
			return podClient.MatchContainerOutput(ctx, podName, podName, output)
		}

		f.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
			if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", nil, nonRootTestUserID); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		framework.ConformanceIt("should not allow privilege escalation when false [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			podName := "alpine-nnp-false-" + string(uuid.NewUUID())
			apeFalse := false
			if err := createAndMatchOutput(ctx, podName, fmt.Sprintf("Effective uid: %d", nonRootTestUserID), &apeFalse, nonRootTestUserID); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})

		f.It("should allow privilege escalation when true [LinuxOnly]", f.WithNodeConformance(), func(ctx context.Context) {
			podName := "alpine-nnp-true-" + string(uuid.NewUUID())
			apeTrue := true
			if err := createAndMatchOutput(ctx, podName, "Effective uid: 0", &apeTrue, nonRootTestUserID); err != nil {
				framework.Failf("Match output for pod %q failed: %v", podName, err)
			}
		})
	})
})

var _ = SIGDescribe("User Namespaces for Pod Security Standards [LinuxOnly]", func() {
	f := framework.NewDefaultFramework("user-namespaces-pss-test")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelRestricted

	ginkgo.Context("with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled", func() {
		f.It("should allow pod", feature.UserNamespacesPodSecurityStandards, func(ctx context.Context) {
			name := "pod-user-namespaces-pss-" + string(uuid.NewUUID())
			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: name},
				Spec: v1.PodSpec{
					RestartPolicy:   v1.RestartPolicyNever,
					HostUsers:       ptr.To(false),
					SecurityContext: &v1.PodSecurityContext{},
					Containers: []v1.Container{
						{
							Name:    name,
							Image:   imageutils.GetE2EImage(imageutils.BusyBox),
							Command: []string{"whoami"},
							SecurityContext: &v1.SecurityContext{
								AllowPrivilegeEscalation: ptr.To(false),
								Capabilities:             &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
								SeccompProfile:           &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
							},
						},
					},
				},
			}

			e2epodoutput.TestContainerOutput(ctx, f, "RunAsUser-RunAsNonRoot", pod, 0, []string{"root"})
		})
	})
})
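// waitForFailure waits for the named pod to reach the Failed phase; a pod that
// succeeds instead is reported as an error.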
func waitForFailure(ctx context.Context, f *framework.Framework, name string, timeout time.Duration) {
	gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout,
		func(pod *v1.Pod) (bool, error) {
			switch pod.Status.Phase {
			case v1.PodFailed:
				return true, nil
			case v1.PodSucceeded:
				return true, fmt.Errorf("pod %q succeeded with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
			default:
				return false, nil
			}
		},
	)).To(gomega.Succeed(), "wait for pod %q to fail", name)
}