package e2enode

import (
	"context"
	"fmt"
	"os"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/uuid"
	kubeapi "k8s.io/kubernetes/pkg/apis/core"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/nodefeature"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, func() {
	f := framework.NewDefaultFramework("system-node-critical-pod-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	// this test only manipulates pods in kube-system
	f.SkipNamespaceCreation = true

	ginkgo.AfterEach(func() {
		if framework.TestContext.PrepullImages {
			// The test may cause the prepulled images to be evicted;
			// prepull them again so this test does not affect subsequent tests.
			PrePullAllImages()
		}
	})

	ginkgo.Context("when create a system-node-critical pod", func() {
		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
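			// Set the nodefs.available hard-eviction threshold just below the
			// currently available bytes, so that consuming roughly 200Mi of disk
			// is enough to drive the node into DiskPressure.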
			diskConsumed := resource.MustParse("200Mi")
			summary := eventuallyGetSummary(ctx)
			availableBytes := *(summary.Node.Fs.AvailableBytes)
			initialConfig.EvictionHard = map[string]string{string(evictionapi.SignalNodeFsAvailable): fmt.Sprintf("%d", availableBytes-uint64(diskConsumed.Value()))}
			initialConfig.EvictionMinimumReclaim = map[string]string{}
		})

		// Place the remainder of the test within a context so that the kubelet
		// config is set before and after the test.
		ginkgo.Context("", func() {
			var staticPodName, mirrorPodName, podPath string
			ns := kubeapi.NamespaceSystem

			ginkgo.BeforeEach(func(ctx context.Context) {
				ginkgo.By("create a static system-node-critical pod")
				staticPodName = "static-disk-hog-" + string(uuid.NewUUID())
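				// the kubelet exposes a static pod as a mirror pod named "<static-pod-name>-<node-name>"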
				mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
				podPath = kubeletCfg.StaticPodPath
				// create a static pod that writes a 10 MiB file on each iteration
				// (up to 1024 iterations) to gradually consume the node's disk
				err := createStaticSystemNodeCriticalPod(
					podPath, staticPodName, ns, busyboxImage, v1.RestartPolicyNever, 1024,
					"dd if=/dev/urandom of=file${i} bs=10485760 count=1 2>/dev/null; sleep .1;",
				)
				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

				ginkgo.By("wait for the mirror pod to be running")
				gomega.Eventually(ctx, func(ctx context.Context) error {
					return checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns)
				}, time.Minute, time.Second*2).Should(gomega.Succeed())
			})

			ginkgo.It("should not be evicted upon DiskPressure", func(ctx context.Context) {
				ginkgo.By("wait for the node to have DiskPressure condition")
				gomega.Eventually(ctx, func(ctx context.Context) error {
					if hasNodeCondition(ctx, f, v1.NodeDiskPressure) {
						return nil
					}
					framework.Logf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
					return fmt.Errorf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
				}, time.Minute*2, time.Second*4).Should(gomega.Succeed())

				ginkgo.By("check that the mirror pod stays running the whole time")
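				// The pod keeps writing to disk throughout this window; staying
				// Running for 8 minutes of sustained DiskPressure shows the
				// system-node-critical pod is not evicted.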
				gomega.Consistently(ctx, func(ctx context.Context) error {
					err := checkMirrorPodRunning(ctx, f.ClientSet, mirrorPodName, ns)
					if err == nil {
						framework.Logf("mirror pod %q is running", mirrorPodName)
					} else {
						framework.Logf("%s", err.Error())
					}
					return err
				}, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())
			})
			ginkgo.AfterEach(func(ctx context.Context) {
				defer func() {
					if framework.TestContext.PrepullImages {
						// The test may cause the prepulled images to be evicted;
						// prepull them again so this test does not affect subsequent tests.
						PrePullAllImages()
					}
				}()
				ginkgo.By("delete the static pod")
				err := deleteStaticPod(podPath, staticPodName, ns)
				gomega.Expect(err).ShouldNot(gomega.HaveOccurred())

				ginkgo.By("wait for the mirror pod to disappear")
				gomega.Eventually(ctx, func(ctx context.Context) error {
					return checkMirrorPodDisappear(ctx, f.ClientSet, mirrorPodName, ns)
				}, time.Minute, time.Second*2).Should(gomega.Succeed())

				ginkgo.By("making sure that node no longer has DiskPressure")
				gomega.Eventually(ctx, func(ctx context.Context) error {
					if hasNodeCondition(ctx, f, v1.NodeDiskPressure) {
						return fmt.Errorf("conditions haven't returned to normal, node still has DiskPressure")
					}
					return nil
				}, pressureDisappearTimeout, evictionPollInterval).Should(gomega.Succeed())
			})
		})
	})
})

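// createStaticSystemNodeCriticalPod writes a static pod manifest into dir.
// The pod runs at system-node-critical priority and executes command for
// iterations rounds before sleeping forever so it stays Running.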
func createStaticSystemNodeCriticalPod(dir, name, namespace, image string, restart v1.RestartPolicy,
	iterations int, command string) error {
	template := `
apiVersion: v1
kind: Pod
metadata:
  name: %s
  namespace: %s
spec:
  priorityClassName: system-node-critical
  containers:
    - name: %s
      image: %s
      command: ["sh", "-c", "i=0; while [ $i -lt %d ]; do %s i=$(($i+1)); done; while true; do sleep 5; done"]
  restartPolicy: %s
`
	file := staticPodPath(dir, name, namespace)
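	// Note: the Sprintf arguments must stay in the same order as the template
	// placeholders: pod name, namespace, container name, image, iterations,
	// command, restart policy.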
	podYaml := fmt.Sprintf(template, name, namespace, name, image, iterations, command, string(restart))

	f, err := os.OpenFile(file, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0666)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = f.WriteString(podYaml)
	return err
}