package e2enode

import (
	"context"
	"fmt"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
	"k8s.io/utils/ptr"
)
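
// testCase describes one OOM kill scenario: the pod to create and the name of
// the container that is expected to be OOM killed.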
type testCase struct {
	name                   string
	podSpec                *v1.Pod
	oomTargetContainerName string
}
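
// KubeReservedMemory is the fraction of the node's memory capacity to reserve
// for kube components, so that node allocatable memory ends up strictly less
// than node capacity during the node-allocatable OOM test.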
const KubeReservedMemory = 0.35

var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [LinuxOnly]", framework.WithSerial(), func() {
	f := framework.NewDefaultFramework("nodeallocatable-oomkiller-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	testCases := []testCase{
		{
			name:                   "single process container without resource limits",
			oomTargetContainerName: "oomkill-nodeallocatable-container",
			podSpec: getOOMTargetPod("oomkill-nodeallocatable-pod", "oomkill-nodeallocatable-container",
				getOOMTargetContainerWithoutLimit),
		},
	}

	for _, testCase := range testCases {
		runOomKillerTest(f, testCase, KubeReservedMemory)
	}
})

var _ = SIGDescribe("OOMKiller [LinuxOnly]", framework.WithNodeConformance(), func() {
	f := framework.NewDefaultFramework("oomkiller-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	testCases := []testCase{
		{
			name:                   "single process container",
			oomTargetContainerName: "oomkill-single-target-container",
			podSpec: getOOMTargetPod("oomkill-target-pod", "oomkill-single-target-container",
				getOOMTargetContainer),
		},
		{
			name:                   "init container",
			oomTargetContainerName: "oomkill-target-init-container",
			podSpec: getInitContainerOOMTargetPod("initcontainer-oomkill-target-pod", "oomkill-target-init-container",
				getOOMTargetContainer),
		},
	}
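
	// On cgroup v2 the runtime enables memory.oom.group for the container's
	// cgroup, so when one process is OOM killed the whole container is killed.
	// That makes the multi-process case deterministic enough to verify here.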
	if libcontainercgroups.IsCgroup2UnifiedMode() {
		testCases = append(testCases, testCase{
			name:                   "multi process container",
			oomTargetContainerName: "oomkill-multi-target-container",
			podSpec: getOOMTargetPod("oomkill-target-pod", "oomkill-multi-target-container",
				getOOMTargetContainerMultiProcess),
		})
	}
	for _, tc := range testCases {
		runOomKillerTest(f, tc, 0)
	}
})
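
// runOomKillerTest registers a Ginkgo context that creates the given pod,
// waits for it to terminate, and verifies the target container was OOMKilled.
// A non-zero kubeReservedMemory reserves that fraction of the node's memory
// capacity for kube components before the test runs.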
func runOomKillerTest(f *framework.Framework, testCase testCase, kubeReservedMemory float64) {
	ginkgo.Context(testCase.name, func() {
		if kubeReservedMemory > 0 {
			tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
				if initialConfig.KubeReserved == nil {
					initialConfig.KubeReserved = map[string]string{}
				}
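				// Reserve a fraction of the node's memory capacity for kube
				// components so node allocatable stays below node capacity;
				// otherwise a pod exceeding allocatable can trigger a
				// system-level OOM kill instead of the cgroup-level kill
				// this test expects.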
				initialConfig.KubeReserved["memory"] = fmt.Sprintf("%d", int(kubeReservedMemory*getLocalNode(ctx, f).Status.Capacity.Memory().AsApproximateFloat64()))
			})
		}

		ginkgo.BeforeEach(func() {
			ginkgo.By("setting up the pod to be used in the test")
			e2epod.NewPodClient(f).Create(context.TODO(), testCase.podSpec)
		})

		ginkgo.It("The containers terminated by OOM killer should have the reason set to OOMKilled", func() {
			ginkgo.By("Waiting for the pod to be failed")
			err := e2epod.WaitForPodTerminatedInNamespace(context.TODO(), f.ClientSet, testCase.podSpec.Name, "", f.Namespace.Name)
			framework.ExpectNoError(err, "Failed waiting for pod to terminate, %s/%s", f.Namespace.Name, testCase.podSpec.Name)

			ginkgo.By("Fetching the latest pod status")
			pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), testCase.podSpec.Name, metav1.GetOptions{})
			framework.ExpectNoError(err, "Failed to get the recent pod object for name: %q", testCase.podSpec.Name)

			ginkgo.By("Verifying the OOM target container has the expected reason")
			verifyReasonForOOMKilledContainer(pod, testCase.oomTargetContainerName)
		})

		ginkgo.AfterEach(func() {
			ginkgo.By(fmt.Sprintf("deleting pod: %s", testCase.podSpec.Name))
			e2epod.NewPodClient(f).DeleteSync(context.TODO(), testCase.podSpec.Name, metav1.DeleteOptions{}, framework.PodDeleteTimeout)
		})
	})
}
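
// verifyReasonForOOMKilledContainer asserts that the named container status is
// terminated with exit code 137 and, when the runtime reports it, the reason
// OOMKilled.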
func verifyReasonForOOMKilledContainer(pod *v1.Pod, oomTargetContainerName string) {
	container := e2epod.FindContainerStatusInPod(pod, oomTargetContainerName)
	if container == nil {
		framework.Failf("OOM target pod %q has no status for container %q", pod.Name, oomTargetContainerName)
	}
	if container.State.Terminated == nil {
		framework.Failf("OOM target pod %q, container %q is not in the terminated state", pod.Name, container.Name)
	}
	// Exit code 137 is 128 + SIGKILL(9), the code a process exits with after
	// the kernel OOM killer sends it SIGKILL.
	gomega.Expect(container.State.Terminated.ExitCode).To(gomega.Equal(int32(137)),
		"pod: %q, container: %q has unexpected exitCode: %d", pod.Name, container.Name, container.State.Terminated.ExitCode)
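
	// Some container runtimes do not reliably surface the OOMKilled reason, so
	// only assert on the reason when the runtime actually reported it; the
	// exit-code check above remains the authoritative signal.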
	if container.State.Terminated.Reason == "OOMKilled" {
		gomega.Expect(container.State.Terminated.Reason).To(gomega.Equal("OOMKilled"),
			"pod: %q, container: %q has unexpected reason: %q", pod.Name, container.Name, container.State.Terminated.Reason)
	}
}
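
// getOOMTargetPod returns a pod whose single container is built by
// createContainer and which never restarts, so the terminal OOMKilled state
// is preserved for inspection.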
func getOOMTargetPod(podName string, ctnName string, createContainer func(name string) v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				createContainer(ctnName),
			},
		},
	}
}
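
// getInitContainerOOMTargetPod returns a pod whose init container is the OOM
// target; the regular container is a plain busybox placeholder.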
func getInitContainerOOMTargetPod(podName string, ctnName string, createContainer func(name string) v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			InitContainers: []v1.Container{
				createContainer(ctnName),
			},
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: busyboxImage,
				},
			},
		},
	}
}
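
// getOOMTargetContainer returns a container with a single process that tries
// to allocate more memory than the container's memory limit allows.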
func getOOMTargetContainer(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
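			// Use dd to stream /dev/zero through a single 20M buffer, which
			// exceeds the 15Mi memory limit in one allocation.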
205 "sleep 5 && dd if=/dev/zero of=/dev/null bs=20M",
206 },
207 Resources: v1.ResourceRequirements{
208 Requests: v1.ResourceList{
209 v1.ResourceMemory: resource.MustParse("15Mi"),
210 },
211 Limits: v1.ResourceList{
212 v1.ResourceMemory: resource.MustParse("15Mi"),
213 },
214 },
215 SecurityContext: &v1.SecurityContext{
216 SeccompProfile: &v1.SeccompProfile{
217 Type: v1.SeccompProfileTypeRuntimeDefault,
218 },
219 AllowPrivilegeEscalation: ptr.To(false),
220 RunAsUser: ptr.To[int64](999),
221 RunAsGroup: ptr.To[int64](999),
222 RunAsNonRoot: ptr.To(true),
223 Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
224 },
225 }
226 }
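
// getOOMTargetContainerMultiProcess returns a container with two processes:
// one that tries to allocate more memory than the container limit allows,
// and one that just sleeps.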
func getOOMTargetContainerMultiProcess(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
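			// Run the over-limit dd allocation in the background while the
			// shell sleeps, so the container holds more than one process.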
238 "sleep 5 && dd if=/dev/zero of=/dev/null bs=20M & sleep 86400",
239 },
240 Resources: v1.ResourceRequirements{
241 Requests: v1.ResourceList{
242 v1.ResourceMemory: resource.MustParse("15Mi"),
243 },
244 Limits: v1.ResourceList{
245 v1.ResourceMemory: resource.MustParse("15Mi"),
246 },
247 },
248 SecurityContext: &v1.SecurityContext{
249 SeccompProfile: &v1.SeccompProfile{
250 Type: v1.SeccompProfileTypeRuntimeDefault,
251 },
252 AllowPrivilegeEscalation: ptr.To(false),
253 RunAsUser: ptr.To[int64](999),
254 RunAsGroup: ptr.To[int64](999),
255 RunAsNonRoot: ptr.To(true),
256 Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
257 },
258 }
259 }
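
// getOOMTargetContainerWithoutLimit returns a container with a single process
// that tries to allocate more memory than the node allocatable, with no
// container-level memory limit set.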
func getOOMTargetContainerWithoutLimit(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
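			// dd allocates an I/O buffer of bs bytes, so a 10G block size
			// (with fullblock forcing complete reads) attempts an allocation
			// far larger than node allocatable memory.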
271 "sleep 5 && dd if=/dev/zero of=/dev/null iflag=fullblock count=10 bs=10G",
272 },
273 SecurityContext: &v1.SecurityContext{
274 SeccompProfile: &v1.SeccompProfile{
275 Type: v1.SeccompProfileTypeRuntimeDefault,
276 },
277 AllowPrivilegeEscalation: ptr.To(false),
278 RunAsUser: ptr.To[int64](999),
279 RunAsGroup: ptr.To[int64](999),
280 RunAsNonRoot: ptr.To(true),
281 Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
282 },
283 }
284 }