package e2enode

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
	kubeapi "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/scheduling"
	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/nodefeature"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

const (
	criticalPodName   = "static-critical-pod"
	guaranteedPodName = "guaranteed"
	burstablePodName  = "burstable"
	bestEffortPodName = "best-effort"
)

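// The CriticalPod tests verify that a pod marked node-critical is admitted
// even on a fully packed node, preempting lower-priority pods to make room.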
var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, func() {
	f := framework.NewDefaultFramework("critical-pod-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	ginkgo.Context("when we need to admit a critical pod", func() {
		ginkgo.It("should be able to create and delete a critical pod", func(ctx context.Context) {
			node := getNodeName(ctx, f)

			// Define three non-critical pods (guaranteed, burstable, and
			// best-effort) plus a critical pod that requests the node's
			// entire capacity.
			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)
			nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)
			nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{}, node)
			criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
				// Request the node's entire allocatable CPU and memory so
				// that admitting this pod requires preempting the others.
				Requests: getNodeCPUAndMemoryCapacity(ctx, f),
			}, node)

			// Create the non-critical pods first, then the critical pod, so
			// that admitting the critical pod preempts the non-critical ones.
			e2epod.NewPodClient(f).CreateBatch(ctx, []*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(ctx, criticalPod)

			// Check that the guaranteed and burstable pods were preempted,
			// while the best-effort pod, which requests no resources, kept
			// running.
			updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			for _, p := range updatedPodList.Items {
				if p.Name == nonCriticalBestEffort.Name {
					gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod: %v should not be preempted with status: %#v", p.Name, p.Status)
				} else {
					gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
				}
			}
		})

		f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) {
			node := getNodeName(ctx, f)
			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)

			criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
				// Request the node's entire allocatable CPU and memory so
				// that admitting this pod requires preempting the guaranteed
				// pod.
				Requests: getNodeCPUAndMemoryCapacity(ctx, f),
			}, node)
			criticalPod.Namespace = kubeapi.NamespaceSystem

			ginkgo.By(fmt.Sprintf("create the non-critical pod %q", klog.KObj(nonCriticalGuaranteed)))
			e2epod.NewPodClient(f).CreateSync(ctx, nonCriticalGuaranteed)

			ginkgo.By(fmt.Sprintf("create the critical pod %q", klog.KObj(criticalPod)))
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).Create(ctx, criticalPod)

			ginkgo.By(fmt.Sprintf("wait for the critical pod %q to be running", klog.KObj(criticalPod)))
			err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, criticalPod.Name, kubeapi.NamespaceSystem)
			framework.ExpectNoError(err, "failed waiting for the pod to be running: %q", klog.KObj(criticalPod))

			// Check that the preempted pod carries the DisruptionTarget
			// condition.
			updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			for _, p := range updatedPodList.Items {
				ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
				gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
				if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
					framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
				}
			}
		})
		ginkgo.AfterEach(func(ctx context.Context) {
			// Delete the test pods, including any left over after a failure.
			e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)

			// Log pod and node events to help debug failures.
			logPodEvents(ctx, f)
			logNodeEvents(ctx, f)
		})
	})
})

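// getNodeCPUAndMemoryCapacity returns the allocatable CPU and memory of the
// node under test. It assumes a single-node cluster, as is standard for node
// e2e tests, and returns allocatable rather than raw capacity, since
// allocatable is the most a pod can request and still be admitted.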
func getNodeCPUAndMemoryCapacity(ctx context.Context, f *framework.Framework) v1.ResourceList {
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)

	gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
	capacity := nodeList.Items[0].Status.Allocatable
	return v1.ResourceList{
		v1.ResourceCPU:    capacity[v1.ResourceCPU],
		v1.ResourceMemory: capacity[v1.ResourceMemory],
	}
}

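// getNodeName returns the name of the node under test, again assuming a
// single-node cluster.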
func getNodeName(ctx context.Context, f *framework.Framework) string {
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err)

	gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
	return nodeList.Items[0].GetName()
}

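// getTestPod builds a pause-container pod pinned to the given node. When
// critical is true, the pod is placed in kube-system, annotated as a static
// (file-sourced) pod, and given the system-node-critical priority class,
// which together make the kubelet treat it as a critical pod.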
func getTestPod(critical bool, name string, resources v1.ResourceRequirements, node string) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:      "container",
					Image:     imageutils.GetPauseImageName(),
					Resources: resources,
				},
			},
			NodeName: node,
		},
	}
	if critical {
		// Mark the pod as a static pod sourced from a file and give it the
		// system-node-critical priority class, then sanity-check that the
		// kubelet would indeed classify it as critical.
		pod.ObjectMeta.Namespace = kubeapi.NamespaceSystem
		pod.ObjectMeta.Annotations = map[string]string{
			kubelettypes.ConfigSourceAnnotationKey: kubelettypes.FileSource,
		}
		pod.Spec.PriorityClassName = scheduling.SystemNodeCritical

		if !kubelettypes.IsCriticalPod(pod) {
			framework.Failf("pod %q should be a critical pod", pod.Name)
		}
	} else {
		if kubelettypes.IsCriticalPod(pod) {
			framework.Failf("pod %q should not be a critical pod", pod.Name)
		}
	}
	return pod
}