package windows

import (
	"context"
	"encoding/json"
	"time"

	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	kubeletconfigscheme "k8s.io/kubernetes/pkg/kubelet/apis/config/scheme"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

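// These tests exercise memory limits on Windows nodes: they verify that the
// node's reported allocatable memory matches the value calculated from capacity
// minus the kubelet's reservations and eviction thresholds, and that a pod
// which would exceed allocatable memory fails to schedule.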
var _ = sigDescribe(feature.Windows, "Memory Limits", framework.WithSerial(), framework.WithSlow(), skipUnlessWindows(func() {

	f := framework.NewDefaultFramework("memory-limit-test-windows")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.BeforeEach(func() {
		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
	})

	ginkgo.Context("Allocatable node memory", func() {
		ginkgo.It("should be equal to a calculated allocatable memory value", func(ctx context.Context) {
			checkNodeAllocatableTest(ctx, f)
		})
	})

	ginkgo.Context("attempt to deploy past allocatable memory limits", func() {
		ginkgo.It("should fail deployments of pods once there isn't enough memory", func(ctx context.Context) {
			overrideAllocatableMemoryTest(ctx, f, framework.TestContext.CloudConfig.NumNodes)
		})
	})
}))

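// nodeMemory collects the per-node memory figures that determine allocatable
// memory: total capacity, reported allocatable, the system and kube
// reservations, and the soft/hard eviction thresholds.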
type nodeMemory struct {
	// total memory reported in the node's status
	capacity resource.Quantity
	// memory reported as allocatable for pods
	allocatable resource.Quantity
	// memory reserved for OS-level processes (system-reserved)
	systemReserve resource.Quantity
	// memory reserved for the kubelet and other Kubernetes daemons (kube-reserved)
	kubeReserve resource.Quantity
	// soft eviction threshold for memory.available
	softEviction resource.Quantity
	// hard eviction threshold for memory.available
	hardEviction resource.Quantity
}

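// checkNodeAllocatableTest verifies that the allocatable memory reported in the
// node status equals the value calculated from the kubelet configuration:
//
//	allocatable = capacity - systemReserve - kubeReserve - softEviction - hardEviction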
func checkNodeAllocatableTest(ctx context.Context, f *framework.Framework) {

	nodeMem := getNodeMemory(ctx, f)
	framework.Logf("nodeMem says: %+v", nodeMem)

	// calculate allocatable memory by subtracting the reserved and eviction
	// amounts from capacity
	calculatedNodeAlloc := nodeMem.capacity.DeepCopy()
	calculatedNodeAlloc.Sub(nodeMem.systemReserve)
	calculatedNodeAlloc.Sub(nodeMem.kubeReserve)
	calculatedNodeAlloc.Sub(nodeMem.softEviction)
	calculatedNodeAlloc.Sub(nodeMem.hardEviction)

	// compare the calculated value against the allocatable memory stated in the node status
	gomega.Expect(calculatedNodeAlloc.Cmp(nodeMem.allocatable)).To(gomega.Equal(0), "calculated allocatable memory %+v should equal stated allocatable memory %+v", calculatedNodeAlloc, nodeMem.allocatable)
}

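// overrideAllocatableMemoryTest schedules one pod per Windows node sized to
// consume nearly all of that node's allocatable memory, then verifies that one
// additional pod cannot be scheduled and produces a FailedScheduling event.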
func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework, allocatablePods int) {
	selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	framework.Logf("Scheduling 1 pod per node to consume all allocatable memory")
	for _, node := range nodeList.Items {
		status := node.Status
		// request allocatable memory minus 100Mi of headroom so the pod itself still fits
		podMemLimit := resource.NewQuantity(status.Allocatable.Memory().Value()-(1024*1024*100), resource.BinarySI)
		podName := "mem-test-" + string(uuid.NewUUID())
		framework.Logf("Scheduling pod %s on node %s (allocatable memory=%v) with memory limit %v", podName, node.Name, status.Allocatable.Memory(), podMemLimit)
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: podName,
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name:  podName,
						Image: imageutils.GetPauseImageName(),
						Resources: v1.ResourceRequirements{
							Limits: v1.ResourceList{
								v1.ResourceMemory: *podMemLimit,
							},
						},
					},
				},
				NodeSelector: map[string]string{
					"kubernetes.io/os": "windows",
				},
				NodeName: node.Name,
			},
		}
		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
	}
	framework.Logf("Schedule additional pod which should not get scheduled")
	podName := "mem-failure-pod"
	failurePod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  podName,
					Image: imageutils.GetPauseImageName(),
					Resources: v1.ResourceRequirements{
						Limits: v1.ResourceList{
							v1.ResourceMemory: *resource.NewQuantity(1024*1024*1024, resource.BinarySI),
						},
					},
				},
			},
			NodeSelector: map[string]string{
				"kubernetes.io/os": "windows",
			},
		},
	}
	framework.Logf("Ensuring that pod %s fails to schedule", podName)
	failurePod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, failurePod, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	gomega.Eventually(ctx, func() bool {
		eventList, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(ctx, metav1.ListOptions{})
		framework.ExpectNoError(err)
		for _, e := range eventList.Items {
			// look for a FailedScheduling warning raised for the failure pod
			if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePod.ObjectMeta.Name {
				framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
				return true
			}
		}
		return false
	}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrue())
}

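// getNodeMemory returns the memory details of the first Windows node, combining
// the capacity and allocatable values from the node status with the reservation
// and eviction settings read from the kubelet's configz endpoint.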
func getNodeMemory(ctx context.Context, f *framework.Framework) nodeMemory {
	selector := labels.Set{"kubernetes.io/os": "windows"}.AsSelector()
	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	framework.ExpectNoError(err)

	// use the first Windows node, assuming all agent nodes share the same kubelet config
	gomega.Expect(nodeList.Items).ToNot(gomega.BeEmpty())

	ginkgo.By("Getting memory details from node status and kubelet config")
	status := nodeList.Items[0].Status
	nodeName := nodeList.Items[0].ObjectMeta.Name

	framework.Logf("Getting configuration details for node %s", nodeName)
	request := f.ClientSet.CoreV1().RESTClient().Get().Resource("nodes").Name(nodeName).SubResource("proxy").Suffix("configz")
	rawbytes, err := request.DoRaw(ctx)
	framework.ExpectNoError(err)
	kubeletConfig, err := decodeConfigz(rawbytes)
	framework.ExpectNoError(err)

	// reservations and eviction thresholds that are unset (or unparsable) default to zero
	systemReserve, err := resource.ParseQuantity(kubeletConfig.SystemReserved["memory"])
	if err != nil {
		systemReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	kubeReserve, err := resource.ParseQuantity(kubeletConfig.KubeReserved["memory"])
	if err != nil {
		kubeReserve = *resource.NewQuantity(0, resource.BinarySI)
	}
	hardEviction, err := resource.ParseQuantity(kubeletConfig.EvictionHard["memory.available"])
	if err != nil {
		hardEviction = *resource.NewQuantity(0, resource.BinarySI)
	}
	softEviction, err := resource.ParseQuantity(kubeletConfig.EvictionSoft["memory.available"])
	if err != nil {
		softEviction = *resource.NewQuantity(0, resource.BinarySI)
	}

	nodeMem := nodeMemory{
		capacity:      status.Capacity[v1.ResourceMemory],
		allocatable:   status.Allocatable[v1.ResourceMemory],
		systemReserve: systemReserve,
		hardEviction:  hardEviction,
		kubeReserve:   kubeReserve,
		softEviction:  softEviction,
	}

	return nodeMem
}

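// decodeConfigz decodes the raw response from the kubelet's configz endpoint
// into the internal KubeletConfiguration type.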
func decodeConfigz(contentsBytes []byte) (*kubeletconfig.KubeletConfiguration, error) {
	// the configz endpoint wraps the versioned config as
	// {"kubeletconfig": <v1beta1 KubeletConfiguration>}, so unmarshal into a
	// wrapper struct first and then convert to the internal type
	type configzWrapper struct {
		ComponentConfig kubeletconfigv1beta1.KubeletConfiguration `json:"kubeletconfig"`
	}

	configz := configzWrapper{}
	kubeCfg := kubeletconfig.KubeletConfiguration{}

	err := json.Unmarshal(contentsBytes, &configz)
	if err != nil {
		return nil, err
	}

	scheme, _, err := kubeletconfigscheme.NewSchemeAndCodecs()
	if err != nil {
		return nil, err
	}
	err = scheme.Convert(&configz.ComponentConfig, &kubeCfg, nil)
	if err != nil {
		return nil, err
	}

	return &kubeCfg, nil
}