package e2enode

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager/state"
	"k8s.io/kubernetes/pkg/kubelet/util"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"
	"k8s.io/utils/cpuset"
	"k8s.io/utils/pointer"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

const (
	evictionHardMemory = "memory.available"
	resourceMemory     = "memory"
	staticPolicy       = "Static"
	nonePolicy         = "None"
	hugepages2MiCount  = 8
)

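// memoryManagerCtnAttributes describes the CPU, memory, and optional 2Mi hugepages
// limits requested by a single test container.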
type memoryManagerCtnAttributes struct {
	ctnName      string
	cpus         string
	memory       string
	hugepages2Mi string
}

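// makeMemoryManagerContainers builds container specs that run ctnCmd with the given
// resource limits. It returns the containers and whether any of them requests 2Mi
// hugepages and therefore needs the hugepages volume mount.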
func makeMemoryManagerContainers(ctnCmd string, ctnAttributes []memoryManagerCtnAttributes) ([]v1.Container, bool) {
	hugepagesMount := false
	var containers []v1.Container
	for _, ctnAttr := range ctnAttributes {
		ctn := v1.Container{
			Name:  ctnAttr.ctnName,
			Image: busyboxImage,
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse(ctnAttr.cpus),
					v1.ResourceMemory: resource.MustParse(ctnAttr.memory),
				},
			},
			Command: []string{"sh", "-c", ctnCmd},
		}
		if ctnAttr.hugepages2Mi != "" {
			hugepagesMount = true

			ctn.Resources.Limits[hugepagesResourceName2Mi] = resource.MustParse(ctnAttr.hugepages2Mi)
			ctn.VolumeMounts = []v1.VolumeMount{
				{
					Name:      "hugepages-2mi",
					MountPath: "/hugepages-2Mi",
				},
			}
		}

		containers = append(containers, ctn)
	}

	return containers, hugepagesMount
}

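// makeMemoryManagerPod creates a pod whose containers print their allowed NUMA memory
// nodes (taken from /proc/self/status) and then sleep. When any container requests 2Mi
// hugepages, the pod also gets a hugepages-backed emptyDir volume.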
func makeMemoryManagerPod(podName string, initCtnAttributes, ctnAttributes []memoryManagerCtnAttributes) *v1.Pod {
	hugepagesMount := false
	memsetCmd := "grep Mems_allowed_list /proc/self/status | cut -f2"
	memsetSleepCmd := memsetCmd + "&& sleep 1d"
	var containers, initContainers []v1.Container
	if len(initCtnAttributes) > 0 {
		initContainers, _ = makeMemoryManagerContainers(memsetCmd, initCtnAttributes)
	}
	containers, hugepagesMount = makeMemoryManagerContainers(memsetSleepCmd, ctnAttributes)

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy:  v1.RestartPolicyNever,
			Containers:     containers,
			InitContainers: initContainers,
		},
	}

	if hugepagesMount {
		pod.Spec.Volumes = []v1.Volume{
			{
				Name: "hugepages-2mi",
				VolumeSource: v1.VolumeSource{
					EmptyDir: &v1.EmptyDirVolumeSource{
						Medium: mediumHugepages2Mi,
					},
				},
			},
		}
	}

	return pod
}

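// getMemoryManagerState reads the memory manager checkpoint file from the node and
// unmarshals it into a MemoryManagerCheckpoint.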
func getMemoryManagerState() (*state.MemoryManagerCheckpoint, error) {
	if _, err := os.Stat(memoryManagerStateFile); os.IsNotExist(err) {
		return nil, fmt.Errorf("the memory manager state file %s does not exist", memoryManagerStateFile)
	}

	out, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("cat %s", memoryManagerStateFile)).Output()
	if err != nil {
		return nil, fmt.Errorf("failed to run command 'cat %s': out: %s, err: %w", memoryManagerStateFile, out, err)
	}

	memoryManagerCheckpoint := &state.MemoryManagerCheckpoint{}
	if err := json.Unmarshal(out, memoryManagerCheckpoint); err != nil {
		return nil, fmt.Errorf("failed to unmarshal memory manager state file: %w", err)
	}
	return memoryManagerCheckpoint, nil
}

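// getAllocatableMemoryFromStateFile converts the per-NUMA-node memory tables from the
// checkpoint into a flat list of allocatable memory blocks, skipping resources without
// allocatable memory.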
func getAllocatableMemoryFromStateFile(s *state.MemoryManagerCheckpoint) []state.Block {
	var allocatableMemory []state.Block
	for numaNodeID, numaNodeState := range s.MachineState {
		for resourceName, memoryTable := range numaNodeState.MemoryMap {
			if memoryTable.Allocatable == 0 {
				continue
			}

			block := state.Block{
				NUMAAffinity: []int{numaNodeID},
				Type:         resourceName,
				Size:         memoryTable.Allocatable,
			}
			allocatableMemory = append(allocatableMemory, block)
		}
	}
	return allocatableMemory
}

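// memoryManagerKubeletParams groups the kubelet configuration parameters relevant to
// the memory manager tests.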
type memoryManagerKubeletParams struct {
	policy               string
	systemReservedMemory []kubeletconfig.MemoryReservation
	systemReserved       map[string]string
	kubeReserved         map[string]string
	evictionHard         map[string]string
}

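// updateKubeletConfigWithMemoryManagerParams applies the memory manager policy and the
// system-reserved, kube-reserved, eviction-hard, and reserved-memory settings from
// params on top of the initial kubelet configuration.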
func updateKubeletConfigWithMemoryManagerParams(initialCfg *kubeletconfig.KubeletConfiguration, params *memoryManagerKubeletParams) {
	if initialCfg.FeatureGates == nil {
		initialCfg.FeatureGates = map[string]bool{}
	}

	initialCfg.MemoryManagerPolicy = params.policy

	if initialCfg.SystemReserved == nil {
		initialCfg.SystemReserved = map[string]string{}
	}
	for resourceName, value := range params.systemReserved {
		initialCfg.SystemReserved[resourceName] = value
	}

	if initialCfg.KubeReserved == nil {
		initialCfg.KubeReserved = map[string]string{}
	}
	for resourceName, value := range params.kubeReserved {
		initialCfg.KubeReserved[resourceName] = value
	}

	if initialCfg.EvictionHard == nil {
		initialCfg.EvictionHard = map[string]string{}
	}
	for resourceName, value := range params.evictionHard {
		initialCfg.EvictionHard[resourceName] = value
	}

	if initialCfg.ReservedMemory == nil {
		initialCfg.ReservedMemory = []kubeletconfig.MemoryReservation{}
	}
	initialCfg.ReservedMemory = append(initialCfg.ReservedMemory, params.systemReservedMemory...)
}

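// getAllNUMANodes parses the lscpu output and returns the sorted IDs of all NUMA nodes
// available on the node.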
func getAllNUMANodes() []int {
	outData, err := exec.Command("/bin/sh", "-c", "lscpu").Output()
	framework.ExpectNoError(err)

	numaNodeRegex, err := regexp.Compile(`NUMA node(\d+) CPU\(s\):`)
	framework.ExpectNoError(err)

	matches := numaNodeRegex.FindAllSubmatch(outData, -1)

	var numaNodes []int
	for _, m := range matches {
		n, err := strconv.Atoi(string(m[1]))
		framework.ExpectNoError(err)

		numaNodes = append(numaNodes, n)
	}

	sort.Ints(numaNodes)
	return numaNodes
}

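// Serial because the test updates the kubelet configuration.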
var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.WithSerial(), feature.MemoryManager, func() {
	var (
		allNUMANodes             []int
		ctnParams, initCtnParams []memoryManagerCtnAttributes
		is2MiHugepagesSupported  *bool
		isMultiNUMASupported     *bool
		testPod                  *v1.Pod
	)

	f := framework.NewDefaultFramework("memory-manager-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	memoryQuantity := resource.MustParse("1100Mi")
	defaultKubeParams := &memoryManagerKubeletParams{
		systemReservedMemory: []kubeletconfig.MemoryReservation{
			{
				NumaNode: 0,
				Limits: v1.ResourceList{
					resourceMemory: memoryQuantity,
				},
			},
		},
		systemReserved: map[string]string{resourceMemory: "500Mi"},
		kubeReserved:   map[string]string{resourceMemory: "500Mi"},
		evictionHard:   map[string]string{evictionHardMemory: "100Mi"},
	}

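	// verifyMemoryPinning checks that the NUMA nodes reported by the pod's first
	// container match the expected NUMA node IDs.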
	verifyMemoryPinning := func(ctx context.Context, pod *v1.Pod, numaNodeIDs []int) {
		ginkgo.By("Verifying the NUMA pinning")

		output, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
		framework.ExpectNoError(err)

		currentNUMANodeIDs, err := cpuset.Parse(strings.Trim(output, "\n"))
		framework.ExpectNoError(err)

		gomega.Expect(numaNodeIDs).To(gomega.Equal(currentNUMANodeIDs.List()))
	}

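	// waitingForHugepages waits until the node reports the expected capacity of 2Mi
	// hugepages.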
	waitingForHugepages := func(ctx context.Context, hugepagesCount int) {
		gomega.Eventually(ctx, func(ctx context.Context) error {
			node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
			if err != nil {
				return err
			}

			capacity, ok := node.Status.Capacity[v1.ResourceName(hugepagesResourceName2Mi)]
			if !ok {
				return fmt.Errorf("the node does not have the resource %s", hugepagesResourceName2Mi)
			}

			size, succeed := capacity.AsInt64()
			if !succeed {
				return fmt.Errorf("failed to convert quantity to int64")
			}

			expectedSize := int64(hugepagesCount * hugepagesSize2M * 1024)
			if size != expectedSize {
				return fmt.Errorf("the actual size %d is different from the expected one %d", size, expectedSize)
			}
			return nil
		}, time.Minute, framework.Poll).Should(gomega.BeNil())
	}

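	// Detect the node capabilities once and, when 2Mi hugepages are supported,
	// pre-allocate them on NUMA node 0 before each test.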
	ginkgo.BeforeEach(func(ctx context.Context) {
		if isMultiNUMASupported == nil {
			isMultiNUMASupported = pointer.BoolPtr(isMultiNUMA())
		}

		if is2MiHugepagesSupported == nil {
			is2MiHugepagesSupported = pointer.BoolPtr(isHugePageAvailable(hugepagesSize2M))
		}

		if len(allNUMANodes) == 0 {
			allNUMANodes = getAllNUMANodes()
		}

		if *is2MiHugepagesSupported {
			ginkgo.By("Configuring hugepages")
			gomega.Eventually(ctx, func() error {
				return configureHugePages(hugepagesSize2M, hugepages2MiCount, pointer.IntPtr(0))
			}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
		}
	})

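	// Build the test pod from the parameters set by the individual tests, requesting
	// hugepages for each container when the node supports them.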
	ginkgo.JustBeforeEach(func(ctx context.Context) {
		if *is2MiHugepagesSupported {
			ginkgo.By("Waiting for hugepages resource to become available on the local node")
			waitingForHugepages(ctx, hugepages2MiCount)

			for i := 0; i < len(ctnParams); i++ {
				ctnParams[i].hugepages2Mi = "8Mi"
			}
		}

		if len(ctnParams) > 0 {
			testPod = makeMemoryManagerPod(ctnParams[0].ctnName, initCtnParams, ctnParams)
		}
	})

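	// Delete the test pod and release the hugepages allocated for the test.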
	ginkgo.JustAfterEach(func(ctx context.Context) {
		if testPod != nil && testPod.Name != "" {
			e2epod.NewPodClient(f).DeleteSync(ctx, testPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
		}

		if *is2MiHugepagesSupported {
			ginkgo.By("Releasing allocated hugepages")
			gomega.Eventually(ctx, func() error {
				return configureHugePages(hugepagesSize2M, 0, pointer.IntPtr(0))
			}, 90*time.Second, 15*time.Second).ShouldNot(gomega.HaveOccurred(), "failed to release hugepages")
		}
	})

	ginkgo.Context("with static policy", func() {
		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
			kubeParams := *defaultKubeParams
			kubeParams.policy = staticPolicy
			updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams)
		})

		ginkgo.JustAfterEach(func() {
			ctnParams = []memoryManagerCtnAttributes{}
			initCtnParams = []memoryManagerCtnAttributes{}
		})

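		// The test compares the memory reported by the pod resources API against the
		// allocatable memory recorded in the memory manager state file.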
		ginkgo.It("should report memory data during request to pod resources GetAllocatableResources", func(ctx context.Context) {
			endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
			framework.ExpectNoError(err)

			cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
			framework.ExpectNoError(err)
			defer conn.Close()

			resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
			framework.ExpectNoError(err)
			gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())

			stateData, err := getMemoryManagerState()
			framework.ExpectNoError(err)

			stateAllocatableMemory := getAllocatableMemoryFromStateFile(stateData)
			gomega.Expect(resp.Memory).To(gomega.HaveLen(len(stateAllocatableMemory)))

			for _, containerMemory := range resp.Memory {
				gomega.Expect(containerMemory.Topology).NotTo(gomega.BeNil())
				gomega.Expect(containerMemory.Topology.Nodes).To(gomega.HaveLen(1))
				gomega.Expect(containerMemory.Topology.Nodes[0]).NotTo(gomega.BeNil())

				numaNodeID := int(containerMemory.Topology.Nodes[0].ID)
				for _, numaStateMemory := range stateAllocatableMemory {
					gomega.Expect(numaStateMemory.NUMAAffinity).To(gomega.HaveLen(1))
					if numaNodeID != numaStateMemory.NUMAAffinity[0] {
						continue
					}

					if containerMemory.MemoryType != string(numaStateMemory.Type) {
						continue
					}

					gomega.Expect(containerMemory.Size_).To(gomega.BeEquivalentTo(numaStateMemory.Size))
				}
			}

			gomega.Expect(resp.Memory).ToNot(gomega.BeEmpty())
		})

		ginkgo.When("guaranteed pod has init and app containers", func() {
			ginkgo.BeforeEach(func() {
				ctnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "memory-manager-static",
						cpus:    "100m",
						memory:  "128Mi",
					},
				}

				initCtnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "init-memory-manager-static",
						cpus:    "100m",
						memory:  "128Mi",
					},
				}
			})

			ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
				ginkgo.By("Running the test pod")
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				if !*isMultiNUMASupported {
					return
				}

				verifyMemoryPinning(ctx, testPod, []int{0})
			})
		})

		ginkgo.When("guaranteed pod has only app containers", func() {
			ginkgo.BeforeEach(func() {
				ctnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "memory-manager-static",
						cpus:    "100m",
						memory:  "128Mi",
					},
				}
			})

			ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
				ginkgo.By("Running the test pod")
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				if !*isMultiNUMASupported {
					return
				}

				verifyMemoryPinning(ctx, testPod, []int{0})
			})
		})

		ginkgo.When("multiple guaranteed pods started", func() {
			var testPod2 *v1.Pod

			ginkgo.BeforeEach(func() {
				ctnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "memory-manager-static",
						cpus:    "100m",
						memory:  "128Mi",
					},
				}
			})

			ginkgo.JustBeforeEach(func() {
				testPod2 = makeMemoryManagerPod("memory-manager-static", initCtnParams, ctnParams)
			})

			ginkgo.It("should succeed to start all pods", func(ctx context.Context) {
				ginkgo.By("Running the test pod")
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				ginkgo.By("Running the test pod 2")
				testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2)

				if !*isMultiNUMASupported {
					return
				}

				verifyMemoryPinning(ctx, testPod, []int{0})
				verifyMemoryPinning(ctx, testPod2, []int{0})
			})

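			// The test checks that, for every guaranteed container, the memory reported by
			// the pod resources List API matches the container's memory limits.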
			ginkgo.It("should report memory data for each guaranteed pod and container during request to pod resources List", func(ctx context.Context) {
				ginkgo.By("Running the test pod")
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				ginkgo.By("Running the test pod 2")
				testPod2 = e2epod.NewPodClient(f).CreateSync(ctx, testPod2)

				endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
				framework.ExpectNoError(err)

				cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
				framework.ExpectNoError(err)
				defer conn.Close()

				resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
				framework.ExpectNoError(err)

				for _, pod := range []*v1.Pod{testPod, testPod2} {
					for _, podResource := range resp.PodResources {
						if podResource.Name != pod.Name {
							continue
						}

						for _, c := range pod.Spec.Containers {
							for _, containerResource := range podResource.Containers {
								if containerResource.Name != c.Name {
									continue
								}

								for _, containerMemory := range containerResource.Memory {
									q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)]
									value, ok := q.AsInt64()
									gomega.Expect(ok).To(gomega.BeTrue())
									gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_))
								}
							}
						}
					}
				}
			})

			ginkgo.JustAfterEach(func(ctx context.Context) {
				if testPod2.Name != "" {
					e2epod.NewPodClient(f).DeleteSync(ctx, testPod2.Name, metav1.DeleteOptions{}, 2*time.Minute)
				}
			})
		})

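		// Create a workload pod on every NUMA node that consumes almost all of its free
		// memory, so the guaranteed test pod cannot be admitted by the memory manager.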
		ginkgo.When("guaranteed pod memory request is bigger than free memory on each NUMA node", func() {
			var workloadPods []*v1.Pod

			ginkgo.BeforeEach(func() {
				if !*isMultiNUMASupported {
					ginkgo.Skip("The machine has fewer than two NUMA nodes")
				}

				ctnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "memory-manager-static",
						cpus:    "100m",
						memory:  "384Mi",
					},
				}
			})

			ginkgo.JustBeforeEach(func(ctx context.Context) {
				stateData, err := getMemoryManagerState()
				framework.ExpectNoError(err)

				for _, memoryState := range stateData.MachineState {
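					// Request all but 256Mi of the free memory on this NUMA node, leaving too
					// little room for the 384Mi guaranteed test pod.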
					workloadPodMemory := memoryState.MemoryMap[v1.ResourceMemory].Free - 256*1024*1024
					memoryQuantity := resource.NewQuantity(int64(workloadPodMemory), resource.BinarySI)
					workloadCtnAttrs := []memoryManagerCtnAttributes{
						{
							ctnName: "workload-pod",
							cpus:    "100m",
							memory:  memoryQuantity.String(),
						},
					}
					workloadPod := makeMemoryManagerPod(workloadCtnAttrs[0].ctnName, initCtnParams, workloadCtnAttrs)

					workloadPod = e2epod.NewPodClient(f).CreateSync(ctx, workloadPod)
					workloadPods = append(workloadPods, workloadPod)
				}
			})

			ginkgo.It("should be rejected", func(ctx context.Context) {
				ginkgo.By("Creating the pod")
				testPod = e2epod.NewPodClient(f).Create(ctx, testPod)

				ginkgo.By("Checking that pod failed to start because of admission error")
				gomega.Eventually(ctx, func() bool {
					tmpPod, err := e2epod.NewPodClient(f).Get(ctx, testPod.Name, metav1.GetOptions{})
					framework.ExpectNoError(err)

					if tmpPod.Status.Phase != v1.PodFailed {
						return false
					}

					if tmpPod.Status.Reason != "UnexpectedAdmissionError" {
						return false
					}

					if !strings.Contains(tmpPod.Status.Message, "Allocate failed due to [memorymanager]") {
						return false
					}

					return true
				}, time.Minute, 5*time.Second).Should(
					gomega.BeTrue(),
					"the pod started successfully, but it was expected to fail with an admission error",
				)
			})

			ginkgo.JustAfterEach(func(ctx context.Context) {
				for _, workloadPod := range workloadPods {
					if workloadPod.Name != "" {
						e2epod.NewPodClient(f).DeleteSync(ctx, workloadPod.Name, metav1.DeleteOptions{}, 2*time.Minute)
					}
				}
			})
		})
	})

	ginkgo.Context("with none policy", func() {
		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
			kubeParams := *defaultKubeParams
			kubeParams.policy = nonePolicy
			updateKubeletConfigWithMemoryManagerParams(initialConfig, &kubeParams)
		})

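		// The unnamed context scopes the shared container parameters used by the tests below.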
		ginkgo.Context("", func() {
			ginkgo.BeforeEach(func() {
				ctnParams = []memoryManagerCtnAttributes{
					{
						ctnName: "memory-manager-none",
						cpus:    "100m",
						memory:  "128Mi",
					},
				}
			})

			ginkgo.It("should not report any memory data during request to pod resources GetAllocatableResources", func(ctx context.Context) {
				endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
				framework.ExpectNoError(err)

				cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
				framework.ExpectNoError(err)
				defer conn.Close()

				resp, err := cli.GetAllocatableResources(ctx, &kubeletpodresourcesv1.AllocatableResourcesRequest{})
				framework.ExpectNoError(err)

				gomega.Expect(resp.Memory).To(gomega.BeEmpty())
			})

			ginkgo.It("should not report any memory data during request to pod resources List", func(ctx context.Context) {
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
				framework.ExpectNoError(err)

				cli, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
				framework.ExpectNoError(err)
				defer conn.Close()

				resp, err := cli.List(ctx, &kubeletpodresourcesv1.ListPodResourcesRequest{})
				framework.ExpectNoError(err)

				for _, podResource := range resp.PodResources {
					if podResource.Name != testPod.Name {
						continue
					}

					for _, containerResource := range podResource.Containers {
						gomega.Expect(containerResource.Memory).To(gomega.BeEmpty())
					}
				}
			})

			ginkgo.It("should succeed to start the pod", func(ctx context.Context) {
				testPod = e2epod.NewPodClient(f).CreateSync(ctx, testPod)

				if !*isMultiNUMASupported {
					return
				}

				verifyMemoryPinning(ctx, testPod, allNUMANodes)
			})
		})
	})
})