package node

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)

const (
	// Interval at which to poll /runningpods on a node.
	pollInterval = 1 * time.Second

	// Interval at which to poll container stats on a node.
	containerStatsPollingInterval = 5 * time.Second

	// Maximum number of nodes that these tests label and inspect.
	maxNodesToCheck = 10
)
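
// getPodMatches returns the set of pod names on the given node that are in the
// given namespace and share the given podNamePrefix, as reported by the
// kubelet's /runningpods endpoint. Errors are logged and yield an empty set so
// that callers can keep polling.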
func getPodMatches(ctx context.Context, c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
	matches := sets.NewString()
	framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
	runningPods, err := e2ekubelet.GetKubeletPods(ctx, c, nodeName)
	if err != nil {
		framework.Logf("Error checking running pods on %v: %v", nodeName, err)
		return matches
	}
	for _, pod := range runningPods.Items {
		if pod.Namespace == namespace && strings.HasPrefix(pod.Name, podNamePrefix) {
			matches.Insert(pod.Name)
		}
	}
	return matches
}
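
// waitTillNPodsRunningOnNodes polls the /runningpods endpoint of every kubelet
// in nodeNames until exactly targetNumPods pods matching podNamePrefix and
// namespace are seen, or the timeout expires. Pods returned by /runningpods do
// not carry their original labels, so the name prefix (set by the RC in this
// test) is used to identify them instead of a label selector.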
func waitTillNPodsRunningOnNodes(ctx context.Context, c clientset.Interface, nodeNames sets.String, podNamePrefix string, namespace string, targetNumPods int, timeout time.Duration) error {
	return wait.PollWithContext(ctx, pollInterval, timeout, func(ctx context.Context) (bool, error) {
		matchCh := make(chan sets.String, len(nodeNames))
		for _, item := range nodeNames.List() {
			// Launch a goroutine per node so the nodes are polled in parallel.
			nodeName := item
			go func() {
				matchCh <- getPodMatches(ctx, c, nodeName, podNamePrefix, namespace)
			}()
		}

		seen := sets.NewString()
		for i := 0; i < len(nodeNames.List()); i++ {
			seen = seen.Union(<-matchCh)
		}
		if seen.Len() == targetNumPods {
			return true, nil
		}
		framework.Logf("Waiting for %d pods to be running on the nodes; %d are currently running", targetNumPods, seen.Len())
		return false, nil
	})
}
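
// restartNfsServer restarts the NFS daemon in the passed-in nfs-server pod by
// exec'ing `rpc.nfsd 1` in it.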
func restartNfsServer(serverPod *v1.Pod) {
	const startcmd = "/usr/sbin/rpc.nfsd 1"
	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", startcmd)
}
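
// stopNfsServer stops the NFS daemon in the passed-in nfs-server pod by
// exec'ing `rpc.nfsd 0` in it, leaving the pod itself running.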
func stopNfsServer(serverPod *v1.Pod) {
	const stopcmd = "/usr/sbin/rpc.nfsd 0"
	ns := fmt.Sprintf("--namespace=%v", serverPod.Namespace)
	e2ekubectl.RunKubectlOrDie(ns, "exec", ns, serverPod.Name, "--", "/bin/sh", "-c", stopcmd)
}
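
// createPodUsingNfs creates a privileged pod that mounts the NFS export served
// at nfsIP under /mnt and runs the supplied shell command, waits for the pod
// to become ready, and returns a freshly read copy of the pod object.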
func createPodUsingNfs(ctx context.Context, f *framework.Framework, c clientset.Interface, ns, nfsIP, cmd string) *v1.Pod {
	ginkgo.By("create pod using nfs volume")

	isPrivileged := true
	cmdLine := []string{"-c", cmd}
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pod-nfs-vol-",
			Namespace:    ns,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "pod-nfs-vol",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    cmdLine,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "nfs-vol",
							MountPath: "/mnt",
						},
					},
					SecurityContext: &v1.SecurityContext{
						Privileged: &isPrivileged,
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever, // don't restart the pod
			Volumes: []v1.Volume{
				{
					Name: "nfs-vol",
					VolumeSource: v1.VolumeSource{
						NFS: &v1.NFSVolumeSource{
							Server:   nfsIP,
							Path:     "/exports", // path exported by the NFS server pod
							ReadOnly: false,
						},
					},
				},
			},
		},
	}
	rtnPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	// Wait until the pod is ready, then re-read it to pick up its assigned UID.
	err = e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, rtnPod.Name, ns, framework.PodStartTimeout)
	framework.ExpectNoError(err)

	rtnPod, err = c.CoreV1().Pods(ns).Get(ctx, rtnPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return rtnPod
}
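
// getHostExternalAddress returns the first external IP address of the node
// hosting the given pod, or an error if the node does not have one.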
func getHostExternalAddress(ctx context.Context, client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
	node, err := client.CoreV1().Nodes().Get(ctx, p.Spec.NodeName, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	for _, address := range node.Status.Addresses {
		if address.Type == v1.NodeExternalIP && address.Address != "" {
			externalAddress = address.Address
			break
		}
	}
	if externalAddress == "" {
		err = fmt.Errorf("no external address for pod %v on node %v",
			p.Name, p.Spec.NodeName)
	}
	return
}
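
// checkPodCleanup uses SSH to check whether the pod's UID directory and its
// NFS mount are still present on the host. If expectClean is true, both are
// expected to disappear within the timeout; otherwise both are expected to
// persist.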
func checkPodCleanup(ctx context.Context, c clientset.Interface, pod *v1.Pod, expectClean bool) {
	timeout := 5 * time.Minute
	poll := 20 * time.Second
	podDir := filepath.Join("/var/lib/kubelet/pods", string(pod.UID))
	mountDir := filepath.Join(podDir, "volumes", "kubernetes.io~nfs")
	// Use the node's external IP so we can SSH to it.
	nodeIP, err := getHostExternalAddress(ctx, c, pod)
	framework.ExpectNoError(err)

	condMsg := "deleted"
	if !expectClean {
		condMsg = "present"
	}

	// Table of host checks to perform (order may matter, so not using a map).
	type testT struct {
		feature string // the host state being checked
		cmd     string // remote command to execute on the node
	}
	tests := []testT{
		{
			feature: "pod UID directory",
			cmd:     fmt.Sprintf("sudo ls %v", podDir),
		},
		{
			feature: "pod nfs mount",
			cmd:     fmt.Sprintf("sudo mount | grep %v", mountDir),
		},
	}

	for _, test := range tests {
		framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
		err = wait.PollWithContext(ctx, poll, timeout, func(ctx context.Context) (bool, error) {
			result, err := e2essh.NodeExec(ctx, nodeIP, test.cmd, framework.TestContext.Provider)
			framework.ExpectNoError(err)
			e2essh.LogResult(result)
			ok := result.Code == 0 && len(result.Stdout) > 0 && len(result.Stderr) == 0
			if expectClean && ok { // keep trying
				return false, nil
			}
			if !expectClean && !ok { // stop the wait loop
				return true, fmt.Errorf("%v is gone but expected to exist", test.feature)
			}
			return true, nil // done, host is as expected
		})
		framework.ExpectNoError(err, fmt.Sprintf("Host (%v) cleanup error: %v. Expected %q to be %v", nodeIP, err, test.feature, condMsg))
	}

	if expectClean {
		framework.Logf("Pod's host has been cleaned up")
	} else {
		framework.Logf("Pod's host has not been cleaned up (per expectation)")
	}
}

var _ = SIGDescribe("kubelet", func() {
	var (
		c  clientset.Interface
		ns string
	)
	f := framework.NewDefaultFramework("kubelet")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})

	ginkgo.Describe("Clean up pods on node", func() {
		var (
			numNodes        int
			nodeNames       sets.String
			nodeLabels      map[string]string
			resourceMonitor *e2ekubelet.ResourceMonitor
		)
		type DeleteTest struct {
			podsPerNode int
			timeout     time.Duration
		}

		deleteTests := []DeleteTest{
			{podsPerNode: 10, timeout: 1 * time.Minute},
		}

		// start labels the nodes, records their names, and (in small clusters)
		// starts the resource monitor; it is called at the beginning of each It
		// below.
		start := func(ctx context.Context) {
			// Use node labels to restrict the pods to be assigned only to the
			// nodes we observe.
			nodeLabels = make(map[string]string)
			nodeLabels["kubelet_cleanup"] = "true"
			nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, c, maxNodesToCheck)
			framework.ExpectNoError(err)
			numNodes = len(nodes.Items)
			nodeNames = sets.NewString()
			for i := 0; i < len(nodes.Items); i++ {
				nodeNames.Insert(nodes.Items[i].Name)
			}
			for nodeName := range nodeNames {
				for k, v := range nodeLabels {
					e2enode.AddOrUpdateLabelOnNode(c, nodeName, k, v)
					ginkgo.DeferCleanup(e2enode.RemoveLabelOffNode, c, nodeName, k)
				}
			}

			// While the test only uses a bounded number of nodes, we need to know
			// the actual number of nodes in the cluster to avoid running
			// resourceMonitor against large clusters.
			actualNodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
			framework.ExpectNoError(err)

			// Start resourceMonitor only in small clusters.
			if len(actualNodes.Items) <= maxNodesToCheck {
				resourceMonitor = e2ekubelet.NewResourceMonitor(f.ClientSet, e2ekubelet.TargetContainers(), containerStatsPollingInterval)
				resourceMonitor.Start(ctx)
				ginkgo.DeferCleanup(resourceMonitor.Stop)
			}
		}

		for _, itArg := range deleteTests {
			name := fmt.Sprintf(
				"kubelet should be able to delete %d pods per node in %v.", itArg.podsPerNode, itArg.timeout)
			itArg := itArg
			ginkgo.It(name, func(ctx context.Context) {
				start(ctx)
				totalPods := itArg.podsPerNode * numNodes
				ginkgo.By(fmt.Sprintf("Creating an RC of %d pods and waiting until all pods of this RC are running", totalPods))
				rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))

				err := e2erc.RunRC(ctx, testutils.RCConfig{
					Client:       f.ClientSet,
					Name:         rcName,
					Namespace:    f.Namespace.Name,
					Image:        imageutils.GetPauseImageName(),
					Replicas:     totalPods,
					NodeSelector: nodeLabels,
				})
				framework.ExpectNoError(err)

				// Perform a sanity check so that we know all desired pods are
				// running on the nodes according to the kubelet. The timeout is
				// only 30 seconds here because e2erc.RunRC already waited for
				// all pods to transition to the running status.
				err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
				framework.ExpectNoError(err)
				if resourceMonitor != nil {
					resourceMonitor.LogLatest()
				}

				ginkgo.By("Deleting the RC")
				err = e2erc.DeleteRCAndWaitForGC(ctx, f.ClientSet, f.Namespace.Name, rcName)
				framework.ExpectNoError(err)

				// Check that the pods really are gone by querying /runningpods on
				// every node. The /runningpods handler checks the container
				// runtime (or its cache) and returns a list of running pods. Some
				// possible causes of failures that this check stresses:
				//   - kubelet deadlock
				//   - a bug in graceful termination (if it is enabled)
				//   - slow pod deletion in the container runtime
				deleteStart := time.Now()
				err = waitTillNPodsRunningOnNodes(ctx, f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
				framework.ExpectNoError(err)
				framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
					time.Since(deleteStart))
				if resourceMonitor != nil {
					resourceMonitor.LogCPUSummary()
				}
			})
		}
	})
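
	// Test host cleanup when disrupting the volume environment.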
	f.Describe("host cleanup with volume mounts [HostCleanup]", f.WithFlaky(), func() {

		type hostCleanupTest struct {
			itDescr string
			podCmd  string
		}
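
		// Disrupt the nfs-server pod after a client pod has mounted the NFS
		// volume. Note that the nfs-server is stopped, NOT deleted, in order to
		// preserve its IP address: a replacement pod would get a different IP,
		// and the client pod's stale mount could then never be unmounted.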
		ginkgo.Context("Host cleanup after disrupting NFS volume [NFS]", func() {
			var (
				nfsServerPod *v1.Pod
				nfsIP        string
				pod          *v1.Pod // client pod
			)

			// Fill in the test table for this context.
			testTbl := []hostCleanupTest{
				{
					itDescr: "after stopping the nfs-server and deleting the (sleeping) client pod, the NFS mount and the pod's UID directory should be removed.",
					podCmd:  "sleep 6000", // keep the pod running without touching the volume
				},
				{
					itDescr: "after stopping the nfs-server and deleting the (active) client pod, the NFS mount and the pod's UID directory should be removed.",
					podCmd:  "while true; do echo FeFieFoFum >>/mnt/SUCCESS; sleep 1; cat /mnt/SUCCESS; done",
				},
			}

			ginkgo.BeforeEach(func(ctx context.Context) {
				e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
				_, nfsServerPod, nfsIP = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"})
			})

			ginkgo.AfterEach(func(ctx context.Context) {
				err := e2epod.DeletePodWithWait(ctx, c, pod)
				framework.ExpectNoError(err, "AfterEach: Failed to delete client pod ", pod.Name)
				err = e2epod.DeletePodWithWait(ctx, c, nfsServerPod)
				framework.ExpectNoError(err, "AfterEach: Failed to delete server pod ", nfsServerPod.Name)
			})
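
			// Execute the It blocks from the table of tests above.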
			for _, t := range testTbl {
				t := t
				ginkgo.It(t.itDescr, func(ctx context.Context) {
					pod = createPodUsingNfs(ctx, f, c, ns, nfsIP, t.podCmd)

					ginkgo.By("Stop the NFS server")
					stopNfsServer(nfsServerPod)

					ginkgo.By("Delete the pod mounted to the NFS volume -- expect failure")
					err := e2epod.DeletePodWithWait(ctx, c, pod)
					gomega.Expect(err).To(gomega.HaveOccurred())
					// The pod object is now stale, but is intentionally not nil.

					ginkgo.By("Check if pod's host has been cleaned up -- expect not")
					checkPodCleanup(ctx, c, pod, false)

					ginkgo.By("Restart the nfs server")
					restartNfsServer(nfsServerPod)

					ginkgo.By("Verify that the deleted client pod is now cleaned up")
					checkPodCleanup(ctx, c, pod, true)
				})
			}
		})
	})
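
	// Tests for the NodeLogQuery feature.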
	f.Describe("kubectl get --raw \"/api/v1/nodes/<insert-node-name-here>/proxy/logs/?query=/<insert-log-file-name-here>\"", feature.NodeLogQuery, func() {
		var linuxNodeName string
		var windowsNodeName string

		ginkgo.BeforeEach(func(ctx context.Context) {
			allNodes, err := e2enode.GetReadyNodesIncludingTainted(ctx, c)
			framework.ExpectNoError(err)
			if len(allNodes.Items) == 0 {
				framework.Fail("Expected at least one node to be present")
			}
			// Make a copy of the node list, since getLinuxNodes filters it in place.
			nodes := allNodes.DeepCopy()

			linuxNodes := getLinuxNodes(nodes)
			if len(linuxNodes.Items) == 0 {
				framework.Fail("Expected at least one Linux node to be present")
			}
			linuxNodeName = linuxNodes.Items[0].Name

			windowsNodes := getWindowsNodes(allNodes)
			if len(windowsNodes.Items) == 0 {
				framework.Logf("No Windows node found")
			} else {
				windowsNodeName = windowsNodes.Items[0].Name
			}
		})
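
		// Querying the logs endpoint without a value for the query parameter
		// should fail.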
		ginkgo.It("should return the error with an empty --query option", func() {
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query", linuxNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			_, _, err := framework.StartCmdAndStreamOutput(cmd)
			if err != nil {
				framework.Failf("Failed to start kubectl command! Error: %v", err)
			}
			err = cmd.Wait()
			gomega.Expect(err).To(gomega.HaveOccurred(), "Command kubectl get --raw "+queryCommand+" was expected to return an error!")
		})
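
		// Querying for the kubelet service logs should succeed and the response
		// should mention the kubelet.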
		ginkgo.It("should return the kubelet logs", func(ctx context.Context) {
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet", linuxNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			assertContains("kubelet", result)
		})
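
		// Querying with boot=0 restricts the kubelet logs to the current boot.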
		ginkgo.It("should return the kubelet logs for the current boot", func(ctx context.Context) {
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&boot=0", linuxNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			assertContains("kubelet", result)
		})
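
		// tailLines=3 should return exactly the last three lines of the kubelet
		// logs; the result is verified against journalctl -n 3 on the node.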
		ginkgo.It("should return the last three lines of the kubelet logs", func(ctx context.Context) {
			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&tailLines=3", linuxNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			logs := journalctlCommandOnNode(linuxNodeName, "-u kubelet -n 3")
			if result != logs {
				framework.Failf("Failed to receive the correct kubelet logs or the correct amount of lines of logs")
			}
		})
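
		// pattern=container filters the kubelet logs for lines matching
		// "container"; the result is verified against journalctl --grep.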
		ginkgo.It("should return the kubelet logs for the current boot with the pattern container", func(ctx context.Context) {
			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&boot=0&pattern=container", linuxNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			logs := journalctlCommandOnNode(linuxNodeName, "-u kubelet --grep container --boot 0")
			if result != logs {
				framework.Failf("Failed to receive the correct kubelet logs")
			}
		})
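
		// sinceTime restricts the returned kubelet logs to entries at or after
		// the given time; the result is verified against journalctl --since.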
		ginkgo.It("should return the kubelet logs since the current date and time", func() {
			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
			ginkgo.By("Starting the command")
			start := time.Now().UTC()
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			currentTime := start.Format(time.RFC3339)
			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=kubelet&sinceTime=%s", linuxNodeName, currentTime)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			journalctlDateLayout := "2006-1-2 15:4:5"
			result := runKubectlCommand(cmd)
			logs := journalctlCommandOnNode(linuxNodeName, fmt.Sprintf("-u kubelet --since \"%s\"", start.Format(journalctlDateLayout)))
			if result != logs {
				framework.Failf("Failed to receive the correct kubelet logs or the correct amount of lines of logs")
			}
		})
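
		// Querying a Windows node for the Microsoft-Windows-Security-SPP
		// provider should return logs from that provider.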
		ginkgo.It("should return the Microsoft-Windows-Security-SPP logs", func(ctx context.Context) {
			if len(windowsNodeName) == 0 {
				ginkgo.Skip("No Windows node found")
			}
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=Microsoft-Windows-Security-SPP", windowsNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			assertContains("ProviderName: Microsoft-Windows-Security-SPP", result)
		})
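
		// tailLines=3 on a Windows node should return the last three entries,
		// verified against Get-WinEvent -MaxEvents 3.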
		ginkgo.It("should return the last three lines of the Microsoft-Windows-Security-SPP logs", func(ctx context.Context) {
			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
			if len(windowsNodeName) == 0 {
				ginkgo.Skip("No Windows node found")
			}
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=Microsoft-Windows-Security-SPP&tailLines=3", windowsNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			logs := getWinEventCommandOnNode(windowsNodeName, "Microsoft-Windows-Security-SPP", " -MaxEvents 3")
			if trimSpaceNewlineInString(result) != trimSpaceNewlineInString(logs) {
				framework.Failf("Failed to receive the correct Microsoft-Windows-Security-SPP logs or the correct amount of lines of logs")
			}
		})
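
		// pattern=Health filters the provider's logs for entries matching
		// "Health", verified against a Where-Object match on the node.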
		ginkgo.It("should return the Microsoft-Windows-Security-SPP logs with the pattern Health", func(ctx context.Context) {
			e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
			if len(windowsNodeName) == 0 {
				ginkgo.Skip("No Windows node found")
			}
			ginkgo.By("Starting the command")
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)

			queryCommand := fmt.Sprintf("/api/v1/nodes/%s/proxy/logs/?query=Microsoft-Windows-Security-SPP&pattern=Health", windowsNodeName)
			cmd := tk.KubectlCmd("get", "--raw", queryCommand)
			result := runKubectlCommand(cmd)
			logs := getWinEventCommandOnNode(windowsNodeName, "Microsoft-Windows-Security-SPP", " | Where-Object -Property Message -Match Health")
			if trimSpaceNewlineInString(result) != trimSpaceNewlineInString(logs) {
				framework.Failf("Failed to receive the correct Microsoft-Windows-Security-SPP logs or the correct amount of lines of logs")
			}
		})
	})
})
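
// getLinuxNodes filters the given node list, in place, down to Linux nodes.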
func getLinuxNodes(nodes *v1.NodeList) *v1.NodeList {
	filteredNodes := nodes
	e2enode.Filter(filteredNodes, func(node v1.Node) bool {
		return isNode(&node, "linux")
	})
	return filteredNodes
}
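
// getWindowsNodes filters the given node list, in place, down to Windows nodes.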
func getWindowsNodes(nodes *v1.NodeList) *v1.NodeList {
	filteredNodes := nodes
	e2enode.Filter(filteredNodes, func(node v1.Node) bool {
		return isNode(&node, "windows")
	})
	return filteredNodes
}
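
// isNode reports whether the node's kubernetes.io/os label matches the given
// os value.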
func isNode(node *v1.Node, os string) bool {
	if node == nil {
		return false
	}
	if foundOS, found := node.Labels[v1.LabelOSStable]; found {
		return os == foundOS
	}
	return false
}
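
// runKubectlCommand starts the given kubectl command, streams its stdout to
// completion, and returns the captured output.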
func runKubectlCommand(cmd *exec.Cmd) (result string) {
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	var buf bytes.Buffer
	if err != nil {
		framework.Failf("Failed to start kubectl command! Stderr: %v, error: %v", stderr, err)
	}
	defer stdout.Close()
	defer stderr.Close()
	defer framework.TryKill(cmd)

	bytesRead, err := io.Copy(&buf, stdout)
	if err != nil {
		framework.Failf("Expected output from kubectl command %s: %v\n Stderr: %v", cmd.Args, err, stderr)
	}
	out := ""
	if bytesRead >= 0 {
		out = buf.String()
	}

	framework.Logf("Kubectl output: %s", out)
	return out
}
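
// assertContains fails the test if result does not contain expectedString.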
func assertContains(expectedString string, result string) {
	if strings.Contains(result, expectedString) {
		return
	}
	framework.Failf("Failed to find %q", expectedString)
}
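
// commandOnNode runs cmd on the given node over SSH and returns its stdout.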
func commandOnNode(nodeName string, cmd string) string {
	result, err := e2essh.NodeExec(context.Background(), nodeName, cmd, framework.TestContext.Provider)
	framework.ExpectNoError(err)
	e2essh.LogResult(result)
	return result.Stdout
}
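
// journalctlCommandOnNode runs journalctl on the node with the given extra
// args, in UTC and without a pager so the output is directly comparable.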
func journalctlCommandOnNode(nodeName string, args string) string {
	return commandOnNode(nodeName, "journalctl --utc --no-pager --output=short-precise "+args)
}
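
// getWinEventCommandOnNode queries the Application log on a Windows node for
// events from the given provider via PowerShell's Get-WinEvent.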
func getWinEventCommandOnNode(nodeName string, providerName, args string) string {
	output := commandOnNode(nodeName, "Get-WinEvent -FilterHashtable @{LogName='Application'; ProviderName='"+providerName+"'}"+args+" | Sort-Object TimeCreated | Format-Table -AutoSize -Wrap")
	return output
}
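
// trimSpaceNewlineInString strips spaces and padded newlines so that log
// output retrieved through different channels can be compared byte-for-byte.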
func trimSpaceNewlineInString(s string) string {
	// Remove space-padded (possibly Windows) newlines, including the padding.
	re := regexp.MustCompile(` +\r?\n +`)
	s = re.ReplaceAllString(s, "")
	// Drop remaining spaces so differently wrapped output compares equal.
	return strings.ReplaceAll(s, " ", "")
}