package gcp

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	testutils "k8s.io/kubernetes/test/utils"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)
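
// Example invocation for this suite (a sketch only; the exact flags depend on
// the environment and runner version):
//
//	./hack/ginkgo-e2e.sh '--ginkgo.focus=Restart' --provider=gce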

// nodeNames returns the names of the given nodes; used for logging.
func nodeNames(nodes []v1.Node) []string {
	result := make([]string, 0, len(nodes))
	for i := range nodes {
		result = append(result, nodes[i].Name)
	}
	return result
}

var _ = SIGDescribe("Restart", framework.WithDisruptive(), func() {
	f := framework.NewDefaultFramework("restart")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	var ps *testutils.PodStore
	var originalNodes []v1.Node
	var originalPodNames []string
	var numNodes int
	var systemNamespace string

	ginkgo.BeforeEach(func(ctx context.Context) {
		// This test restarts all of the cluster's nodes, which the
		// RestartNodes helper below only supports on GCE/GKE.
		e2eskipper.SkipUnlessProviderIs("gce", "gke")
		var err error
		ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
		framework.ExpectNoError(err)
		numNodes, err = e2enode.TotalRegistered(ctx, f.ClientSet)
		framework.ExpectNoError(err)
		systemNamespace = metav1.NamespaceSystem

		ginkgo.By("ensuring all nodes are ready")
		originalNodes, err = e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
		framework.ExpectNoError(err)
		framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

		ginkgo.By("ensuring all pods are running and ready")
		allPods := ps.List()
		pods := e2epod.FilterNonRestartablePods(allPods)

		originalPodNames = make([]string, len(pods))
		for i, p := range pods {
			originalPodNames[i] = p.ObjectMeta.Name
		}
		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
			printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, originalPodNames, pods)
			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
		}
	})

	ginkgo.AfterEach(func() {
		if ps != nil {
			ps.Stop()
		}
	})

	// The KubeUp label scopes this test to clusters brought up with
	// kube-up.sh, the environment the node-restart helpers assume.
	f.It(f.WithLabel("KubeUp"), "should restart all nodes and ensure all nodes and pods recover", func(ctx context.Context) {
		ginkgo.By("restarting all of the nodes")
		err := common.RestartNodes(f.ClientSet, originalNodes)
		framework.ExpectNoError(err)

		ginkgo.By("ensuring all nodes are ready after the restart")
		nodesAfter, err := e2enode.CheckReady(ctx, f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
		framework.ExpectNoError(err)
		framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

		// Make sure we have the same number of nodes. Names aren't compared
		// because they may legitimately change across a restart.
		ginkgo.By("ensuring the same number of nodes exist after the restart")
		if len(originalNodes) != len(nodesAfter) {
			framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
				len(originalNodes), len(nodesAfter))
		}

		// Make sure we have the same number of pods. Names aren't compared
		// because system pods are recreated with new names after a restart.
		ginkgo.By("ensuring the same number of pods are running and ready after restart")
		podCheckStart := time.Now()
		podNamesAfter, err := e2epod.WaitForNRestartablePods(ctx, ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
		framework.ExpectNoError(err)
		// Deduct the time already spent waiting for the pod count, so the
		// total wait stays within RestartPodReadyAgainTimeout.
		remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
		if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, f.ClientSet, systemNamespace, podNamesAfter, remaining) {
			pods := ps.List()
			printStatusAndLogsForNotReadyPods(ctx, f.ClientSet, systemNamespace, podNamesAfter, pods)
			framework.Failf("At least one pod wasn't running and ready after the restart.")
		}
	})
})