20 package e2enode
21
22 import (
23 "context"
24 "fmt"
25 "runtime"
26 "time"
27
28 "github.com/onsi/ginkgo/v2"
29
30 v1 "k8s.io/api/core/v1"
31 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
32 "k8s.io/apimachinery/pkg/types"
33 "k8s.io/apimachinery/pkg/util/wait"
34 v1core "k8s.io/client-go/kubernetes/typed/core/v1"
35 nodeutil "k8s.io/component-helpers/node/util"
36 "k8s.io/kubernetes/test/e2e/framework"
37 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
38 admissionapi "k8s.io/pod-security-admission/api"
39 )
40
41 var _ = SIGDescribe("OSArchLabelReconciliation", framework.WithSerial(), framework.WithSlow(), framework.WithDisruptive(), func() {
42 f := framework.NewDefaultFramework("node-label-reconciliation")
43 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
44 ginkgo.Context("Kubelet", func() {
45 ginkgo.It("should reconcile the OS and Arch labels when restarted", func(ctx context.Context) {
46 node := getLocalNode(ctx, f)
47 e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
48 e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
49
50 ginkgo.By("killing and restarting kubelet")
51
52 startKubelet := stopKubelet()
53
54 newNode := node.DeepCopy()
55 newNode.Labels[v1.LabelOSStable] = "dummyOS"
56 newNode.Labels[v1.LabelArchStable] = "dummyArch"
57 _, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
58 framework.ExpectNoError(err)
59
60 startKubelet()
61 framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, f.ClientSet, framework.RestartNodeReadyAgainTimeout))
62
63 err = waitForNodeLabels(ctx, f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
64 framework.ExpectNoError(err)
65 })
66 ginkgo.It("should reconcile the OS and Arch labels when running", func(ctx context.Context) {
67
68 node := getLocalNode(ctx, f)
69 e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelOSStable, runtime.GOOS)
70 e2enode.ExpectNodeHasLabel(ctx, f.ClientSet, node.Name, v1.LabelArchStable, runtime.GOARCH)
71
72
73 newNode := node.DeepCopy()
74 newNode.Labels[v1.LabelOSStable] = "dummyOS"
75 newNode.Labels[v1.LabelArchStable] = "dummyArch"
76 _, _, err := nodeutil.PatchNodeStatus(f.ClientSet.CoreV1(), types.NodeName(node.Name), node, newNode)
77 framework.ExpectNoError(err)
78 err = waitForNodeLabels(ctx, f.ClientSet.CoreV1(), node.Name, 5*time.Minute)
79 framework.ExpectNoError(err)
80 })
81 })
82 })
83
84
85 func waitForNodeLabels(ctx context.Context, c v1core.CoreV1Interface, nodeName string, timeout time.Duration) error {
86 ginkgo.By(fmt.Sprintf("Waiting for node %v to have appropriate labels", nodeName))
87
88 return wait.PollWithContext(ctx, framework.Poll, timeout,
89 func(ctx context.Context) (bool, error) {
90 node, err := c.Nodes().Get(ctx, nodeName, metav1.GetOptions{})
91 if err != nil {
92 return false, err
93 }
94 osLabel, ok := node.Labels[v1.LabelOSStable]
95 if !ok || osLabel != runtime.GOOS {
96 return false, nil
97 }
98 archLabel, ok := node.Labels[v1.LabelArchStable]
99 if !ok || archLabel != runtime.GOARCH {
100 return false, nil
101 }
102 return true, nil
103 })
104 }
105