package csi_mock

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync/atomic"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)
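// Tests for passing SELinux mount options ("-o context=...") to CSI volume mounts.
// The mock CSI driver is deployed with hooks that record the mount options and the
// number of NodeStage/NodeUnstage/NodePublish/NodeUnpublish calls, so the tests can
// verify whether kubelet added the expected SELinux context and whether a volume was
// re-staged when a second pod with a different context used it.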
var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
	f := framework.NewDefaultFramework("csi-mock-volumes-selinux")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

	f.Context("SELinuxMount [LinuxOnly]", feature.SELinux, func() {
		seLinuxOpts1 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c0,c1",
		}
		seLinuxMountOption1 := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
		seLinuxOpts2 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c98,c99",
		}
		seLinuxMountOption2 := "context=\"system_u:object_r:container_file_t:s0:c98,c99\""

		tests := []struct {
			name                       string
			csiDriverSELinuxEnabled    bool
			firstPodSELinuxOpts        *v1.SELinuxOptions
			startSecondPod             bool
			secondPodSELinuxOpts       *v1.SELinuxOptions
			mountOptions               []string
			volumeMode                 v1.PersistentVolumeAccessMode
			expectedFirstMountOptions  []string
			expectedSecondMountOptions []string
			expectedUnstage            bool
			testTags                   []interface{}
		}{
			{
				name:                      "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: []string{seLinuxMountOption1},
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                      "should add SELinux mount option to existing mount options",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				mountOptions:              []string{"noexec", "noatime"},
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: []string{"noexec", "noatime", seLinuxMountOption1},
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                      "should not pass SELinux mount option for RWO volume with SELinuxMount disabled",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOnce,
				expectedFirstMountOptions: nil,
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), feature.SELinuxMountReadWriteOncePodOnly},
			},
			{
				name:                      "should pass SELinux mount option for RWO volume with SELinuxMount enabled",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOnce,
				expectedFirstMountOptions: []string{seLinuxMountOption1},
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
			{
				name:                      "should not pass SELinux mount option for Pod without SELinux context",
				csiDriverSELinuxEnabled:   true,
				firstPodSELinuxOpts:       nil,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: nil,
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                      "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
				csiDriverSELinuxEnabled:   false,
				firstPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                v1.ReadWriteOncePod,
				expectedFirstMountOptions: nil,
				testTags:                  []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                       "should not unstage RWOP volume when starting a second pod with the same SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                 v1.ReadWriteOncePod,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption1},
				expectedUnstage:            false,
				testTags:                   []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                       "should unstage RWOP volume when starting a second pod with different SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts2,
				volumeMode:                 v1.ReadWriteOncePod,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption2},
				expectedUnstage:            true,
				testTags:                   []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
			{
				name:                       "should not unstage RWO volume when starting a second pod with the same SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts1,
				volumeMode:                 v1.ReadWriteOnce,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption1},
				expectedUnstage:            false,
				testTags:                   []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
			{
				name:                       "should unstage RWO volume when starting a second pod with different SELinux context",
				csiDriverSELinuxEnabled:    true,
				firstPodSELinuxOpts:        &seLinuxOpts1,
				startSecondPod:             true,
				secondPodSELinuxOpts:       &seLinuxOpts2,
				volumeMode:                 v1.ReadWriteOnce,
				expectedFirstMountOptions:  []string{seLinuxMountOption1},
				expectedSecondMountOptions: []string{seLinuxMountOption2},
				expectedUnstage:            true,
				testTags:                   []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
		}
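		// Each table entry below is registered as its own ginkgo It, tagged with the
		// feature gates listed in testTags.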
		for _, t := range tests {
			t := t
			testFunc := func(ctx context.Context) {
				if framework.NodeOSDistroIs("windows") {
					e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
				}
				var nodeStageMountOpts, nodePublishMountOpts []string
				var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
				// Deploy the mock CSI driver with hooks that record mount options and CSI call counts.
				m.init(ctx, testParameters{
					disableAttach:      true,
					registerDriver:     true,
					enableSELinuxMount: &t.csiDriverSELinuxEnabled,
					hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
				})
				ginkgo.DeferCleanup(m.cleanup)

				ginkgo.By("Starting the initial pod")
				accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
				_, claim, pod := m.createPodWithSELinux(ctx, accessModes, t.mountOptions, t.firstPodSELinuxOpts)
				err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "starting the initial pod")

				ginkgo.By("Checking the initial pod mount options")
				gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodeStage MountFlags for the initial pod")
				gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedFirstMountOptions), "NodePublish MountFlags for the initial pod")

				ginkgo.By("Checking the CSI driver calls for the initial pod")
				gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage call count for the initial pod")
				gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnpublish call count for the initial pod")
				gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage for the initial pod")
				gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish for the initial pod")

				// Single-pod test cases end here.
				if !t.startSecondPod {
					return
				}
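				// Second pod: reset the recorded data, then start another pod on the same node
				// with t.secondPodSELinuxOpts and observe how kubelet handles the shared volume.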
				ginkgo.By("Starting the second pod to check if a volume used by the initial pod is / is not unmounted based on SELinux context")

				nodeStageMountOpts = nil
				nodePublishMountOpts = nil
				unstageCalls.Store(0)
				unpublishCalls.Store(0)
				stageCalls.Store(0)
				publishCalls.Store(0)

				// Pin the second pod to the node of the first one. Setting the node name directly
				// also bypasses the scheduler, which would otherwise refuse to schedule a second
				// pod that uses a ReadWriteOncePod volume.
				pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "getting the initial pod")
				nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
				pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
				framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
				m.pods = append(m.pods, pod2)

				ginkgo.By("Waiting for the second pod to start (or fail to start because of ReadWriteOncePod).")
				reason := events.FailedMountVolume
				var msg string
				if t.expectedUnstage {
					// The volume has to be re-staged with a different SELinux context, which cannot
					// happen while the first pod still uses it.
					msg = "conflicting SELinux labels of volume"
				} else {
					if t.volumeMode == v1.ReadWriteOncePod {
						// The second pod is rejected because the volume already uses the
						// ReadWriteOncePod access mode.
						msg = "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
					} else {
						// The second pod can share the already staged volume and should start.
						reason = string(events.StartedContainer)
						msg = "Started container"
					}
				}
				eventSelector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.name":      pod2.Name,
					"involvedObject.namespace": pod2.Namespace,
					"reason":                   reason,
				}.AsSelector().String()
				err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
				framework.ExpectNoError(err, "waiting for event %q in the second test pod", msg)

				ginkgo.By("Deleting the initial pod")
				err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
				framework.ExpectNoError(err, "deleting the initial pod")

				ginkgo.By("Waiting for the second pod to start")
				err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
				framework.ExpectNoError(err, "starting the second pod")

				ginkgo.By("Checking CSI driver calls for the second pod")
				if t.expectedUnstage {
					// The volume was fully unstaged between the two pods and staged again with
					// the second pod's mount options.
					gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnstage calls after the first pod is deleted")
					gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeStage calls for the second pod")
					gomega.Expect(nodeStageMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodeStage MountFlags for the second pod")
				} else {
					// The volume stayed staged while the pods were swapped.
					gomega.Expect(unstageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeUnstage calls after the first pod is deleted")
					gomega.Expect(stageCalls.Load()).To(gomega.BeNumerically("==", 0), "NodeStage calls for the second pod")
				}

				// NodeUnpublish for the first pod and NodePublish for the second pod happen in both cases.
				gomega.Expect(unpublishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodeUnpublish calls after the first pod is deleted")
				gomega.Expect(publishCalls.Load()).To(gomega.BeNumerically(">", 0), "NodePublish calls for the second pod")
				gomega.Expect(nodePublishMountOpts).To(gomega.Equal(t.expectedSecondMountOptions), "NodePublish MountFlags for the second pod")
			}
			args := []interface{}{
				t.name,
				testFunc,
			}
			args = append(args, t.testTags...)
			framework.It(args...)
		}
	})
})

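// Volume manager SELinux metrics asserted on by the metrics tests below, grouped by
// whether kubelet reports them with a volume_plugin label.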
var (
	metricsWithVolumePluginLabel = sets.New[string](
		"volume_manager_selinux_volume_context_mismatch_errors_total",
		"volume_manager_selinux_volume_context_mismatch_warnings_total",
		"volume_manager_selinux_volumes_admitted_total",
	)

	metricsWithoutVolumePluginLabel = sets.New[string](
		"volume_manager_selinux_container_errors_total",
		"volume_manager_selinux_container_warnings_total",
		"volume_manager_selinux_pod_context_mismatch_errors_total",
		"volume_manager_selinux_pod_context_mismatch_warnings_total",
	)

	allMetrics = metricsWithoutVolumePluginLabel.Union(metricsWithVolumePluginLabel)
)

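// The tests below run two pods with possibly different SELinux contexts on the same
// volume and check which of the kubelet SELinux volume metrics increase.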
var _ = utils.SIGDescribe("CSI Mock selinux on mount metrics", func() {
	f := framework.NewDefaultFramework("csi-mock-volumes-selinux-metrics")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

	// Kubelet metrics are global, so the tests run serialized to avoid interference.
	f.Context("SELinuxMount metrics [LinuxOnly]", feature.SELinux, f.WithSerial(), func() {
		seLinuxOpts1 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c0,c1",
		}
		seLinuxOpts2 := v1.SELinuxOptions{
			User:  "system_u",
			Role:  "system_r",
			Type:  "container_t",
			Level: "s0:c98,c99",
		}

		tests := []struct {
			name                    string
			csiDriverSELinuxEnabled bool
			firstPodSELinuxOpts     *v1.SELinuxOptions
			secondPodSELinuxOpts    *v1.SELinuxOptions
			volumeMode              v1.PersistentVolumeAccessMode
			waitForSecondPodStart   bool
			secondPodFailureEvent   string
			expectIncreases         sets.Set[string]
			testTags                []interface{}
		}{
			{
				name:                    "warning is not bumped on two Pods with the same context on RWO volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts1,
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   true,
				expectIncreases:         sets.New[string](),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), feature.SELinuxMountReadWriteOncePodOnly},
			},
			{
				name:                    "warning is bumped on two Pods with a different context on RWO volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   true,
				expectIncreases:         sets.New[string]("volume_manager_selinux_volume_context_mismatch_warnings_total"),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), feature.SELinuxMountReadWriteOncePodOnly},
			},
			{
				name:                    "error is not bumped on two Pods with the same context on RWO volume and SELinuxMount enabled",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts1,
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   true,
				expectIncreases:         sets.New[string](),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
			{
				name:                    "error is bumped on two Pods with a different context on RWO volume and SELinuxMount enabled",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				secondPodFailureEvent:   "conflicting SELinux labels of volume",
				volumeMode:              v1.ReadWriteOnce,
				waitForSecondPodStart:   false,
				expectIncreases:         sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
			{
				name:                    "error is bumped on two Pods with a different context on RWX volume and SELinuxMount enabled",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				secondPodFailureEvent:   "conflicting SELinux labels of volume",
				volumeMode:              v1.ReadWriteMany,
				waitForSecondPodStart:   false,
				expectIncreases:         sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod), framework.WithFeatureGate(features.SELinuxMount)},
			},
			{
				name:                    "error is bumped on two Pods with a different context on RWOP volume",
				csiDriverSELinuxEnabled: true,
				firstPodSELinuxOpts:     &seLinuxOpts1,
				secondPodSELinuxOpts:    &seLinuxOpts2,
				secondPodFailureEvent:   "conflicting SELinux labels of volume",
				volumeMode:              v1.ReadWriteOncePod,
				waitForSecondPodStart:   false,
				expectIncreases:         sets.New[string]("volume_manager_selinux_volume_context_mismatch_errors_total"),
				testTags:                []interface{}{framework.WithFeatureGate(features.SELinuxMountReadWriteOncePod)},
			},
		}
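		// Each case lists the metrics that must increase after the second pod is created;
		// waitForMetricIncrease additionally checks that all other tracked metrics stay unchanged.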
		for _, t := range tests {
			t := t
			testFunc := func(ctx context.Context) {
				// The mock driver name embeds the test namespace, so the expected
				// volume_plugin label must be composed for each test run.
				volumePluginLabel := "volume_plugin=\"kubernetes.io/csi/csi-mock-" + f.Namespace.Name + "\""

				if framework.NodeOSDistroIs("windows") {
					e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
				}
				grabber, err := e2emetrics.NewMetricsGrabber(ctx, f.ClientSet, nil, f.ClientConfig(), true, false, false, false, false, false)
				framework.ExpectNoError(err, "creating the metrics grabber")

				var nodeStageMountOpts, nodePublishMountOpts []string
				var unstageCalls, stageCalls, unpublishCalls, publishCalls atomic.Int32
				m.init(ctx, testParameters{
					disableAttach:      true,
					registerDriver:     true,
					enableSELinuxMount: &t.csiDriverSELinuxEnabled,
					hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts, &stageCalls, &unstageCalls, &publishCalls, &unpublishCalls),
				})
				ginkgo.DeferCleanup(m.cleanup)

				ginkgo.By("Starting the first pod")
				accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
				_, claim, pod := m.createPodWithSELinux(ctx, accessModes, []string{}, t.firstPodSELinuxOpts)
				err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "starting the initial pod")

				ginkgo.By("Grabbing initial metrics")
				pod, err = m.cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "getting the initial pod")
				metrics, err := grabMetrics(ctx, grabber, pod.Spec.NodeName, allMetrics, volumePluginLabel)
				framework.ExpectNoError(err, "collecting the initial metrics")
				dumpMetrics(metrics)

				ginkgo.By("Starting the second pod")
				// Run the second pod on the same node as the first one.
				nodeSelection := e2epod.NodeSelection{Name: pod.Spec.NodeName}
				pod2, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, t.secondPodSELinuxOpts)
				framework.ExpectNoError(err, "creating second pod with SELinux context %s", t.secondPodSELinuxOpts)
				m.pods = append(m.pods, pod2)

				if t.waitForSecondPodStart {
					err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
					framework.ExpectNoError(err, "starting the second pod")
				} else {
					ginkgo.By("Waiting for the second pod to fail to start")
					eventSelector := fields.Set{
						"involvedObject.kind":      "Pod",
						"involvedObject.name":      pod2.Name,
						"involvedObject.namespace": pod2.Namespace,
						"reason":                   events.FailedMountVolume,
					}.AsSelector().String()
					err = e2eevents.WaitTimeoutForEvent(ctx, m.cs, pod2.Namespace, eventSelector, t.secondPodFailureEvent, f.Timeouts.PodStart)
					framework.ExpectNoError(err, "waiting for event %q in the second test pod", t.secondPodFailureEvent)
				}

				// Check that the expected metrics increased while all others stayed unchanged.
				expectIncreaseWithLabels := addLabels(t.expectIncreases, volumePluginLabel, t.volumeMode)
				framework.Logf("Waiting for changes of metrics %+v", expectIncreaseWithLabels)
				err = waitForMetricIncrease(ctx, grabber, pod.Spec.NodeName, volumePluginLabel, allMetrics, expectIncreaseWithLabels, metrics, framework.PodStartShortTimeout)
				framework.ExpectNoError(err, "waiting for metrics %s to increase", t.expectIncreases)
			}
			args := []interface{}{
				t.name,
				testFunc,
			}
			args = append(args, t.testTags...)
			framework.It(args...)
		}
	})
})

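// grabMetrics fetches the tracked volume manager metrics from the kubelet on nodeName.
// Samples that carry a volume_plugin label for a different plugin than volumePluginLabel
// are skipped; the returned map is keyed by the full metric name including its labels.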
func grabMetrics(ctx context.Context, grabber *e2emetrics.Grabber, nodeName string, metricNames sets.Set[string], volumePluginLabel string) (map[string]float64, error) {
	response, err := grabber.GrabFromKubelet(ctx, nodeName)
	framework.ExpectNoError(err)

	metrics := map[string]float64{}
	for _, samples := range response {
		if len(samples) == 0 {
			continue
		}
		for i := range samples {
			// Metric name without labels, e.g. "volume_manager_selinux_volumes_admitted_total".
			metricName := samples[i].Metric[testutil.MetricNameLabel]
			if metricNames.Has(string(metricName)) {
				// Full metric name including labels, matching the format produced by addLabels.
				metricNameWithLabels := samples[i].Metric.String()
				// Skip metrics that belong to another volume plugin.
				if strings.Contains(metricNameWithLabels, "volume_plugin=") && !strings.Contains(metricNameWithLabels, volumePluginLabel) {
					continue
				}
				metrics[metricNameWithLabels] = float64(samples[i].Value)
			}
		}
	}

	return metrics, nil
}

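// waitForMetricIncrease polls the kubelet metrics until every metric in expectedIncreaseNames
// is strictly greater than its initial value. Any other tracked metric that changes its value
// fails the wait immediately.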
func waitForMetricIncrease(ctx context.Context, grabber *e2emetrics.Grabber, nodeName string, volumePluginLabel string, allMetricNames, expectedIncreaseNames sets.Set[string], initialValues map[string]float64, timeout time.Duration) error {
	var noIncreaseMetrics sets.Set[string]
	var metrics map[string]float64

	err := wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		var err error
		metrics, err = grabMetrics(ctx, grabber, nodeName, allMetricNames, volumePluginLabel)
		if err != nil {
			return false, err
		}

		noIncreaseMetrics = sets.New[string]()
		for name := range metrics {
			if expectedIncreaseNames.Has(name) {
				if metrics[name] <= initialValues[name] {
					noIncreaseMetrics.Insert(name)
				}
			} else {
				// A metric that is not expected to increase must not change at all.
				if initialValues[name] != metrics[name] {
					return false, fmt.Errorf("metric %s unexpectedly increased to %v", name, metrics[name])
				}
			}
		}
		return noIncreaseMetrics.Len() == 0, nil
	})

	ginkgo.By("Dumping final metrics")
	dumpMetrics(metrics)

	if err == context.DeadlineExceeded {
		return fmt.Errorf("timed out waiting for metrics %v", noIncreaseMetrics.UnsortedList())
	}
	return err
}

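// dumpMetrics logs all collected metric values to make test failures easier to debug.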
func dumpMetrics(metrics map[string]float64) {
	// Print the metrics sorted by metric name for better readability of the log.
	keys := make([]string, 0, len(metrics))
	for key := range metrics {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	for _, key := range keys {
		framework.Logf("Metric %s: %v", key, metrics[key])
	}
}

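// addLabels converts plain metric names into the labeled form reported by the kubelet,
// adding the access_mode label and, for metrics in metricsWithVolumePluginLabel, the
// volume_plugin label. For example, with a ReadWriteOncePod volume the result looks like
// volume_manager_selinux_volumes_admitted_total{access_mode="RWOP", volume_plugin="kubernetes.io/csi/csi-mock-<namespace>"}.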
func addLabels(metricNames sets.Set[string], volumePluginLabel string, accessMode v1.PersistentVolumeAccessMode) sets.Set[string] {
	ret := sets.New[string]()
	accessModeShortString := helper.GetAccessModesAsString([]v1.PersistentVolumeAccessMode{accessMode})

	for metricName := range metricNames {
		var metricWithLabels string
		if metricsWithVolumePluginLabel.Has(metricName) {
			metricWithLabels = fmt.Sprintf("%s{access_mode=\"%s\", %s}", metricName, accessModeShortString, volumePluginLabel)
		} else {
			metricWithLabels = fmt.Sprintf("%s{access_mode=\"%s\"}", metricName, accessModeShortString)
		}

		ret.Insert(metricWithLabels)
	}

	return ret
}