package kubelet

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	core "k8s.io/client-go/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

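// TestListVolumesForPod verifies that ListVolumesForPod returns the pod's mounted volumes keyed by outer volume spec name.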
func TestListVolumesForPod(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
					{
						Name:      "vol2",
						MountPath: "/mnt/vol2",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device1",
					},
				},
			},
			{
				Name: "vol2",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName: "fake-device2",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod)
	assert.NoError(t, err)

	podName := util.GetUniquePodName(pod)

	volumesToReturn, volumeExist := kubelet.ListVolumesForPod(types.UID(podName))
	assert.True(t, volumeExist, "expected to find volumes for pod %q", podName)

	outerVolumeSpecName1 := "vol1"
	assert.NotNil(t, volumesToReturn[outerVolumeSpecName1], "key %s", outerVolumeSpecName1)

	outerVolumeSpecName2 := "vol2"
	assert.NotNil(t, volumesToReturn[outerVolumeSpecName2], "key %s", outerVolumeSpecName2)
}

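// TestPodVolumesExist verifies that podVolumesExist reports true for pods whose volumes have been attached and mounted.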
func TestPodVolumesExist(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				UID:  "pod1uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container1",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol1",
								MountPath: "/mnt/vol1",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol1",
						VolumeSource: v1.VolumeSource{
							RBD: &v1.RBDVolumeSource{
								RBDImage: "fake1",
							},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod2",
				UID:  "pod2uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container2",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol2",
								MountPath: "/mnt/vol2",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol2",
						VolumeSource: v1.VolumeSource{
							RBD: &v1.RBDVolumeSource{
								RBDImage: "fake2",
							},
						},
					},
				},
			},
		},
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod3",
				UID:  "pod3uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container3",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol3",
								MountPath: "/mnt/vol3",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol3",
						VolumeSource: v1.VolumeSource{
							RBD: &v1.RBDVolumeSource{
								RBDImage: "fake3",
							},
						},
					},
				},
			},
		},
	}

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods(pods)
	for _, pod := range pods {
		err := kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod)
		assert.NoError(t, err)
	}

	for _, pod := range pods {
		podVolumesExist := kubelet.podVolumesExist(pod.UID)
		assert.True(t, podVolumesExist, "pod %q", pod.UID)
	}
}

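// TestPodVolumeDeadlineAttachAndMount verifies that WaitForAttachAndMount gives up with context.DeadlineExceeded when a volume cannot be mounted before the context deadline.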
func TestPodVolumeDeadlineAttachAndMount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubeletWithImageList(t, nil /* imageList */, false /* controllerAttachDetachEnabled */,
		false /* initFakeVolumePlugin */, true /* localStorageCapacityIsolation */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	// The referenced Secret does not exist, so the volume can never be
	// mounted and WaitForAttachAndMount can only run into the deadline.
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				UID:  "pod1uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container1",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol1",
								MountPath: "/mnt/vol1",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol1",
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: "non-existent",
							},
						},
					},
				},
			},
		},
	}

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods(pods)
	for _, pod := range pods {
		start := time.Now()

		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
		err := kubelet.volumeManager.WaitForAttachAndMount(ctx, pod)
		delta := time.Since(start)

		assert.Lessf(t, delta, 10*time.Second, "WaitForAttachAndMount should timeout when the context is cancelled")
		assert.ErrorIs(t, err, context.DeadlineExceeded)
		cancel()
	}
}

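// TestPodVolumeDeadlineUnmount verifies that WaitForUnmount gives up with context.DeadlineExceeded when the pod's volumes remain mounted past the context deadline.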
func TestPodVolumeDeadlineUnmount(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubeletWithImageList(t, nil /* imageList */, false /* controllerAttachDetachEnabled */,
		true /* initFakeVolumePlugin */, true /* localStorageCapacityIsolation */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	// Mount a volume first, then wait for an unmount that cannot complete
	// before the context deadline expires because the pod is still present.
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod1",
				UID:  "pod1uid",
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{
						Name: "container1",
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      "vol1",
								MountPath: "/mnt/vol1",
							},
						},
					},
				},
				Volumes: []v1.Volume{
					{
						Name: "vol1",
						VolumeSource: v1.VolumeSource{
							RBD: &v1.RBDVolumeSource{
								RBDImage: "fake-device",
							},
						},
					},
				},
			},
		},
	}

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods(pods)
	for i, pod := range pods {
		if err := kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod); err != nil {
			t.Fatalf("pod %d failed: %v", i, err)
		}
		start := time.Now()

		ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(2*time.Second))
		err := kubelet.volumeManager.WaitForUnmount(ctx, pod)
		delta := time.Since(start)

		assert.Lessf(t, delta, 10*time.Second, "WaitForUnmount should timeout when the context is cancelled")
		assert.ErrorIs(t, err, context.DeadlineExceeded)
		cancel()
	}
}

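// TestVolumeAttachAndMountControllerDisabled verifies that, with the attach/detach controller disabled, the kubelet attaches and mounts a pod's volume itself, recording one attach, wait-for-attach, mount-device, and set-up call each.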
func TestVolumeAttachAndMountControllerDisabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						RBDImage: "fake",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})
	err := kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

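// TestVolumeUnmountAndDetachControllerDisabled verifies that, with the attach/detach controller disabled, the kubelet unmounts and detaches a pod's volume itself once the pod is removed.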
func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						RBDImage: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	// Add the pod and wait for its volume to be attached and mounted.
	kubelet.podManager.SetPods([]*v1.Pod{pod})

	err := kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod)
	assert.NoError(t, err)

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove the pod and wait for its volume to be unmounted.
	kubelet.podWorkers.(*fakePodWorkers).setPodRuntimeBeRemoved(pod.UID)
	kubelet.podManager.SetPods([]*v1.Pod{})

	assert.NoError(t, kubelet.volumeManager.WaitForUnmount(context.Background(), pod))
	if actual := kubelet.volumeManager.GetMountedVolumesForPod(util.GetUniquePodName(pod)); len(actual) > 0 {
		t.Fatalf("expected volume unmount to wait for no volumes: %v", actual)
	}

	// Verify that no volumes remain mounted for the pod.
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Verify that the volume is eventually detached by the kubelet.
	assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, testKubelet.volumePlugin))
}

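// TestVolumeAttachAndMountControllerEnabled verifies that, with the attach/detach controller enabled, the kubelet waits for the controller-reported attachment and mounts the volume without issuing any Attach calls of its own.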
func TestVolumeAttachAndMountControllerEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       "fake/fake-device",
							DevicePath: "fake/path",
						},
					}},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						RBDImage: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	kubelet.podManager.SetPods([]*v1.Pod{pod})

	// Simulate the node status reporting the volume as in use.
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName("fake/fake-device"),
		stopCh,
		kubelet.volumeManager)

	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))
}

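// TestVolumeUnmountAndDetachControllerEnabled verifies that, with the attach/detach controller enabled, the kubelet unmounts a removed pod's volume but leaves detaching to the controller, issuing no Detach calls.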
func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	testKubelet := newTestKubelet(t, true /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubeClient := testKubelet.fakeKubeClient
	kubeClient.AddReactor("get", "nodes",
		func(action core.Action) (bool, runtime.Object, error) {
			return true, &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       "fake/fake-device",
							DevicePath: "fake/path",
						},
					}},
			}, nil
		})
	kubeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
	})

	pod := podWithUIDNameNsSpec("12345678", "foo", "test", v1.PodSpec{
		Containers: []v1.Container{
			{
				Name: "container1",
				VolumeMounts: []v1.VolumeMount{
					{
						Name:      "vol1",
						MountPath: "/mnt/vol1",
					},
				},
			},
		},
		Volumes: []v1.Volume{
			{
				Name: "vol1",
				VolumeSource: v1.VolumeSource{
					RBD: &v1.RBDVolumeSource{
						RBDImage: "fake-device",
					},
				},
			},
		},
	})

	stopCh := runVolumeManager(kubelet)
	defer close(stopCh)

	// Add the pod.
	kubelet.podManager.SetPods([]*v1.Pod{pod})

	// Simulate the node status reporting the volume as in use.
	go simulateVolumeInUseUpdate(
		v1.UniqueVolumeName("fake/fake-device"),
		stopCh,
		kubelet.volumeManager)

	// Verify that the volume is attached and mounted.
	assert.NoError(t, kubelet.volumeManager.WaitForAttachAndMount(context.Background(), pod))

	podVolumes := kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes := kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	expectedPodVolumes := []string{"vol1"}
	assert.Len(t, podVolumes, len(expectedPodVolumes), "Volumes for pod %+v", pod)
	for _, name := range expectedPodVolumes {
		assert.Contains(t, podVolumes, name, "Volumes for pod %+v", pod)
	}

	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyZeroAttachCalls(testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, testKubelet.volumePlugin))
	assert.NoError(t, volumetest.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, testKubelet.volumePlugin))

	// Remove the pod and wait for its volume to be unmounted.
	kubelet.podWorkers.(*fakePodWorkers).setPodRuntimeBeRemoved(pod.UID)
	kubelet.podManager.SetPods([]*v1.Pod{})

	assert.NoError(t, waitForVolumeUnmount(kubelet.volumeManager, pod))

	// Verify that no volumes remain mounted for the pod.
	podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
		util.GetUniquePodName(pod))
	allPodVolumes = kubelet.volumeManager.GetPossiblyMountedVolumesForPod(
		util.GetUniquePodName(pod))
	assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")

	assert.Len(t, podVolumes, 0,
		"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)

	assert.NoError(t, volumetest.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))

	// Detaching is the controller's job, so the kubelet should not call Detach.
	assert.NoError(t, waitForVolumeDetach(v1.UniqueVolumeName("fake/fake-device"), kubelet.volumeManager))
	assert.True(t, testKubelet.volumePlugin.GetNewAttacherCallCount() >= 1, "Expected plugin NewAttacher to be called at least once")
	assert.NoError(t, volumetest.VerifyZeroDetachCallCount(testKubelet.volumePlugin))
}

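// stubVolume is a minimal no-op volume implementation used as a test double.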
type stubVolume struct {
	path string
	volume.MetricsNil
}

func (f *stubVolume) GetPath() string {
	return f.path
}

func (f *stubVolume) GetAttributes() volume.Attributes {
	return volume.Attributes{}
}

func (f *stubVolume) SetUp(mounterArgs volume.MounterArgs) error {
	return nil
}

func (f *stubVolume) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	return nil
}

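// stubBlockVolume is a minimal no-op block volume implementation used as a test double.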
type stubBlockVolume struct {
	dirPath string
	volName string
}

func (f *stubBlockVolume) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	return "", nil
}

func (f *stubBlockVolume) GetPodDeviceMapPath() (string, string) {
	return f.dirPath, f.volName
}

func (f *stubBlockVolume) SetUpDevice() (string, error) {
	return "", nil
}

func (f *stubBlockVolume) MapPodDevice() error {
	return nil
}

func (f *stubBlockVolume) TearDownDevice(mapPath string, devicePath string) error {
	return nil
}

func (f *stubBlockVolume) UnmapPodDevice() error {
	return nil
}

func (f *stubBlockVolume) SupportsMetrics() bool {
	return false
}

func (f *stubBlockVolume) GetMetrics() (*volume.Metrics, error) {
	return nil, nil
}