package utils

import (
	"context"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"math"
	"math/rand"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

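// KubeletOpt is an operation that KubeletCommand can perform on a node's kubelet.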
type KubeletOpt string

const (
	// NodeStateTimeout is how long to wait for a node to reach the expected state.
	NodeStateTimeout = 1 * time.Minute
	// KStart starts the kubelet.
	KStart KubeletOpt = "start"
	// KStop stops the kubelet.
	KStop KubeletOpt = "stop"
	// KRestart restarts the kubelet.
	KRestart KubeletOpt = "restart"

	minValidSize string = "1Ki"
	maxValidSize string = "10Ei"
)

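// VerifyFSGroupInPod verifies that the fsGroup owning filePath inside the pod matches expectedFSGroup.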
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
	cmd := fmt.Sprintf("ls -l %s", filePath)
	stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
	framework.ExpectNoError(err)
	framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
	fsGroupResult := strings.Fields(stdout)[3]
	gomega.Expect(fsGroupResult).To(gomega.Equal(expectedFSGroup), "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}

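// getKubeletMainPid returns the main PID of the kubelet on the node at nodeIP, queried over SSH via systemctl or service.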
func getKubeletMainPid(ctx context.Context, nodeIP string, sudoPresent bool, systemctlPresent bool) string {
	command := "service kubelet status | grep 'Main PID'"
	if systemctlPresent {
		command = "systemctl status kubelet | grep 'Main PID'"
	}
	if sudoPresent {
		command = fmt.Sprintf("sudo %s", command)
	}
	framework.Logf("Attempting `%s`", command)
	sshResult, err := e2essh.SSH(ctx, command, nodeIP, framework.TestContext.Provider)
	framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", nodeIP))
	e2essh.LogResult(sshResult)
	gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to get kubelet PID")
	gomega.Expect(sshResult.Stdout).NotTo(gomega.BeEmpty(), "Kubelet Main PID should not be Empty")
	return sshResult.Stdout
}

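// TestKubeletRestartsAndRestoresMount tests that a filesystem volume mounted into a pod stays mounted, and previously written data stays readable, across a kubelet restart.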
func TestKubeletRestartsAndRestoresMount(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
	byteLen := 64
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(ctx, KRestart, c, clientPod)

	ginkgo.By("Testing that written file is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

	framework.Logf("Volume mount detected on pod %s and written file %s is readable post-restart.", clientPod.Name, volumePath)
}

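// TestKubeletRestartsAndRestoresMap tests that a block volume mapped into a pod stays accessible, and previously written data stays readable, across a kubelet restart.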
func TestKubeletRestartsAndRestoresMap(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
	byteLen := 64
	seed := time.Now().UTC().UnixNano()

	ginkgo.By("Writing to the volume.")
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

	ginkgo.By("Restarting kubelet")
	KubeletCommand(ctx, KRestart, c, clientPod)

	ginkgo.By("Testing that written pv is accessible.")
	CheckReadFromPath(f, clientPod, v1.PersistentVolumeBlock, false, volumePath, byteLen, seed)

	framework.Logf("Volume map detected on pod %s and written data %s is readable post-restart.", clientPod.Name, volumePath)
}

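// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts from a node if its pod was deleted while the kubelet was down. forceDelete controls whether the pod is deleted forcefully, and checkSubpath whether subpath mounts are verified as well. If secondPod is non-nil, it is started while the kubelet is down to check that the volume stays usable during the transition.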
func TestVolumeUnmountsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod, volumePath string) {
	nodeIP, err := getHostAddress(ctx, c, clientPod)
	framework.ExpectNoError(err)
	nodeIP = nodeIP + ":22"

	ginkgo.By("Expecting the volume mount to be found.")
	result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount to be found.")
		result, err := e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
	}

	ginkgo.By("Writing to the volume.")
	byteLen := 64
	seed := time.Now().UTC().UnixNano()
	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)

	// This cleanup makes sure the kubelet is started again even if the test fails in between.
	ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
	ginkgo.By("Stopping the kubelet.")
	KubeletCommand(ctx, KStop, c, clientPod)

	if secondPod != nil {
		ginkgo.By("Starting the second pod")
		_, err = c.CoreV1().Pods(clientPod.Namespace).Create(ctx, secondPod, metav1.CreateOptions{})
		framework.ExpectNoError(err, "when starting the second pod")
	}

	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
	if forceDelete {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
	} else {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
	}
	framework.ExpectNoError(err)

	ginkgo.By("Starting the kubelet and waiting for pod to delete.")
	KubeletCommand(ctx, KStart, c, clientPod)
	err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
	framework.ExpectNoError(err, "Expected pod to be not found.")

	if forceDelete {
		// With force deletion, the pod is removed from the API server immediately,
		// so give the kubelet some time to clean up the mount on the node.
		time.Sleep(30 * time.Second)
	}

	if secondPod != nil {
		ginkgo.By("Waiting for the second pod.")
		err = e2epod.WaitForPodRunningInNamespace(ctx, c, secondPod)
		framework.ExpectNoError(err, "while waiting for the second pod Running")

		ginkgo.By("Getting the second pod uuid.")
		secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(ctx, secondPod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "getting the second UID")

		ginkgo.By("Expecting the volume mount to be found in the second pod.")
		result, err := e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
		gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

		ginkgo.By("Testing that written file is accessible in the second pod.")
		CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, volumePath, byteLen, seed)
		err = c.CoreV1().Pods(secondPod.Namespace).Delete(ctx, secondPod.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "when deleting the second pod")
		err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
		framework.ExpectNoError(err, "when waiting for the second pod to disappear")
	}

	ginkgo.By("Expecting the volume mount not to be found.")
	result, err = e2essh.SSH(ctx, fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
	framework.Logf("Volume unmounted on node %s", clientPod.Spec.NodeName)

	if checkSubpath {
		ginkgo.By("Expecting the volume subpath mount not to be found.")
		result, err = e2essh.SSH(ctx, fmt.Sprintf("cat /proc/self/mountinfo | grep %s | grep volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
		e2essh.LogResult(result)
		framework.ExpectNoError(err, "Encountered SSH error.")
		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
	}
}

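// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the pod was deleted while the kubelet was down.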
func TestVolumeUnmountsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, false, nil, volumePath)
}

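// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the pod was forcefully deleted while the kubelet was down.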
func TestVolumeUnmountsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {
	TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, false, nil, volumePath)
}

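// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a block volume unmaps from a node if its pod was deleted while the kubelet was down. forceDelete controls whether the pod is deleted forcefully.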
func TestVolumeUnmapsFromDeletedPodWithForceOption(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, devicePath string) {
	nodeIP, err := getHostAddress(ctx, c, clientPod)
	framework.ExpectNoError(err, "Failed to get nodeIP.")
	nodeIP = nodeIP + ":22"

	// Command to check whether the pod's device map path still contains a symlink.
	podDirectoryCmd := fmt.Sprintf("ls /var/lib/kubelet/pods/%s/volumeDevices/*/ | grep '.'", clientPod.UID)
	if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
		podDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", podDirectoryCmd)
	}

	// Command to check whether the global map path still contains a symlink for the pod.
	globalBlockDirectoryCmd := fmt.Sprintf("find /var/lib/kubelet/plugins -name %s", clientPod.UID)
	if isSudoPresent(ctx, nodeIP, framework.TestContext.Provider) {
		globalBlockDirectoryCmd = fmt.Sprintf("sudo sh -c \"%s\"", globalBlockDirectoryCmd)
	}

	ginkgo.By("Expecting the symlinks from PodDeviceMapPath to be found.")
	result, err := e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))

	ginkgo.By("Expecting the symlinks from global map path to be found.")
	result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Code).To(gomega.Equal(0), fmt.Sprintf("Expected find exit code of 0, got %d", result.Code))

	// This cleanup makes sure the kubelet is started again even if the test fails in between.
	ginkgo.DeferCleanup(KubeletCommand, KStart, c, clientPod)
	ginkgo.By("Stopping the kubelet.")
	KubeletCommand(ctx, KStop, c, clientPod)

	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
	if forceDelete {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, *metav1.NewDeleteOptions(0))
	} else {
		err = c.CoreV1().Pods(clientPod.Namespace).Delete(ctx, clientPod.Name, metav1.DeleteOptions{})
	}
	framework.ExpectNoError(err, "Failed to delete pod.")

	ginkgo.By("Starting the kubelet and waiting for pod to delete.")
	KubeletCommand(ctx, KStart, c, clientPod)
	err = e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
	framework.ExpectNoError(err, "Expected pod to be not found.")

	if forceDelete {
		// With force deletion, the pod is removed from the API server immediately,
		// so give the kubelet some time to clean up the device paths on the node.
		time.Sleep(30 * time.Second)
	}

	ginkgo.By("Expecting the symlink from PodDeviceMapPath not to be found.")
	result, err = e2essh.SSH(ctx, podDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty.")

	ginkgo.By("Expecting the symlinks from global map path not to be found.")
	result, err = e2essh.SSH(ctx, globalBlockDirectoryCmd, nodeIP, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err, "Encountered SSH error.")
	gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected find stdout to be empty.")

	framework.Logf("Volume unmapped on node %s", clientPod.Spec.NodeName)
}

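// TestVolumeUnmapsFromDeletedPod tests that a block volume unmaps if the pod was deleted while the kubelet was down.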
func TestVolumeUnmapsFromDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
	TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, false, devicePath)
}

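// TestVolumeUnmapsFromForceDeletedPod tests that a block volume unmaps if the pod was forcefully deleted while the kubelet was down.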
func TestVolumeUnmapsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, devicePath string) {
	TestVolumeUnmapsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, devicePath)
}

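// RunInPodWithVolume starts a pod that mounts the given PVC, runs the given shell command in it, and waits for the pod to succeed. The pod is deleted during test cleanup.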
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "pvc-volume-tester-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{"/bin/sh"},
					Args:    []string{"-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimName,
							ReadOnly:  false,
						},
					},
				},
			},
		},
	}
	pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	ginkgo.DeferCleanup(e2epod.DeletePodOrFail, c, ns, pod.Name)
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
}

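// StartExternalProvisioner starts a pod running an NFS-based external provisioner that registers under externalPluginName, and waits for it to be running.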
func StartExternalProvisioner(ctx context.Context, c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
	podClient := c.CoreV1().Pods(ns)

	provisionerPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "external-provisioner-",
		},

		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "nfs-provisioner",
					Image: imageutils.GetE2EImage(imageutils.NFSProvisioner),
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							Add: []v1.Capability{"DAC_READ_SEARCH"},
						},
					},
					Args: []string{
						"-provisioner=" + externalPluginName,
						"-grace-period=0",
					},
					Ports: []v1.ContainerPort{
						{Name: "nfs", ContainerPort: 2049},
						{Name: "mountd", ContainerPort: 20048},
						{Name: "rpcbind", ContainerPort: 111},
						{Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "status.podIP",
								},
							},
						},
					},
					ImagePullPolicy: v1.PullIfNotPresent,
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "export-volume",
							MountPath: "/export",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "export-volume",
					VolumeSource: v1.VolumeSource{
						EmptyDir: &v1.EmptyDirVolumeSource{},
					},
				},
			},
		},
	}
	provisionerPod, err := podClient.Create(ctx, provisionerPod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)

	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, c, provisionerPod))

	ginkgo.By("locating the provisioner pod")
	pod, err := podClient.Get(ctx, provisionerPod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)

	return pod
}

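// isSudoPresent reports whether the sudo command is available on the node at nodeIP.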
func isSudoPresent(ctx context.Context, nodeIP string, provider string) bool {
	framework.Logf("Checking if sudo command is present")
	sshResult, err := e2essh.SSH(ctx, "sudo --version", nodeIP, provider)
	framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
	return !strings.Contains(sshResult.Stderr, "command not found")
}

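// CheckReadWriteToPath verifies that the given path supports the reads and writes appropriate for its volume mode, and that operations of the opposite mode fail.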
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// random -> file1
		e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
		// file1 -> dev (write to dev)
		e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
		// dev -> file2 (read from dev)
		e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
		// file1 and file2 must have the same contents
		e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
		// Clean up the temporary files
		e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")

		// Writing to the block device as if it were a directory must fail
		e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
	} else {
		// text -> file1 (write to file)
		e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
		// grep file1 (read from file and check contents)
		e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello world.", path))
		// Writing to the directory as if it were a block device must fail
		e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
	}
}

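// readFile returns a shell command that checks file1.txt under path for the given content.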
func readFile(content, path string) string {
	if framework.NodeOSDistroIs("windows") {
		return fmt.Sprintf("Select-String '%s' %s/file1.txt", content, path)
	}
	return fmt.Sprintf("grep '%s' %s/file1.txt", content, path)
}

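// genBinDataFromSeed generates len bytes of deterministic pseudo-random data from seed.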
func genBinDataFromSeed(len int, seed int64) []byte {
	binData := make([]byte, len)
	// Use a dedicated source so the global PRNG state is left untouched.
	r := rand.New(rand.NewSource(seed))
	if _, err := r.Read(binData); err != nil {
		framework.Logf("Error reading random data: %v", err)
	}
	return binData
}

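// CheckReadFromPath verifies that the volume (or file1.txt inside it, for
// filesystem mode) contains the data generated from seed, by comparing sha256
// checksums inside the pod. With directIO, dd reads with iflag=direct, which
// bypasses the page cache; note that direct I/O requires aligned buffers.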
func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, directIO bool, path string, len int, seed int64) {
	var pathForVolMode string
	var iflag string

	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	if directIO {
		iflag = "iflag=direct"
	}

	sum := sha256.Sum256(genBinDataFromSeed(len, seed))

	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}

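// CheckWriteToPath writes data generated from seed to the volume (or to
// file1.txt inside it, for filesystem mode), first verifying the checksum of
// the data inside the pod. With nocache, dd writes with oflag=nocache to
// request that the written data not be kept in the page cache.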
func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, nocache bool, path string, len int, seed int64) {
	var pathForVolMode string
	var oflag string

	if volMode == v1.PersistentVolumeBlock {
		pathForVolMode = path
	} else {
		pathForVolMode = filepath.Join(path, "file1.txt")
	}

	if nocache {
		oflag = "oflag=nocache"
	}

	encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
	e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}

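// GetSectorSize returns the sector size of the given block device inside the pod, as reported by blockdev --getss.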
func GetSectorSize(f *framework.Framework, pod *v1.Pod, device string) int {
	stdout, _, err := e2evolume.PodExec(f, pod, fmt.Sprintf("blockdev --getss %s", device))
	framework.ExpectNoError(err, "Failed to get sector size of %s", device)
	ss, err := strconv.Atoi(stdout)
	framework.ExpectNoError(err, "Sector size returned by blockdev command isn't an integer value.")

	return ss
}

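// findMountPoints returns all mount points found under dir on the given node.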
func findMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node, dir string) []string {
	result, err := hostExec.IssueCommandWithResult(ctx, fmt.Sprintf(`find %s -type d -exec mountpoint {} \; | grep 'is a mountpoint$' || true`, dir), node)
	framework.ExpectNoError(err, "Encountered HostExec error.")
	var mountPoints []string
	if err == nil {
		for _, line := range strings.Split(result, "\n") {
			if line == "" {
				continue
			}
			mountPoints = append(mountPoints, strings.TrimSuffix(line, " is a mountpoint"))
		}
	}
	return mountPoints
}

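// FindVolumeGlobalMountPoints returns the set of all mount points under the kubelet plugins directory (/var/lib/kubelet/plugins) on the node.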
func FindVolumeGlobalMountPoints(ctx context.Context, hostExec HostExec, node *v1.Node) sets.String {
	return sets.NewString(findMountPoints(ctx, hostExec, node, "/var/lib/kubelet/plugins")...)
}

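// CreateDriverNamespace creates a second namespace for installing a storage
// driver, labeled so it can be associated with the test namespace; the
// framework cleans it up together with the test.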
func CreateDriverNamespace(ctx context.Context, f *framework.Framework) *v1.Namespace {
	ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.Namespace.Name))

	namespace, err := f.CreateNamespace(ctx, f.Namespace.Name, map[string]string{
		"e2e-framework":      f.BaseName,
		"e2e-test-namespace": f.Namespace.Name,
	})
	framework.ExpectNoError(err)

	if framework.TestContext.VerifyServiceAccount {
		ginkgo.By("Waiting for a default service account to be provisioned in namespace")
		err = framework.WaitForDefaultServiceAccountInNamespace(ctx, f.ClientSet, namespace.Name)
		framework.ExpectNoError(err)
	} else {
		framework.Logf("Skipping waiting for service account")
	}
	return namespace
}

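// WaitForGVRDeletion waits until a cluster-scoped object of the given GroupVersionResource is deleted, polling every poll interval up to timeout.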
func WaitForGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)

	if successful := WaitUntil(poll, timeout, func() bool {
		_, err := c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
		if err != nil && apierrors.IsNotFound(err) {
			framework.Logf("%s %v is not found and has been deleted", gvr.Resource, objectName)
			return true
		} else if err != nil {
			framework.Logf("Get %s returned an error: %v", objectName, err.Error())
		} else {
			framework.Logf("%s %v has been found and is not deleted", gvr.Resource, objectName)
		}

		return false
	}); successful {
		return nil
	}

	return fmt.Errorf("%s %s was not deleted within %v", gvr.Resource, objectName, timeout)
}

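// WaitForNamespacedGVRDeletion waits until an object of the given GroupVersionResource is deleted from namespace ns, polling every poll interval up to timeout.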
func WaitForNamespacedGVRDeletion(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, ns, objectName string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for %s %s to be deleted", timeout, gvr.Resource, objectName)

	if successful := WaitUntil(poll, timeout, func() bool {
		_, err := c.Resource(gvr).Namespace(ns).Get(ctx, objectName, metav1.GetOptions{})
		if err != nil && apierrors.IsNotFound(err) {
			framework.Logf("%s %s is not found in namespace %s and has been deleted", gvr.Resource, objectName, ns)
			return true
		} else if err != nil {
			framework.Logf("Get %s in namespace %s returned an error: %v", objectName, ns, err.Error())
		} else {
			framework.Logf("%s %s has been found in namespace %s and is not deleted", gvr.Resource, objectName, ns)
		}

		return false
	}); successful {
		return nil
	}

	return fmt.Errorf("%s %s in namespace %s was not deleted within %v", gvr.Resource, objectName, ns, timeout)
}

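// WaitUntil calls checkDone every poll interval until it returns true or timeout elapses, and reports whether the condition was met in time.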
func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		if checkDone() {
			framework.Logf("WaitUntil finished successfully after %v", time.Since(start))
			return true
		}
	}

	framework.Logf("WaitUntil failed after reaching the timeout %v", timeout)
	return false
}

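// WaitForGVRFinalizer waits until the named object of the given GroupVersionResource carries the given finalizer. An empty objectNamespace means the resource is cluster-scoped.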
func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName, objectNamespace, finalizer string, poll, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for object %s %s of resource %s to contain finalizer %s", timeout, objectNamespace, objectName, gvr.Resource, finalizer)
	var (
		err error
		obj *unstructured.Unstructured
	)
	if successful := WaitUntil(poll, timeout, func() bool {
		switch objectNamespace {
		case "":
			obj, err = c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
		default:
			obj, err = c.Resource(gvr).Namespace(objectNamespace).Get(ctx, objectName, metav1.GetOptions{})
		}
		if err != nil {
			framework.Logf("Failed to get object %s %s with err: %v. Will retry in %v", objectNamespace, objectName, err, poll)
			return false
		}
		for _, f := range obj.GetFinalizers() {
			if f == finalizer {
				return true
			}
		}
		return false
	}); successful {
		return nil
	}
	if err == nil {
		err = fmt.Errorf("finalizer %s not added to object %s %s of resource %s", finalizer, objectNamespace, objectName, gvr)
	}
	return err
}

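// VerifyFilePathGidInPod verifies that the group owner of filePath inside the pod matches expectedGid.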
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
	cmd := fmt.Sprintf("ls -l %s", filePath)
	stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
	framework.ExpectNoError(err)
	framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
	ll := strings.Fields(stdout)
	framework.Logf("stdout split: %v, expected gid: %v", ll, expectedGid)
	gomega.Expect(ll[3]).To(gomega.Equal(expectedGid))
}

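// ChangeFilePathGidInPod changes the group owner of filePath inside the pod to targetGid and verifies the change.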
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
	cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
	_, _, err := e2evolume.PodExec(f, pod, cmd)
	framework.ExpectNoError(err)
	VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}

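// DeleteStorageClass deletes the specified StorageClass, ignoring not-found errors.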
func DeleteStorageClass(ctx context.Context, cs clientset.Interface, className string) error {
	err := cs.StorageV1().StorageClasses().Delete(ctx, className, metav1.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}

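// CreateVolumeSource creates a VolumeSource that references the given PVC.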
func CreateVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
	return &v1.VolumeSource{
		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
			ClaimName: pvcName,
			ReadOnly:  readOnly,
		},
	}
}

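// TryFunc runs f and converts a panic, if any, into an error.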
func TryFunc(f func()) (err error) {
	if f == nil {
		return nil
	}
	defer func() {
		if recoverError := recover(); recoverError != nil {
			// The named return value is required so that a recovered panic
			// is actually reported to the caller.
			err = fmt.Errorf("%v", recoverError)
		}
	}()
	f()
	return err
}

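// GetSizeRangesIntersection takes two storage size ranges and determines the
// intersection of the intervals, if any, returning its minimum as the claim
// size to use in tests. An unset bound is treated as unlimited.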
func GetSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
	var firstMin, firstMax, secondMin, secondMax resource.Quantity
	var err error

	// An unset bound means there is no limitation, so substitute the widest valid size.
	if len(first.Min) == 0 {
		first.Min = minValidSize
	}
	if len(first.Max) == 0 {
		first.Max = maxValidSize
	}
	if len(second.Min) == 0 {
		second.Min = minValidSize
	}
	if len(second.Max) == 0 {
		second.Max = maxValidSize
	}

	if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
		return "", err
	}
	if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
		return "", err
	}
	if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
		return "", err
	}
	if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
		return "", err
	}

	intersectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
	intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))

	// The minimum of the intersection is used as the claim size.
	var intersectionMin resource.Quantity

	if intersectionEnd-intersectionStart >= 0 {
		intersectionMin = *resource.NewQuantity(int64(intersectionStart), resource.BinarySI)

		return intersectionMin.String(), nil
	}
	return "", fmt.Errorf("intersection of size ranges %+v, %+v is empty", first, second)
}