package volume

import (
	"context"
	"crypto/sha256"
	"fmt"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	clientexec "k8s.io/client-go/util/exec"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	uexec "k8s.io/utils/exec"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

const (
	// Kb is the byte size of a kilobyte.
	Kb int64 = 1000
	// Mb is the byte size of a megabyte.
	Mb int64 = 1000 * Kb
	// Gb is the byte size of a gigabyte.
	Gb int64 = 1000 * Mb
	// Tb is the byte size of a terabyte.
	Tb int64 = 1000 * Gb
	// KiB is the byte size of a kibibyte.
	KiB int64 = 1024
	// MiB is the byte size of a mebibyte.
	MiB int64 = 1024 * KiB
	// GiB is the byte size of a gibibyte.
	GiB int64 = 1024 * MiB
	// TiB is the byte size of a tebibyte.
	TiB int64 = 1024 * GiB

	// VolumeServerPodStartupTimeout is the waiting period for a volume server pod to start up.
	VolumeServerPodStartupTimeout = 3 * time.Minute

	// PodCleanupTimeout is the waiting period for a pod to be cleaned up and unmount its volumes,
	// so that its server pod is not torn down too early.
	PodCleanupTimeout = 20 * time.Second
)

// SizeRange encapsulates a volume size range as maximum and minimum quantity strings.
type SizeRange struct {
	// Max quantity specified as a string, including units, e.g. "3Gi".
	// If unset, it defaults to "".
	Max string
	// Min quantity specified as a string, including units, e.g. "1Gi".
	// If unset, it defaults to "".
	Min string
}

// TestConfig describes a volume server pod and the client pods that talk to it.
type TestConfig struct {
	Namespace string
	// Prefix of all pod names in the test, typically the test name.
	Prefix string
	// Name of the container image for the server pod.
	ServerImage string
	// Ports to export from the server pod, TCP only.
	ServerPorts []int
	// Commands to run in the container image.
	ServerCmds []string
	// Arguments to pass to the container image.
	ServerArgs []string
	// Volumes to mount into the server container from the host,
	// as a map of <host (source) path> -> <container (destination) path>.
	// An empty source path mounts an emptyDir instead.
	ServerVolumes map[string]string
	// Message to wait for in the server pod's log before starting clients.
	ServerReadyMessage string
	// Use HostNetwork for the server pod.
	ServerHostNetwork bool
	// Wait for the server pod to terminate successfully.
	// False indicates that the pod is long running.
	WaitForCompletion bool
	// ClientNodeSelection restricts on which node the client pod runs. Default is any node.
	ClientNodeSelection e2epod.NodeSelection
}

// Test describes a volume to mount into a client pod and the content expected in it.
type Test struct {
	Volume v1.VolumeSource
	Mode   v1.PersistentVolumeMode
	// Name of the file to read/write in FileSystem mode.
	File            string
	ExpectedContent string
}

// NewNFSServer is an NFS-specific wrapper for CreateStorageServer.
func NewNFSServer(ctx context.Context, cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
	return NewNFSServerWithNodeName(ctx, cs, namespace, args, "")
}

// NewNFSServerWithNodeName starts an NFS server pod, optionally pinned to the given node,
// and returns its configuration, the pod, and the host (IP) that clients should connect to.
func NewNFSServerWithNodeName(ctx context.Context, cs clientset.Interface, namespace string, args []string, nodeName string) (config TestConfig, pod *v1.Pod, host string) {
	config = TestConfig{
		Namespace:          namespace,
		Prefix:             "nfs",
		ServerImage:        imageutils.GetE2EImage(imageutils.VolumeNFSServer),
		ServerPorts:        []int{2049},
		ServerVolumes:      map[string]string{"": "/exports"},
		ServerReadyMessage: "NFS started",
	}
	if nodeName != "" {
		config.ClientNodeSelection = e2epod.NodeSelection{Name: nodeName}
	}

	if len(args) > 0 {
		config.ServerArgs = args
	}
	pod, host = CreateStorageServer(ctx, cs, config)
	// Wrap an IPv6 address in brackets so it can be used directly in an NFS server address.
	if strings.Contains(host, ":") {
		host = "[" + host + "]"
	}
	return config, pod, host
}
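
// exampleNFSVolumeTest is an illustrative sketch (not called by the framework itself) of how the
// helpers in this file are typically composed from a test: start an NFS server, describe the
// expected file and content, write it with an injector pod, and verify that a client pod can
// read it back. The file name and content below are placeholders chosen for the example.
func exampleNFSVolumeTest(ctx context.Context, f *framework.Framework) {
	config, _, host := NewNFSServer(ctx, f.ClientSet, f.Namespace.Name, []string{})
	defer TestServerCleanup(ctx, f, config)

	tests := []Test{
		{
			Volume: v1.VolumeSource{
				NFS: &v1.NFSVolumeSource{
					Server: host,
					Path:   "/",
				},
			},
			Mode:            v1.PersistentVolumeFilesystem,
			File:            "index.html",
			ExpectedContent: "Hello from NFS!",
		},
	}
	// Write the expected content into the volume, then check that a separate client pod sees it.
	InjectContent(ctx, f, config, nil, "", tests)
	TestVolumeClient(ctx, f, config, nil, "", tests)
}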

// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in,
// and a pod pointer and IP address string are returned.
// Note: Expect() is called so no error is returned.
func CreateStorageServer(ctx context.Context, cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
	pod = startVolumeServer(ctx, cs, config)
	gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
	ip = pod.Status.PodIP
	gomega.Expect(ip).NotTo(gomega.BeEmpty(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
	framework.Logf("%s server pod IP address: %s", config.Prefix, ip)
	return pod, ip
}

// GetVolumeAttachmentName returns the name of the VolumeAttachment for the PV bound to the PVC
// with the given claimName and claimNamespace, derived from the volume handle, provisioner and node name.
func GetVolumeAttachmentName(ctx context.Context, cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
	var nodeName string
	// If ClientNodeSelection is not set, the node name is not known up front. In that case,
	// look through all VolumeAttachments for the one whose source PV is the PV bound to the
	// given claim and take the node name from it; that node name is part of the generated
	// attachment name below.
	if config.ClientNodeSelection.Name == "" {
		claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
		pvName := claim.Spec.VolumeName
		volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{})
		for _, volumeAttachment := range volumeAttachments.Items {
			if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
				nodeName = volumeAttachment.Spec.NodeName
				break
			}
		}
	} else {
		nodeName = config.ClientNodeSelection.Name
	}
	handle := getVolumeHandle(ctx, cs, claimName, claimNamespace)
	// The attachment name is derived deterministically: "csi-" followed by the SHA-256 hash
	// of the volume handle, provisioner and node name.
	attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
	return fmt.Sprintf("csi-%x", attachmentHash)
}

// getVolumeHandle returns the VolumeHandle of the CSI PV that is bound to the given PVC.
func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName string, claimNamespace string) string {
	// Get the PVC and the bound PV, then read the CSI volume handle from the PV spec.
	claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
	if err != nil {
		framework.ExpectNoError(err, "Cannot get PVC")
		return ""
	}
	pvName := claim.Spec.VolumeName
	pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
	if err != nil {
		framework.ExpectNoError(err, "Cannot get PV")
		return ""
	}
	if pv.Spec.CSI == nil {
		gomega.Expect(pv.Spec.CSI).NotTo(gomega.BeNil())
		return ""
	}
	return pv.Spec.CSI.VolumeHandle
}

// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the given name to be gone.
func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
	waitErr := wait.PollUntilContextTimeout(ctx, 10*time.Second, timeout, true, func(ctx context.Context) (bool, error) {
		_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
		if err != nil {
			// If the VolumeAttachment object is not found, it has been terminated.
			if apierrors.IsNotFound(err) {
				return true, nil
			}
			return false, err
		}
		return false, nil
	})
	if waitErr != nil {
		return fmt.Errorf("error waiting for volume attachment %v to terminate: %v", attachmentName, waitErr)
	}
	return nil
}
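
// exampleWaitForDetach is an illustrative sketch (not used by the framework itself) of combining
// GetVolumeAttachmentName and WaitForVolumeAttachmentTerminated to confirm that a CSI volume was
// detached after its consuming pod went away. The provisioner and claim names are placeholders.
func exampleWaitForDetach(ctx context.Context, f *framework.Framework, config TestConfig) error {
	attachmentName := GetVolumeAttachmentName(ctx, f.ClientSet, config, "example.csi.driver.io", "example-pvc", f.Namespace.Name)
	return WaitForVolumeAttachmentTerminated(ctx, attachmentName, f.ClientSet, f.Timeouts.PodDelete)
}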

// startVolumeServer starts a pod that runs config.ServerImage and exports all
// config.ServerPorts from it. The returned pod can be used to get the server
// IP address and create an appropriate VolumeSource for the clients.
func startVolumeServer(ctx context.Context, client clientset.Interface, config TestConfig) *v1.Pod {
	podClient := client.CoreV1().Pods(config.Namespace)

	portCount := len(config.ServerPorts)
	serverPodPorts := make([]v1.ContainerPort, portCount)

	for i := 0; i < portCount; i++ {
		portName := fmt.Sprintf("%s-%d", config.Prefix, i)

		serverPodPorts[i] = v1.ContainerPort{
			Name:          portName,
			ContainerPort: int32(config.ServerPorts[i]),
			Protocol:      v1.ProtocolTCP,
		}
	}

	volumeCount := len(config.ServerVolumes)
	volumes := make([]v1.Volume, volumeCount)
	mounts := make([]v1.VolumeMount, volumeCount)

	// Build one volume and mount per ServerVolumes entry; an empty source path becomes an
	// emptyDir, everything else becomes a hostPath volume.
	i := 0
	for src, dst := range config.ServerVolumes {
		mountName := fmt.Sprintf("path%d", i)
		volumes[i].Name = mountName
		if src == "" {
			volumes[i].VolumeSource.EmptyDir = &v1.EmptyDirVolumeSource{}
		} else {
			volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
				Path: src,
			}
		}

		mounts[i].Name = mountName
		mounts[i].ReadOnly = false
		mounts[i].MountPath = dst

		i++
	}

	serverPodName := fmt.Sprintf("%s-server", config.Prefix)
	ginkgo.By(fmt.Sprint("creating ", serverPodName, " pod"))
	privileged := new(bool)
	*privileged = true

	restartPolicy := v1.RestartPolicyAlways
	if config.WaitForCompletion {
		restartPolicy = v1.RestartPolicyNever
	}
	serverPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: serverPodName,
			Labels: map[string]string{
				"role": serverPodName,
			},
		},

		Spec: v1.PodSpec{
			HostNetwork: config.ServerHostNetwork,
			Containers: []v1.Container{
				{
					Name:  serverPodName,
					Image: config.ServerImage,
					SecurityContext: &v1.SecurityContext{
						Privileged: privileged,
					},
					Command:      config.ServerCmds,
					Args:         config.ServerArgs,
					Ports:        serverPodPorts,
					VolumeMounts: mounts,
				},
			},
			Volumes:       volumes,
			RestartPolicy: restartPolicy,
		},
	}

	if config.ClientNodeSelection.Name != "" {
		serverPod.Spec.NodeName = config.ClientNodeSelection.Name
	}

	var pod *v1.Pod
	serverPod, err := podClient.Create(ctx, serverPod, metav1.CreateOptions{})

	if err != nil {
		if apierrors.IsAlreadyExists(err) {
			framework.Logf("Ignore \"already-exists\" error, re-get pod...")
			ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
			serverPod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
			framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
			pod = serverPod
		} else {
			framework.ExpectNoError(err, "Failed to create %q pod: %v", serverPodName, err)
		}
	}
	if config.WaitForCompletion {
		framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, client, serverPod.Name, serverPod.Namespace))
		framework.ExpectNoError(podClient.Delete(ctx, serverPod.Name, metav1.DeleteOptions{}))
	} else {
		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, client, serverPod))
		if pod == nil {
			ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
			pod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
			framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
		}
	}
	if config.ServerReadyMessage != "" {
		_, err := e2epodoutput.LookForStringInLogWithoutKubectl(ctx, client, pod.Namespace, pod.Name, serverPodName, config.ServerReadyMessage, VolumeServerPodStartupTimeout)
		framework.ExpectNoError(err, "Failed to find %q in pod logs: %s", config.ServerReadyMessage, err)
	}
	return pod
}

// TestServerCleanup deletes the server pod that was started for the given config.
func TestServerCleanup(ctx context.Context, f *framework.Framework, config TestConfig) {
	ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
	defer ginkgo.GinkgoRecover()

	if config.ServerImage == "" {
		return
	}

	err := e2epod.DeletePodWithWaitByName(ctx, f.ClientSet, config.Prefix+"-server", config.Namespace)
	framework.ExpectNoError(err, "delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}

// runVolumeTesterPod starts a pod with the given suffix that mounts all volumes from tests and
// keeps running so that the volume contents can be inspected. It waits until the pod is running
// and returns it, or returns an error.
func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
	ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
	var gracePeriod int64 = 1
	var command string

	// HostPath-based volumes may need a privileged client (e.g. on SELinux-enabled hosts)
	// to read content written by another pod.
	securityLevel := admissionapi.LevelBaseline
	if privileged || config.Prefix == "hostpathsymlink" || config.Prefix == "hostpath" {
		securityLevel = admissionapi.LevelPrivileged
	}
	command = "while true ; do sleep 2; done "
	seLinuxOptions := &v1.SELinuxOptions{Level: "s0:c0,c1"}
	clientPod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: config.Prefix + "-" + podSuffix,
			Labels: map[string]string{
				"role": config.Prefix + "-" + podSuffix,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:       config.Prefix + "-" + podSuffix,
					Image:      e2epod.GetDefaultTestImage(),
					WorkingDir: "/opt",
					// A simple long-running container; the tests read and write the volume
					// contents under /opt via exec.
					Command:      e2epod.GenerateScriptCmd(command),
					VolumeMounts: []v1.VolumeMount{},
				},
			},
			TerminationGracePeriodSeconds: &gracePeriod,
			SecurityContext:               e2epod.GeneratePodSecurityContext(fsGroup, seLinuxOptions),
			Volumes:                       []v1.Volume{},
		},
	}
	e2epod.SetNodeSelection(&clientPod.Spec, config.ClientNodeSelection)

	for i, test := range tests {
		volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i)

		// If a privileged client was requested but this volume is consumed in block mode,
		// fall back to the baseline security level for the container.
		if privileged && test.Mode == v1.PersistentVolumeBlock {
			securityLevel = admissionapi.LevelBaseline
		}
		clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(securityLevel)

		if test.Mode == v1.PersistentVolumeBlock {
			clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
				Name:       volumeName,
				DevicePath: fmt.Sprintf("/opt/%d", i),
			})
		} else {
			clientPod.Spec.Containers[0].VolumeMounts = append(clientPod.Spec.Containers[0].VolumeMounts, v1.VolumeMount{
				Name:      volumeName,
				MountPath: fmt.Sprintf("/opt/%d", i),
			})
		}
		clientPod.Spec.Volumes = append(clientPod.Spec.Volumes, v1.Volume{
			Name:         volumeName,
			VolumeSource: test.Volume,
		})
	}
	podsNamespacer := client.CoreV1().Pods(config.Namespace)
	clientPod, err := podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
	if err != nil {
		return nil, err
	}
	if slow {
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
	} else {
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
	}
	if err != nil {
		e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
		_ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodDelete)
		return nil, err
	}
	return clientPod, nil
}

// testVolumeContent verifies that the given pod (or the named container within it) sees the
// expected data for each test, and that the block vs. filesystem mode of each path is correct.
func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string, fsGroup *int64, fsType string, tests []Test) {
	ginkgo.By("Checking that text file contents are perfect.")
	for i, test := range tests {
		if test.Mode == v1.PersistentVolumeBlock {
			// Block: check content
			deviceName := fmt.Sprintf("/opt/%d", i)
			commands := GenerateReadBlockCmd(deviceName, len(test.ExpectedContent))
			_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
			framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

			// Check that it's a real block device
			CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
		} else {
			// Filesystem: check content
			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
			commands := GenerateReadFileCmd(fileName)
			_, err := e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, commands, test.ExpectedContent, time.Minute)
			framework.ExpectNoError(err, "failed: finding the contents of the mounted file %s.", fileName)

			// Check that a directory has been mounted
			dirName := filepath.Dir(fileName)
			CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

			if !framework.NodeOSDistroIs("windows") {
				// Filesystem: check fsGroup
				if fsGroup != nil {
					ginkgo.By("Checking fsGroup is correct.")
					_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"ls", "-ld", dirName}, strconv.Itoa(int(*fsGroup)), time.Minute)
					framework.ExpectNoError(err, "failed: getting the right privileges in the file %v", int(*fsGroup))
				}

				// Filesystem: check fsType
				if fsType != "" {
					ginkgo.By("Checking fsType is correct.")
					_, err = e2epodoutput.LookForStringInPodExecToContainer(pod.Namespace, pod.Name, containerName, []string{"grep", " " + dirName + " ", "/proc/mounts"}, fsType, time.Minute)
					framework.ExpectNoError(err, "failed: getting the right fsType %s", fsType)
				}
			}
		}
	}
}

// TestVolumeClient starts a client pod using the given VolumeSources and checks that the pod
// sees the expected data, e.g. from the server pod. Multiple Tests can be specified to mount
// multiple volumes into a single pod. Pod creation, scheduling and complete startup (incl.
// volume attach and mount) must finish within the regular pod-start timeout.
func TestVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	testVolumeClient(ctx, f, config, fsGroup, fsType, tests, false)
}

// TestVolumeClientSlow is the same as TestVolumeClient except that it uses the slow pod-start
// timeout. It should be used for cases where volume provisioning or attach is expected to take
// longer than usual.
func TestVolumeClientSlow(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	testVolumeClient(ctx, f, config, fsGroup, fsType, tests, true)
}

func testVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
	timeouts := f.Timeouts
	clientPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
	if err != nil {
		framework.Failf("Failed to create client pod: %v", err)
	}
	defer func() {
		// testVolumeClient might get used more than once per test, therefore
		// the client pod has to be cleaned up before returning.
		e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Name, clientPod.Namespace, timeouts.PodDelete))
	}()

	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)

	// Repeat the checks from an ephemeral container added to the same pod.
	ginkgo.By("Repeating the test on an ephemeral container (if enabled)")
	ec := &v1.EphemeralContainer{
		EphemeralContainerCommon: v1.EphemeralContainerCommon(clientPod.Spec.Containers[0]),
	}
	ec.Resources = v1.ResourceRequirements{}
	ec.Name = "volume-ephemeral-container"
	err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
	framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
	testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
}

// InjectContent writes the expected content of each test into its volume. It does so by
// starting an auxiliary "injector" pod that mounts the volumes and writes the files or
// block devices. The volumes must be writable.
func InjectContent(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
	privileged := true
	timeouts := f.Timeouts
	if framework.NodeOSDistroIs("windows") {
		privileged = false
	}
	injectorPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
	if err != nil {
		framework.Failf("Failed to create injector pod: %v", err)
		return
	}
	defer func() {
		// The injector pod must be gone before returning so that the volumes are not
		// in use by any other pod afterwards.
		e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
		framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Name, injectorPod.Namespace, timeouts.PodDelete))
	}()

	ginkgo.By("Writing text file contents in the container.")
	for i, test := range tests {
		commands := []string{"exec", injectorPod.Name, fmt.Sprintf("--namespace=%v", injectorPod.Namespace), "--"}
		if test.Mode == v1.PersistentVolumeBlock {
			// Block: write content
			deviceName := fmt.Sprintf("/opt/%d", i)
			commands = append(commands, generateWriteBlockCmd(test.ExpectedContent, deviceName)...)
		} else {
			// Filesystem: write content
			fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
			commands = append(commands, generateWriteFileCmd(test.ExpectedContent, fileName)...)
		}
		out, err := e2ekubectl.RunKubectl(injectorPod.Namespace, commands...)
		framework.ExpectNoError(err, "failed: writing the contents: %s", out)
	}

	// Check that the data have really been written in this pod.
	// This tests non-persistent volume types.
	testVolumeContent(f, injectorPod, "", fsGroup, fsType, tests)
}

// generateWriteCmd produces a shell command that writes the given content to the given path.
func generateWriteCmd(content, path string) []string {
	var commands []string
	commands = []string{"/bin/sh", "-c", "echo '" + content + "' > " + path + "; sync"}
	return commands
}

// GenerateReadBlockCmd generates a command to read the given number of characters from a block device.
func GenerateReadBlockCmd(fullPath string, numberOfCharacters int) []string {
	var commands []string
	commands = []string{"head", "-c", strconv.Itoa(numberOfCharacters), fullPath}
	return commands
}

// generateWriteBlockCmd generates a command to write the given content to a block device.
func generateWriteBlockCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}

// GenerateReadFileCmd generates a command to read the file at the given path.
func GenerateReadFileCmd(fullPath string) []string {
	var commands []string
	commands = []string{"cat", fullPath}
	return commands
}

// generateWriteFileCmd generates a command to write the given content to the file at the given path.
func generateWriteFileCmd(content, fullPath string) []string {
	return generateWriteCmd(content, fullPath)
}

// CheckVolumeModeOfPath checks that the given path inside the pod matches the given volume mode.
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
	if volMode == v1.PersistentVolumeBlock {
		// Check that the block device exists
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

		// Double check that it is not a directory
		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
	} else {
		// Check that the directory exists
		VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

		// Double check that it is not a block device
		VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
	}
}

// PodExec runs the given shell command in the first container of the given pod via
// "/bin/sh -c" and returns stdout, stderr and error.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
	return e2epod.ExecCommandInContainerWithFullOutput(f, pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}

// VerifyExecInPodSucceed verifies that the given shell command succeeds in the target pod.
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
	stdout, stderr, err := PodExec(f, pod, shExec)
	if err != nil {
		if exiterr, ok := err.(uexec.CodeExitError); ok {
			exitCode := exiterr.ExitStatus()
			framework.ExpectNoError(err,
				"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
				shExec, exitCode, exiterr, stdout, stderr)
		} else {
			framework.ExpectNoError(err,
				"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
				shExec, err, stdout, stderr)
		}
	}
}

// VerifyExecInPodFail verifies that the given shell command fails in the target pod with the given exit code.
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
	stdout, stderr, err := PodExec(f, pod, shExec)
	if err != nil {
		if exiterr, ok := err.(clientexec.ExitError); ok {
			actualExitCode := exiterr.ExitStatus()
			gomega.Expect(actualExitCode).To(gomega.Equal(exitCode),
				"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
				shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
		} else {
			framework.ExpectNoError(err,
				"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
				shExec, exitCode, err, stdout, stderr)
		}
	}
	gomega.Expect(err).To(gomega.HaveOccurred(), "%q should fail with exit code %d, but exited without error", shExec, exitCode)
}