package storage

import (
	"context"
	"fmt"
	"math/rand"
	"strings"
	"time"

	"google.golang.org/api/googleapi"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
	gcePDDetachTimeout  = 10 * time.Minute
	gcePDDetachPollTime = 10 * time.Second
	nodeStatusTimeout   = 10 * time.Minute
	nodeStatusPollTime  = 1 * time.Second
	podEvictTimeout     = 2 * time.Minute
)

var _ = utils.SIGDescribe("Pod Disks", feature.StorageProvider, func() {
	var (
		ns         string
		cs         clientset.Interface
		podClient  v1core.PodInterface
		nodeClient v1core.NodeInterface
		host0Name  types.NodeName
		host1Name  types.NodeName
		nodes      *v1.NodeList
	)
	f := framework.NewDefaultFramework("pod-disks")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.BeforeEach(func(ctx context.Context) {
		e2eskipper.SkipUnlessNodeCountIsAtLeast(minNodes)
		cs = f.ClientSet
		ns = f.Namespace.Name

		e2eskipper.SkipIfMultizone(ctx, cs)

		podClient = cs.CoreV1().Pods(ns)
		nodeClient = cs.CoreV1().Nodes()
		var err error
		nodes, err = e2enode.GetReadySchedulableNodes(ctx, cs)
		framework.ExpectNoError(err)
		if len(nodes.Items) < minNodes {
			e2eskipper.Skipf("The test requires %d schedulable nodes, got only %d", minNodes, len(nodes.Items))
		}
		host0Name = types.NodeName(nodes.Items[0].ObjectMeta.Name)
		host1Name = types.NodeName(nodes.Items[1].ObjectMeta.Name)
	})

	f.Context("schedule pods each with a PD, delete pod and verify detach", f.WithSlow(), func() {
		const (
			podDefaultGrace   = "default (30s)"
			podImmediateGrace = "immediate (0s)"
		)
		var readOnlyMap = map[bool]string{
			true:  "read-only",
			false: "RW",
		}
		type testT struct {
			descr     string
			readOnly  bool
			deleteOpt metav1.DeleteOptions
		}
		tests := []testT{
			{
				descr:     podImmediateGrace,
				readOnly:  false,
				deleteOpt: *metav1.NewDeleteOptions(0),
			},
			{
				descr:     podDefaultGrace,
				readOnly:  false,
				deleteOpt: metav1.DeleteOptions{},
			},
			{
				descr:     podImmediateGrace,
				readOnly:  true,
				deleteOpt: *metav1.NewDeleteOptions(0),
			},
			{
				descr:     podDefaultGrace,
				readOnly:  true,
				deleteOpt: metav1.DeleteOptions{},
			},
		}

		for _, t := range tests {
			podDelOpt := t.deleteOpt
			readOnly := t.readOnly
			readOnlyTxt := readOnlyMap[readOnly]

			ginkgo.It(fmt.Sprintf("for %s PD with pod delete grace period of %q", readOnlyTxt, t.descr), func(ctx context.Context) {
				e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
				if readOnly {
					e2eskipper.SkipIfProviderIs("aws")
				}

				ginkgo.By("creating PD")
				diskName, err := e2epv.CreatePDWithRetry(ctx)
				framework.ExpectNoError(err, "Error creating PD")

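				// For read-only mounts the PD must already contain a filesystem,
				// so first run a short-lived read-write pod to format the disk.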
				var fmtPod *v1.Pod
				if readOnly {
					ginkgo.By("creating RW fmt Pod to ensure PD is formatted")
					fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
					_, err = podClient.Create(ctx, fmtPod, metav1.CreateOptions{})
					framework.ExpectNoError(err, "Failed to create fmtPod")
					framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

					ginkgo.By("deleting the fmtPod")
					framework.ExpectNoError(podClient.Delete(ctx, fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
					framework.Logf("deleted fmtPod %q", fmtPod.Name)
					ginkgo.By("waiting for PD to detach")
					framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
				}

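				// Create a pod spec for each of the two nodes; both reference the same PD.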
				host0Pod := testPDPod([]string{diskName}, host0Name, readOnly, 1)
				host1Pod := testPDPod([]string{diskName}, host1Name, readOnly, 1)

				defer func() {
					ginkgo.By("defer: cleaning up PD-RW test environment")
					framework.Logf("defer cleanup errors can usually be ignored")
					if fmtPod != nil {
						podClient.Delete(ctx, fmtPod.Name, podDelOpt)
					}
					podClient.Delete(ctx, host0Pod.Name, podDelOpt)
					podClient.Delete(ctx, host1Pod.Name, podDelOpt)
					detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name, host1Name})
				}()

				ginkgo.By("creating host0Pod on node0")
				_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
				framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
				framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)

				var containerName, testFile, testFileContents string
				if !readOnly {
					ginkgo.By("writing content to host0Pod on node0")
					containerName = "mycontainer"
					testFile = "/testpd1/tracker"
					testFileContents = fmt.Sprintf("%v", rand.Int())
					tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
					framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
					framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)
					ginkgo.By("verifying PD is present in node0's VolumeInUse list")
					framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, true))
					ginkgo.By("deleting host0Pod")
					framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
					framework.Logf("deleted host0Pod %q", host0Pod.Name)
					framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Name, host0Pod.Namespace, f.Timeouts.PodDelete))
					framework.Logf("deleted host0Pod %q has disappeared", host0Pod.Name)
				}

				ginkgo.By("creating host1Pod on node1")
				_, err = podClient.Create(ctx, host1Pod, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create host1Pod")
				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
				framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)

				if readOnly {
					ginkgo.By("deleting host0Pod")
					framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
					framework.Logf("deleted host0Pod %q", host0Pod.Name)
				} else {
					ginkgo.By("verifying PD contents in host1Pod")
					verifyPDContentsViaContainer(ns, f, host1Pod.Name, containerName, map[string]string{testFile: testFileContents})
					framework.Logf("verified PD contents in pod %q", host1Pod.Name)
					ginkgo.By("verifying PD is removed from node0")
					framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, false))
					framework.Logf("PD %q removed from node %q's VolumesInUse list", diskName, host0Name)
				}

				ginkgo.By("deleting host1Pod")
				framework.ExpectNoError(podClient.Delete(ctx, host1Pod.Name, podDelOpt), "Failed to delete host1Pod")
				framework.Logf("deleted host1Pod %q", host1Pod.Name)

				ginkgo.By("Test completed successfully, waiting for PD to detach from both nodes")
				waitForPDDetach(diskName, host0Name)
				waitForPDDetach(diskName, host1Name)
			})
		}
	})

	f.Context("schedule a pod w/ RW PD(s) mounted to 1 or more containers, write to PD, verify content, delete pod, and repeat in rapid succession", f.WithSlow(), func() {
		type testT struct {
			numContainers int
			numPDs        int
			repeatCnt     int
		}
		tests := []testT{
			{
				numContainers: 4,
				numPDs:        1,
				repeatCnt:     3,
			},
			{
				numContainers: 1,
				numPDs:        2,
				repeatCnt:     3,
			},
		}

		for _, t := range tests {
			numPDs := t.numPDs
			numContainers := t.numContainers
			t := t

			ginkgo.It(fmt.Sprintf("using %d containers and %d PDs", numContainers, numPDs), func(ctx context.Context) {
				e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
				var host0Pod *v1.Pod
				var err error
				fileAndContentToVerify := make(map[string]string)
				diskNames := make([]string, 0, numPDs)

				ginkgo.By(fmt.Sprintf("creating %d PD(s)", numPDs))
				for i := 0; i < numPDs; i++ {
					name, err := e2epv.CreatePDWithRetry(ctx)
					framework.ExpectNoError(err, fmt.Sprintf("Error creating PD %d", i))
					diskNames = append(diskNames, name)
				}

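				// Clean up on exit: remove host0Pod if it was created and delete every PD created above.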
				defer func() {
					ginkgo.By("defer: cleaning up PD-RW test environment")
					framework.Logf("defer cleanup errors can usually be ignored")
					if host0Pod != nil {
						podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0))
					}
					for _, diskName := range diskNames {
						detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name})
					}
				}()

				for i := 0; i < t.repeatCnt; i++ {
					framework.Logf("PD Read/Writer Iteration #%v", i)
					ginkgo.By(fmt.Sprintf("creating host0Pod with %d containers on node0", numContainers))
					host0Pod = testPDPod(diskNames, host0Name, false, numContainers)
					_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
					framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
					framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

					ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
					containerName := "mycontainer"
					if numContainers > 1 {
						containerName = fmt.Sprintf("mycontainer%v", rand.Intn(numContainers)+1)
					}
					for x := 1; x <= numPDs; x++ {
						testFile := fmt.Sprintf("/testpd%d/tracker%d", x, i)
						testFileContents := fmt.Sprintf("%v", rand.Int())
						fileAndContentToVerify[testFile] = testFileContents
						tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
						framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
						framework.Logf("wrote %q to file %q in pod %q (container %q) on node %q", testFileContents, testFile, host0Pod.Name, containerName, host0Name)
					}

					ginkgo.By("verifying PD contents via a container")
					if numContainers > 1 {
						containerName = fmt.Sprintf("mycontainer%v", rand.Intn(numContainers)+1)
					}
					verifyPDContentsViaContainer(ns, f, host0Pod.Name, containerName, fileAndContentToVerify)

					ginkgo.By("deleting host0Pod")
					framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete host0Pod")
				}
				ginkgo.By(fmt.Sprintf("Test completed successfully, waiting for %d PD(s) to detach from node0", numPDs))
				for _, diskName := range diskNames {
					waitForPDDetach(diskName, host0Name)
				}
			})
		}
	})

	f.Context("detach in a disrupted environment", f.WithSlow(), f.WithDisruptive(), func() {
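		// Three ways to disrupt the environment while a PD is attached:
		// deleteNode deletes the underlying GCE instance, deleteNodeObj deletes
		// only the node's API object, and evictPod evicts the pod via the Eviction API.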
		const (
			deleteNode    = 1
			deleteNodeObj = 2
			evictPod      = 3
		)
		type testT struct {
			descr     string
			disruptOp int
		}
		tests := []testT{
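			// Note: only the node-API-object deletion and pod eviction cases have
			// entries here; the deleteNode disruption is handled below but is not
			// exercised by this list.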
			{
				descr:     "node's API object is deleted",
				disruptOp: deleteNodeObj,
			},
			{
				descr:     "pod is evicted",
				disruptOp: evictPod,
			},
		}

		for _, t := range tests {
			disruptOp := t.disruptOp
			ginkgo.It(fmt.Sprintf("when %s", t.descr), func(ctx context.Context) {
				e2eskipper.SkipUnlessProviderIs("gce")
				origNodeCnt := len(nodes.Items)

				ginkgo.By("creating a pd")
				diskName, err := e2epv.CreatePDWithRetry(ctx)
				framework.ExpectNoError(err, "Error creating a pd")

				targetNode := &nodes.Items[0]
				host0Pod := testPDPod([]string{diskName}, host0Name, false, 1)
				containerName := "mycontainer"

				ginkgo.DeferCleanup(func(ctx context.Context) {
					ginkgo.By("defer: cleaning up PD-RW test env")
					framework.Logf("defer cleanup errors can usually be ignored")
					ginkgo.By("defer: delete host0Pod")
					podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0))
					ginkgo.By("defer: detach and delete PDs")
					detachAndDeletePDs(ctx, diskName, []types.NodeName{host0Name})
					if disruptOp == deleteNode || disruptOp == deleteNodeObj {
						if disruptOp == deleteNodeObj {
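							// Reset the resource version so the saved Node object can be re-created below.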
							targetNode.ObjectMeta.SetResourceVersion("0")

							ginkgo.By("defer: re-create host0 node object")
							_, err := nodeClient.Create(ctx, targetNode, metav1.CreateOptions{})
							framework.ExpectNoError(err, fmt.Sprintf("defer: Unable to re-create the deleted node object %q", targetNode.Name))
						}
						ginkgo.By("defer: verify the number of ready nodes")
						numNodes := countReadyNodes(ctx, cs, host0Name)

						if numNodes != origNodeCnt {
							framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt)
						}
					}
				})

				ginkgo.By("creating host0Pod on node0")
				_, err = podClient.Create(ctx, host0Pod, metav1.CreateOptions{})
				framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
				ginkgo.By("waiting for host0Pod to be running")
				framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

				ginkgo.By("writing content to host0Pod")
				testFile := "/testpd1/tracker"
				testFileContents := fmt.Sprintf("%v", rand.Int())
				tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
				framework.ExpectNoError(tk.WriteFileViaContainer(host0Pod.Name, containerName, testFile, testFileContents))
				framework.Logf("wrote %q to file %q in pod %q on node %q", testFileContents, testFile, host0Pod.Name, host0Name)

				ginkgo.By("verifying PD is present in node0's VolumeInUse list")
				framework.ExpectNoError(waitForPDInVolumesInUse(ctx, nodeClient, diskName, host0Name, nodeStatusTimeout, true))

				if disruptOp == deleteNode {
					ginkgo.By("getting gce instances")
					gceCloud, err := gce.GetGCECloud()
					framework.ExpectNoError(err, fmt.Sprintf("Unable to create gcloud client err=%v", err))
					output, err := gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
					framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
					gomega.Expect(string(output)).Should(gomega.ContainSubstring(string(host0Name)))

					ginkgo.By("deleting host0")
					err = gceCloud.DeleteInstance(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone, string(host0Name))
					framework.ExpectNoError(err, fmt.Sprintf("Failed to delete host0: err=%v", err))
					ginkgo.By("expecting host0 node to be re-created")
					numNodes := countReadyNodes(ctx, cs, host0Name)
					gomega.Expect(numNodes).To(gomega.Equal(origNodeCnt), fmt.Sprintf("Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt))
					output, err = gceCloud.ListInstanceNames(framework.TestContext.CloudConfig.ProjectID, framework.TestContext.CloudConfig.Zone)
					framework.ExpectNoError(err, fmt.Sprintf("Unable to get list of node instances err=%v output=%s", err, output))
					gomega.Expect(string(output)).Should(gomega.ContainSubstring(string(host0Name)))

				} else if disruptOp == deleteNodeObj {
					ginkgo.By("deleting host0's node api object")
					framework.ExpectNoError(nodeClient.Delete(ctx, string(host0Name), *metav1.NewDeleteOptions(0)), "Unable to delete host0's node object")
					ginkgo.By("deleting host0Pod")
					framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, *metav1.NewDeleteOptions(0)), "Unable to delete host0Pod")

				} else if disruptOp == evictPod {
					evictTarget := &policyv1.Eviction{
						ObjectMeta: metav1.ObjectMeta{
							Name:      host0Pod.Name,
							Namespace: ns,
						},
					}
					ginkgo.By("evicting host0Pod")
					err = wait.PollImmediate(framework.Poll, podEvictTimeout, func() (bool, error) {
						if err := cs.CoreV1().Pods(ns).EvictV1(ctx, evictTarget); err != nil {
							framework.Logf("Failed to evict host0Pod, ignoring error: %v", err)
							return false, nil
						}
						return true, nil
					})
					framework.ExpectNoError(err, "failed to evict host0Pod after %v", podEvictTimeout)
				}

				ginkgo.By("waiting for pd to detach from host0")
				waitForPDDetach(diskName, host0Name)
			})
		}
	})

	ginkgo.It("should be able to delete a non-existent PD without error", func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("gce")

		ginkgo.By("delete a PD")
		framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, "non-exist"))
	})

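	// Verifies that a PD attached out-of-band (directly through the cloud API)
	// can still be mounted by a pod scheduled to that node and detaches cleanly
	// after the pod is deleted.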
	f.It(f.WithSerial(), "attach on previously attached volumes should work", func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
		ginkgo.By("creating PD")
		diskName, err := e2epv.CreatePDWithRetry(ctx)
		framework.ExpectNoError(err, "Error creating PD")

		ginkgo.DeferCleanup(detachAndDeletePDs, diskName, []types.NodeName{host0Name})

		ginkgo.By("Attaching volume to a node")
		err = attachPD(host0Name, diskName)
		framework.ExpectNoError(err, "Error attaching PD")

		pod := testPDPod([]string{diskName}, host0Name, false, 1)
		ginkgo.By("Creating test pod with same volume")
		_, err = podClient.Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create pod")
		framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))

		ginkgo.By("deleting the pod")
		framework.ExpectNoError(podClient.Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")
		framework.Logf("deleted pod %q", pod.Name)
		ginkgo.By("waiting for PD to detach")
		framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
	})
})

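// countReadyNodes waits for the given node to become ready and for all nodes to
// be schedulable, then returns the number of ready, schedulable nodes.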
func countReadyNodes(ctx context.Context, c clientset.Interface, hostName types.NodeName) int {
	e2enode.WaitForNodeToBeReady(ctx, c, string(hostName), nodeStatusTimeout)
	e2enode.WaitForAllNodesSchedulable(ctx, c, nodeStatusTimeout)
	nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
	framework.ExpectNoError(err)
	return len(nodes.Items)
}

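// verifyPDContentsViaContainer reads each file in fileAndContentToVerify through the
// given container and fails the test if the content does not match the expected value.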
func verifyPDContentsViaContainer(namespace string, f *framework.Framework, podName, containerName string, fileAndContentToVerify map[string]string) {
	for filePath, expectedContents := range fileAndContentToVerify {
		tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, namespace)
		v, err := tk.ReadFileViaContainer(podName, containerName, filePath)
		framework.ExpectNoError(err, "Error reading file %s via container %s", filePath, containerName)
		framework.Logf("Read file %q with content: %v", filePath, v)
		if strings.TrimSpace(v) != strings.TrimSpace(expectedContents) {
			framework.Failf("Read content <%q> does not match expected content <%q>.", v, expectedContents)
		}
	}
}

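// detachPD detaches the named PD from the given node through the GCE cloud provider
// API. Only the gce and gke providers are supported.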
func detachPD(nodeName types.NodeName, pdName string) error {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		gceCloud, err := gce.GetGCECloud()
		if err != nil {
			return err
		}
		err = gceCloud.DetachDisk(pdName, nodeName)
		if err != nil {
			if gerr, ok := err.(*googleapi.Error); ok && strings.Contains(gerr.Message, "Invalid value for field 'disk'") {
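				// The disk is not attached to this node, so treat it as already detached.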
				return nil
			}
			framework.Logf("Error detaching PD %q: %v", pdName, err)
		}
		return err

	} else {
		return fmt.Errorf("Provider does not support volume detaching")
	}
}

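// attachPD attaches the named PD to the given node through the GCE cloud provider
// API. Only the gce and gke providers are supported.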
func attachPD(nodeName types.NodeName, pdName string) error {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		gceCloud, err := gce.GetGCECloud()
		if err != nil {
			return err
		}
		err = gceCloud.AttachDisk(pdName, nodeName, false, false)
		if err != nil {
			framework.Logf("Error attaching PD %q: %v", pdName, err)
		}
		return err

	} else {
		return fmt.Errorf("Provider does not support volume attaching")
	}
}

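// testPDPod returns a pod spec for the api Create call: numContainers busybox
// containers that each mount every disk in diskNames, scheduled to targetNode.
// Only the gce, gke, and aws providers are supported.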
func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *v1.Pod {
	if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" ||
		framework.TestContext.Provider == "aws") {
		framework.Failf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider)
	}

	containers := make([]v1.Container, numContainers)
	for i := range containers {
		containers[i].Name = "mycontainer"
		if numContainers > 1 {
			containers[i].Name = fmt.Sprintf("mycontainer%v", i+1)
		}
		containers[i].Image = e2epod.GetTestImage(imageutils.BusyBox)
		containers[i].Command = []string{"sleep", "6000"}
		containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
		for k := range diskNames {
			containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1)
			containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1)
		}
		containers[i].Resources.Limits = v1.ResourceList{}
		containers[i].Resources.Limits[v1.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)
	}

	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: "pd-test-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			Containers: containers,
			NodeName:   string(targetNode),
		},
	}

	pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
	for k, diskName := range diskNames {
		pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
		if framework.TestContext.Provider == "aws" {
			pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
				AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
					VolumeID: diskName,
					FSType:   "ext4",
					ReadOnly: readOnly,
				},
			}
		} else {
			pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
					PDName:   diskName,
					FSType:   e2epv.GetDefaultFSType(),
					ReadOnly: readOnly,
				},
			}
		}
	}
	return pod
}

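// waitForPDDetach waits for the specified GCE PD to detach from the specified node,
// polling until gcePDDetachTimeout expires. It is a no-op on non-GCE/GKE providers.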
func waitForPDDetach(diskName string, nodeName types.NodeName) error {
	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
		framework.Logf("Waiting for GCE PD %q to detach from node %q.", diskName, nodeName)
		gceCloud, err := gce.GetGCECloud()
		if err != nil {
			return err
		}
		for start := time.Now(); time.Since(start) < gcePDDetachTimeout; time.Sleep(gcePDDetachPollTime) {
			diskAttached, err := gceCloud.DiskIsAttached(diskName, nodeName)
			if err != nil {
				framework.Logf("Error waiting for PD %q to detach from node %q. 'DiskIsAttached(...)' failed with %v", diskName, nodeName, err)
				return err
			}
			if !diskAttached {
				framework.Logf("GCE PD %q appears to have successfully detached from %q.", diskName, nodeName)
				return nil
			}
			framework.Logf("Waiting for GCE PD %q to detach from %q.", diskName, nodeName)
		}
		return fmt.Errorf("Gave up waiting for GCE PD %q to detach from %q after %v", diskName, nodeName, gcePDDetachTimeout)
	}
	return nil
}

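// detachAndDeletePDs detaches the named PD from each of the given hosts, waits for
// the detach to complete, and then deletes the PD.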
func detachAndDeletePDs(ctx context.Context, diskName string, hosts []types.NodeName) {
	for _, host := range hosts {
		framework.Logf("Detaching GCE PD %q from node %q.", diskName, host)
		detachPD(host, diskName)
		ginkgo.By(fmt.Sprintf("Waiting for PD %q to detach from %q", diskName, host))
		waitForPDDetach(diskName, host)
	}
	ginkgo.By(fmt.Sprintf("Deleting PD %q", diskName))
	framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName))
}

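// waitForPDInVolumesInUse polls the node's status until its VolumesInUse list
// contains (shouldExist=true) or no longer contains (shouldExist=false) the given
// disk, or the timeout expires.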
func waitForPDInVolumesInUse(
	ctx context.Context,
	nodeClient v1core.NodeInterface,
	diskName string,
	nodeName types.NodeName,
	timeout time.Duration,
	shouldExist bool) error {
	logStr := "to contain"
	if !shouldExist {
		logStr = "to NOT contain"
	}
	framework.Logf("Waiting for node %s's VolumesInUse Status %s PD %q", nodeName, logStr, diskName)
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(nodeStatusPollTime) {
		nodeObj, err := nodeClient.Get(ctx, string(nodeName), metav1.GetOptions{})
		if err != nil || nodeObj == nil {
			framework.Logf("Failed to fetch node object %q from API server. err=%v", nodeName, err)
			continue
		}
		exists := false
		for _, volumeInUse := range nodeObj.Status.VolumesInUse {
			volumeInUseStr := string(volumeInUse)
			if strings.Contains(volumeInUseStr, diskName) {
				if shouldExist {
					framework.Logf("Found PD %q in node %q's VolumesInUse Status: %q", diskName, nodeName, volumeInUseStr)
					return nil
				}
				exists = true
			}
		}
		if !shouldExist && !exists {
			framework.Logf("Verified PD %q does not exist in node %q's VolumesInUse Status.", diskName, nodeName)
			return nil
		}
	}
	return fmt.Errorf("Timed out waiting for node %s VolumesInUse Status %s diskName %q", nodeName, logStr, diskName)
}