package testsuites

import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"time"

	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

// StorageClassTest represents parameters to be used by provisioning tests.
// Not all parameters are used by all tests.
type StorageClassTest struct {
	Client               clientset.Interface
	Timeouts             *framework.TimeoutContext
	Claim                *v1.PersistentVolumeClaim
	SourceClaim          *v1.PersistentVolumeClaim
	Class                *storagev1.StorageClass
	Name                 string
	CloudProviders       []string
	Provisioner          string
	Parameters           map[string]string
	DelayBinding         bool
	ClaimSize            string
	ExpectedSize         string
	PvCheck              func(ctx context.Context, claim *v1.PersistentVolumeClaim)
	VolumeMode           v1.PersistentVolumeMode
	AllowVolumeExpansion bool
	NodeSelection        e2epod.NodeSelection
	MountOptions         []string
}

type provisioningTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

// InitCustomProvisioningTestSuite returns a provisioning test suite that runs
// only the given test patterns.
func InitCustomProvisioningTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &provisioningTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "provisioning",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

// InitProvisioningTestSuite returns a provisioning test suite that runs with
// the default test patterns (filesystem, block, and NTFS dynamic PVs).
func InitProvisioningTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
		storageframework.BlockVolModeDynamicPV,
		storageframework.NtfsDynamicPV,
	}
	return InitCustomProvisioningTestSuite(patterns)
}
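
// Example (sketch, not part of this file's API): a driver's e2e registration
// would typically include this suite in its list of suite initializers. The
// variable name below is illustrative only; the actual wiring lives in the
// driver test files:
//
//	var csiTestSuites = []func() storageframework.TestSuite{
//		InitProvisioningTestSuite,
//		// ... other suites ...
//	}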

func (p *provisioningTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return p.tsInfo
}

func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	// Check preconditions.
	if pattern.VolType != storageframework.DynamicPV {
		e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
	}
	dInfo := driver.GetDriverInfo()
	if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
		e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
	}
}

func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config *storageframework.PerTestConfig

		testCase  *StorageClassTest
		cs        clientset.Interface
		pvc       *v1.PersistentVolumeClaim
		sourcePVC *v1.PersistentVolumeClaim
		sc        *storagev1.StorageClass

		migrationCheck *migrationOpCheck
	}
	var (
		dInfo   = driver.GetDriverInfo()
		dDriver storageframework.DynamicPVTestDriver
		l       local
	)

	// Beware that the framework also registers an AfterEach which renders f
	// unusable; any code using f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	init := func(ctx context.Context) {
		l = local{}
		dDriver, _ = driver.(storageframework.DynamicPVTestDriver)

		l.config = driver.PrepareTest(ctx, f)
		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
		ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts)
		l.cs = l.config.Framework.ClientSet
		testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
		driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
		claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
		framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)

		l.sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType)
		if l.sc == nil {
			e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
		}
		l.pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
			ClaimSize:        claimSize,
			StorageClassName: &(l.sc.Name),
			VolumeMode:       &pattern.VolMode,
		}, l.config.Framework.Namespace.Name)
		l.sourcePVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
			ClaimSize:        claimSize,
			StorageClassName: &(l.sc.Name),
			VolumeMode:       &pattern.VolMode,
		}, l.config.Framework.Namespace.Name)
		framework.Logf("Created StorageClass and PVC objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
		l.testCase = &StorageClassTest{
			Client:        l.config.Framework.ClientSet,
			Timeouts:      f.Timeouts,
			Claim:         l.pvc,
			SourceClaim:   l.sourcePVC,
			Class:         l.sc,
			Provisioner:   l.sc.Provisioner,
			ClaimSize:     claimSize,
			ExpectedSize:  claimSize,
			VolumeMode:    pattern.VolMode,
			NodeSelection: l.config.ClientNodeSelection,
		}
	}

	ginkgo.It("should provision storage with mount options", func(ctx context.Context) {
		if dInfo.SupportedMountOption == nil {
			e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
		}
		if pattern.VolMode == v1.PersistentVolumeBlock {
			e2eskipper.Skipf("Block volumes do not support mount options - skipping")
		}

		init(ctx)

		l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
		}
		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)

		l.testCase.TestDynamicProvisioning(ctx)
	})

	f.It("should provision storage with snapshot data source", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
		}
		if !dInfo.SupportedFsType.Has(pattern.FsType) {
			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
		}

		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
		if !ok {
			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
		}

		init(ctx)

		dc := l.config.Framework.DynamicClient
		testConfig := storageframework.ConvertTestConfig(l.config)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)

		l.pvc.Spec.DataSourceRef = dataSourceRef
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			ginkgo.By("checking whether the created volume has the pre-populated data")
			tests := []e2evolume.Test{
				{
					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
					Mode:            pattern.VolMode,
					File:            "index.html",
					ExpectedContent: expectedContent,
				},
			}
			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
		}
		l.testCase.TestDynamicProvisioning(ctx)
	})

	f.It("should provision storage with snapshot data source (ROX mode)", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
		}
		if !dInfo.SupportedFsType.Has(pattern.FsType) {
			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
		}
		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
		}

		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
		if !ok {
			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
		}

		init(ctx)

		dc := l.config.Framework.DynamicClient
		testConfig := storageframework.ConvertTestConfig(l.config)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)

		l.pvc.Spec.DataSourceRef = dataSourceRef
		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			ginkgo.By("checking whether the created volume has the pre-populated data")
			tests := []e2evolume.Test{
				{
					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
					Mode:            pattern.VolMode,
					File:            "index.html",
					ExpectedContent: expectedContent,
				},
			}
			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
		}
		l.testCase.TestDynamicProvisioning(ctx)
	})

	f.It("should provision storage with any volume data source", f.WithSerial(), func(ctx context.Context) {
		if len(dInfo.InTreePluginName) != 0 {
			e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping")
		}
		if pattern.VolMode == v1.PersistentVolumeBlock {
			e2eskipper.Skipf("Test for Block volumes is not implemented - skipping")
		}

		init(ctx)

		ginkgo.By("Creating validator namespace")
		valNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
			"e2e-framework":      f.BaseName,
			"e2e-test-namespace": f.Namespace.Name,
		})
		framework.ExpectNoError(err)
		ginkgo.DeferCleanup(f.DeleteNamespace, valNamespace.Name)

		ginkgo.By("Deploying validator")
		valManifests := []string{
			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/populator.storage.k8s.io_volumepopulators.yaml",
			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml",
			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml",
		}
		err = storageutils.CreateFromManifests(ctx, f, valNamespace,
			func(item interface{}) error { return nil },
			valManifests...)
		framework.ExpectNoError(err)

		ginkgo.By("Creating populator namespace")
		popNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
			"e2e-framework":      f.BaseName,
			"e2e-test-namespace": f.Namespace.Name,
		})
		framework.ExpectNoError(err)
		ginkgo.DeferCleanup(f.DeleteNamespace, popNamespace.Name)

		ginkgo.By("Deploying hello-populator")
		popManifests := []string{
			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml",
			"test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml",
		}
		err = storageutils.CreateFromManifests(ctx, f, popNamespace,
			func(item interface{}) error {
				switch item := item.(type) {
				case *appsv1.Deployment:
					// Patch the hello container's arguments so that it uses the
					// freshly created namespace and its own image name.
					for i, container := range item.Spec.Template.Spec.Containers {
						switch container.Name {
						case "hello":
							args := []string{}
							var foundNS, foundImage bool
							for _, arg := range container.Args {
								if strings.HasPrefix(arg, "--namespace=") {
									args = append(args, fmt.Sprintf("--namespace=%s", popNamespace.Name))
									foundNS = true
								} else if strings.HasPrefix(arg, "--image-name=") {
									args = append(args, fmt.Sprintf("--image-name=%s", container.Image))
									foundImage = true
								} else {
									args = append(args, arg)
								}
							}
							if !foundNS {
								args = append(args, fmt.Sprintf("--namespace=%s", popNamespace.Name))
								framework.Logf("container name: %s", container.Name)
							}
							if !foundImage {
								args = append(args, fmt.Sprintf("--image-name=%s", container.Image))
								framework.Logf("container image: %s", container.Image)
							}
							container.Args = args
							item.Spec.Template.Spec.Containers[i] = container
						default:
						}
					}
				}
				return nil
			},
			popManifests...)
		framework.ExpectNoError(err)

		dc := l.config.Framework.DynamicClient

		ginkgo.By("Creating VolumePopulator CR datasource")
		volumePopulatorGVR := schema.GroupVersionResource{Group: "populator.storage.k8s.io", Version: "v1beta1", Resource: "volumepopulators"}
		helloPopulatorCR := &unstructured.Unstructured{
			Object: map[string]interface{}{
				"kind":       "VolumePopulator",
				"apiVersion": "populator.storage.k8s.io/v1beta1",
				"metadata": map[string]interface{}{
					"name": fmt.Sprintf("%s-%s", "hello-populator", f.Namespace.Name),
				},
				"sourceKind": map[string]interface{}{
					"group": "hello.example.com",
					"kind":  "Hello",
				},
			},
		}

		_, err = dc.Resource(volumePopulatorGVR).Create(ctx, helloPopulatorCR, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		defer func() {
			framework.Logf("deleting VolumePopulator CR datasource %q/%q", helloPopulatorCR.GetNamespace(), helloPopulatorCR.GetName())
			err = dc.Resource(volumePopulatorGVR).Delete(ctx, helloPopulatorCR.GetName(), metav1.DeleteOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				framework.Failf("Error deleting VolumePopulator CR datasource %q. Error: %v", helloPopulatorCR.GetName(), err)
			}
		}()

		ginkgo.By("Creating Hello CR datasource")
		helloCRName := "example-hello"
		fileName := fmt.Sprintf("example-%s.txt", f.Namespace.Name)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		helloGVR := schema.GroupVersionResource{Group: "hello.example.com", Version: "v1alpha1", Resource: "hellos"}
		helloCR := &unstructured.Unstructured{
			Object: map[string]interface{}{
				"kind":       "Hello",
				"apiVersion": "hello.example.com/v1alpha1",
				"metadata": map[string]interface{}{
					"name":      helloCRName,
					"namespace": f.Namespace.Name,
				},
				"spec": map[string]interface{}{
					"fileName":     fileName,
					"fileContents": expectedContent,
				},
			},
		}

		_, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(ctx, helloCR, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		defer func() {
			framework.Logf("deleting Hello CR datasource %q/%q", helloCR.GetNamespace(), helloCR.GetName())
			err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(ctx, helloCR.GetName(), metav1.DeleteOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				framework.Failf("Error deleting Hello CR datasource %q. Error: %v", helloCR.GetName(), err)
			}
		}()

		apiGroup := "hello.example.com"
		l.pvc.Spec.DataSourceRef = &v1.TypedObjectReference{
			APIGroup: &apiGroup,
			Kind:     "Hello",
			Name:     helloCRName,
		}

		testConfig := storageframework.ConvertTestConfig(l.config)
		l.testCase.NodeSelection = testConfig.ClientNodeSelection
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			ginkgo.By("checking whether the created volume has the pre-populated data")
			tests := []e2evolume.Test{
				{
					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
					Mode:            pattern.VolMode,
					File:            fileName,
					ExpectedContent: expectedContent,
				},
			}
			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
		}

		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)

		l.testCase.TestDynamicProvisioning(ctx)
	})

	f.It("should provision correct filesystem size when restoring snapshot to larger size pvc", feature.VolumeSnapshotDataSource, func(ctx context.Context) {
		if framework.NodeOSDistroIs("windows") {
			e2eskipper.Skipf("Test is not valid on Windows - skipping")
		}

		if pattern.VolMode == v1.PersistentVolumeBlock {
			e2eskipper.Skipf("Test is not valid for Block volume mode - skipping")
		}

		if dInfo.Capabilities[storageframework.CapFSResizeFromSourceNotSupported] {
			e2eskipper.Skipf("Driver %q does not support filesystem resizing from a volume source - skipping", dInfo.Name)
		}

		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
		}

		if !dInfo.SupportedFsType.Has(pattern.FsType) {
			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
		}

		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
		if !ok {
			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
		}

		init(ctx)
		pvc2 := l.pvc.DeepCopy()
		l.pvc.Name = "pvc-origin"
		dc := l.config.Framework.DynamicClient
		testConfig := storageframework.ConvertTestConfig(l.config)
		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, "")

		// Get the created PVC and record the actual PV size from the PVC status.
		c, err := l.testCase.Client.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(ctx, l.pvc.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "Failed to get pvc: %v", err)
		actualPVSize := c.Status.Capacity.Storage().Value()

		createdClaims := []*v1.PersistentVolumeClaim{c}
		pod, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims, f.NamespacePodSecurityLevel, "")
		framework.ExpectNoError(err, "Failed to create pod: %v", err)

		// The mount path must not be empty.
		mountpath := findVolumeMountPath(pod, c)
		gomega.Expect(mountpath).ShouldNot(gomega.BeEmpty())

		// Save the filesystem size of the origin volume.
		originFSSize, err := getFilesystemSizeBytes(pod, mountpath)
		framework.ExpectNoError(err, "Failed to obtain filesystem size of a volume mount: %v", err)

		// For the restored PVC, request a size larger than the origin PV actually provisioned.
		storageRequest := resource.NewQuantity(actualPVSize, resource.BinarySI)
		storageRequest.Add(resource.MustParse("1Gi"))
		pvc2.Spec.Resources.Requests = v1.ResourceList{
			v1.ResourceStorage: *storageRequest,
		}

		// Set the snapshot data source on the restored PVC.
		pvc2.Spec.DataSourceRef = dataSourceRef

		// Create the new claim and a pod that uses it.
		c2, err := l.testCase.Client.CoreV1().PersistentVolumeClaims(pvc2.Namespace).Create(ctx, pvc2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create pvc: %v", err)
		createdClaims2 := []*v1.PersistentVolumeClaim{c2}
		pod2, err := e2epod.CreatePod(ctx, l.testCase.Client, f.Namespace.Name, nil, createdClaims2, f.NamespacePodSecurityLevel, "")
		framework.ExpectNoError(err, "Failed to create pod: %v", err)

		// The mount path must not be empty.
		mountpath2 := findVolumeMountPath(pod2, c2)
		gomega.Expect(mountpath2).ShouldNot(gomega.BeEmpty())

		// Get the actual size of the restored filesystem.
		restoredFSSize, err := getFilesystemSizeBytes(pod2, mountpath2)
		framework.ExpectNoError(err, "Failed to obtain filesystem size of a volume mount: %v", err)

		// The filesystem of the restored volume must be larger than the origin's.
		msg := fmt.Sprintf("Filesystem resize failed when restoring from snapshot to PVC with larger size. "+
			"Restored fs size: %v bytes is not larger than origin fs size: %v bytes.\n"+
			"HINT: Your driver needs to check the volume in NodeStageVolume and resize fs if needed.\n"+
			"HINT: For an example patch see: https://github.com/kubernetes/cloud-provider-openstack/pull/1563/files",
			restoredFSSize, originFSSize)
		gomega.Expect(restoredFSSize).Should(gomega.BeNumerically(">", originFSSize), msg)
	})

	ginkgo.It("should provision storage with pvc data source", func(ctx context.Context) {
		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
		}
		init(ctx)

		if l.config.ClientNodeSelection.Name == "" {
			// Schedule all pods to the same topology segment (e.g. a cloud availability
			// zone); some drivers don't support cloning across topology segments.
			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}
		testConfig := storageframework.ConvertTestConfig(l.config)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
		l.pvc.Spec.DataSourceRef = dataSourceRef
		l.testCase.NodeSelection = testConfig.ClientNodeSelection
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			ginkgo.By("checking whether the created volume has the pre-populated data")
			tests := []e2evolume.Test{
				{
					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
					Mode:            pattern.VolMode,
					File:            "index.html",
					ExpectedContent: expectedContent,
				},
			}
			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
		}
		// Cloning can fail while the source volume is still attached, so wait for the
		// VolumeAttachment of the source PVC to be removed first.
		volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
		framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
		l.testCase.TestDynamicProvisioning(ctx)
	})

	ginkgo.It("should provision storage with pvc data source (ROX mode)", func(ctx context.Context) {
		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
		}
		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
		}
		init(ctx)

		if l.config.ClientNodeSelection.Name == "" {
			// Schedule all pods to the same topology segment (e.g. a cloud availability
			// zone); some drivers don't support cloning across topology segments.
			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}
		testConfig := storageframework.ConvertTestConfig(l.config)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
		l.pvc.Spec.DataSourceRef = dataSourceRef
		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
		l.testCase.NodeSelection = testConfig.ClientNodeSelection
		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			ginkgo.By("checking whether the created volume has the pre-populated data")
			tests := []e2evolume.Test{
				{
					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
					Mode:            pattern.VolMode,
					File:            "index.html",
					ExpectedContent: expectedContent,
				},
			}
			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
		}
		// Cloning can fail while the source volume is still attached, so wait for the
		// VolumeAttachment of the source PVC to be removed first.
		volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
		framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
		l.testCase.TestDynamicProvisioning(ctx)
	})

	f.It("should provision storage with pvc data source in parallel", f.WithSlow(), func(ctx context.Context) {
		// Test cloning a single volume multiple times at once.
		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
		}
		if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageframework.CapBlock] {
			e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
		}

		init(ctx)

		if l.config.ClientNodeSelection.Name == "" {
			// Schedule all pods to the same topology segment (e.g. a cloud availability
			// zone); some drivers don't support cloning across topology segments.
			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
				framework.Failf("Error setting topology requirements: %v", err)
			}
		}
		testConfig := storageframework.ConvertTestConfig(l.config)
		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
		l.pvc.Spec.DataSourceRef = dataSourceRef

		var wg sync.WaitGroup
		for i := 0; i < 5; i++ {
			wg.Add(1)
			go func(i int) {
				defer ginkgo.GinkgoRecover()
				defer wg.Done()
				ginkgo.By(fmt.Sprintf("Cloning volume nr. %d", i))
				// Each goroutine must have its own pod prefix.
				myTestConfig := testConfig
				myTestConfig.Prefix = fmt.Sprintf("%s-%d", myTestConfig.Prefix, i)

				t := *l.testCase
				t.NodeSelection = testConfig.ClientNodeSelection
				t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
					ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
					tests := []e2evolume.Test{
						{
							Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
							Mode:            pattern.VolMode,
							File:            "index.html",
							ExpectedContent: expectedContent,
						},
					}
					e2evolume.TestVolumeClientSlow(ctx, f, myTestConfig, nil, "", tests)
				}
				// Cloning can fail while the source volume is still attached, so wait for
				// the VolumeAttachment of the source PVC to be removed first.
				volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
				framework.ExpectNoError(e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision))
				t.TestDynamicProvisioning(ctx)
			}(i)
		}
		wg.Wait()
	})

	ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func(ctx context.Context) {
		// In this test two PVs point to the same underlying storage. During the
		// NodeStage call for the second volume, some drivers (e.g. csi-hostpath)
		// fail because they consider the volume already staged at a different path.
		if pattern.VolMode == v1.PersistentVolumeBlock {
			e2eskipper.Skipf("skipping multiple PV mount test for block mode")
		}

		if !dInfo.Capabilities[storageframework.CapMultiplePVsSameID] {
			e2eskipper.Skipf("this driver does not support multiple PVs with the same volumeHandle")
		}

		init(ctx)

		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
			MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
		}
		SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)

		l.testCase.TestDynamicProvisioning(ctx)
	})
}

// SetupStorageClass ensures that a StorageClass from a spec exists. If it
// already exists it is returned as-is; otherwise it is created first and then
// returned. If the spec is nil, the cluster's default StorageClass is returned.
func SetupStorageClass(
	ctx context.Context,
	client clientset.Interface,
	class *storagev1.StorageClass,
) *storagev1.StorageClass {
	gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required")

	var err error
	var computedStorageClass *storagev1.StorageClass
	if class != nil {
		computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
		if err == nil {
			// Skip storage class creation if it already exists.
			ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.")
		} else {
			ginkgo.By("Creating a StorageClass")
			class, err = client.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			clearComputedStorageClass := func(ctx context.Context) {
				framework.Logf("deleting storage class %s", computedStorageClass.Name)
				err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{})
				if err != nil && !apierrors.IsNotFound(err) {
					framework.ExpectNoError(err, "delete storage class")
				}
			}
			ginkgo.DeferCleanup(clearComputedStorageClass)
		}
	} else {
		// The StorageClass is nil, so the default one will be used.
		scName, err := e2epv.GetDefaultStorageClassName(ctx, client)
		framework.ExpectNoError(err)
		ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName)
		computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
		framework.ExpectNoError(err)
	}

	return computedStorageClass
}
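
// Example (sketch): callers usually pass the class from a test case and rely
// on DeferCleanup for removal; passing nil falls back to the cluster default:
//
//	sc := SetupStorageClass(ctx, client, nil) // fetches the default StorageClass
//	framework.Logf("using storage class %s", sc.Name)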

// TestDynamicProvisioning tests dynamic provisioning with the specified
// StorageClassTest. It assumes the StorageClass `t.Class` already exists in
// the cluster (see SetupStorageClass) and returns the provisioned PV.
func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.PersistentVolume {
	var err error
	client := t.Client
	gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
	claim := t.Claim
	gomega.Expect(claim).NotTo(gomega.BeNil(), "StorageClassTest.Claim is required")
	gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty")
	class := t.Class
	gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required")
	class, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "StorageClass "+class.Name+" couldn't be fetched from the cluster")

	ginkgo.By(fmt.Sprintf("creating claim=%+v", claim))
	claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
	framework.ExpectNoError(err)
	defer func() {
		framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
		// Typically this claim has already been deleted.
		err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
		}
	}()

	// Ensure that the claim refers to the provisioned StorageClass.
	gomega.Expect(*claim.Spec.StorageClassName).To(gomega.Equal(class.Name))

	// If late binding is configured, create and delete a pod to provision the volume.
	if *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
		ginkgo.By(fmt.Sprintf("creating a pod referring to the class=%+v claim=%+v", class, claim))
		podConfig := &e2epod.Config{
			NS:            claim.Namespace,
			PVCs:          []*v1.PersistentVolumeClaim{claim},
			NodeSelection: t.NodeSelection,
		}
		pod, err := e2epod.CreateSecPod(ctx, client, podConfig, t.Timeouts.DataSourceProvision)
		// Delete the pod right away, otherwise the PV cannot be deleted below.
		framework.ExpectNoError(err)
		e2epod.DeletePodOrFail(ctx, client, pod.Namespace, pod.Name)
	}

	// Run the checker.
	if t.PvCheck != nil {
		t.PvCheck(ctx, claim)
	}

	pv := t.checkProvisioning(ctx, client, claim, class)

	ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
	framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))

	// Wait for the PV to get deleted if the reclaim policy is Delete. (If it's
	// Retain, there's no use waiting because the PV won't be auto-deleted and
	// it's expected for the caller to do it.) Technically, the first few delete
	// attempts may fail, as the volume is still attached to a node because
	// kubelet is slowly cleaning up the previous pod, however it should succeed
	// in a couple of minutes. Wait up to t.Timeouts.PVDeleteSlow to recover from
	// random cloud hiccups.
	if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
		ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
		framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow))
	}

	return pv
}
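
// Example (sketch): a typical invocation from a test body, assuming init(ctx)
// has populated l.testCase as in DefineTests above:
//
//	SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
//	l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
//		PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
//	}
//	pv := l.testCase.TestDynamicProvisioning(ctx)
//	_ = pv // the bound PV; already deleted here if the reclaim policy is Delete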

// getBoundPV returns the PV that is bound to the given PVC.
func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	// Get a fresh copy of the claim.
	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// Get the bound PV.
	pv, err := client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
	return pv, err
}

// checkProvisioning verifies that the claim is bound and has the correct properties.
func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
	err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
	framework.ExpectNoError(err)

	ginkgo.By("checking the claim")
	pv, err := getBoundPV(ctx, client, claim)
	framework.ExpectNoError(err)

	// Check sizes.
	expectedCapacity := resource.MustParse(t.ExpectedSize)
	pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(pvCapacity.Value()).To(gomega.BeNumerically(">=", expectedCapacity.Value()), "pvCapacity is not greater or equal to expectedCapacity")

	requestedCapacity := resource.MustParse(t.ClaimSize)
	claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	gomega.Expect(claimCapacity.Value()).To(gomega.BeNumerically(">=", requestedCapacity.Value()), "claimCapacity is not greater or equal to requestedCapacity")

	// Check PV properties.
	ginkgo.By("checking the PV")

	// Every access mode in the PV should also be in the claim.
	gomega.Expect(pv.Spec.AccessModes).NotTo(gomega.BeZero())
	for _, pvMode := range pv.Spec.AccessModes {
		found := false
		for _, pvcMode := range claim.Spec.AccessModes {
			if pvMode == pvcMode {
				found = true
				break
			}
		}
		if !found {
			framework.Failf("PV access modes %v are not a subset of the claim's access modes", pv.Spec.AccessModes)
		}
	}

	gomega.Expect(pv.Spec.ClaimRef.Name).To(gomega.Equal(claim.ObjectMeta.Name))
	gomega.Expect(pv.Spec.ClaimRef.Namespace).To(gomega.Equal(claim.ObjectMeta.Namespace))
	if class == nil {
		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(v1.PersistentVolumeReclaimDelete))
	} else {
		gomega.Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(gomega.Equal(*class.ReclaimPolicy))
		gomega.Expect(pv.Spec.MountOptions).To(gomega.Equal(class.MountOptions))
	}
	if claim.Spec.VolumeMode != nil {
		gomega.Expect(pv.Spec.VolumeMode).NotTo(gomega.BeNil())
		gomega.Expect(*pv.Spec.VolumeMode).To(gomega.Equal(*claim.Spec.VolumeMode))
	}
	return pv
}

// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
// and returns the PV.
//
// It starts two pods:
//   - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
//   - The second pod runs grep 'hello world' on /mnt/test on the same node.
//
// The node is selected by Kubernetes when scheduling the first pod; the second
// pod uses the same node.
//
// This is a common check that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
	command := "echo 'hello world' > /mnt/test/data"
	pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// pod might be nil now.
		StopPod(ctx, client, pod)
	})
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	actualNodeName := runningPod.Spec.NodeName
	StopPod(ctx, client, pod)
	pod = nil // Don't stop twice.

	// Get a fresh copy of the bound PV. (Named volume rather than e2evolume to
	// avoid shadowing the e2evolume package import.)
	volume, err := getBoundPV(ctx, client, claim)
	framework.ExpectNoError(err)

	ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
	command = "grep 'hello world' /mnt/test/data"

	// The reader pod also checks that the volume was mounted with the PV's mount
	// options, if the PV was provisioned with any.
	for _, option := range volume.Spec.MountOptions {
		// Find the mount entry, extract the options in the 6th field, and replace the brackets with commas.
		command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
	}
	command += " || (mount | grep 'on /mnt/test'; false)"

	if framework.NodeOSDistroIs("windows") {
		// agnhost doesn't support mount
		command = "grep 'hello world' /mnt/test/data"
	}
	RunInPodWithVolume(ctx, client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName, Selector: node.Selector})
	return volume
}

// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
//
// It starts these pods:
//   - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
//   - The second pod runs grep 'hello world' on /mnt/test on another node.
//
// The first node is selected by Kubernetes when scheduling the first pod. The
// second pod uses the same selection criteria, plus an anti-affinity rule for
// the first node, so this test can only pass if the network-attached volume is
// usable on all nodes.
//
// This is a common check that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
	gomega.Expect(node.Name).To(gomega.BeZero(), "this test only works when not locked onto a single node")

	var pod *v1.Pod
	defer func() {
		// Passing pod == nil is okay.
		StopPod(ctx, client, pod)
	}()

	ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
	command := "echo 'hello world' > /mnt/test/data"
	pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	actualNodeName := runningPod.Spec.NodeName
	StopPod(ctx, client, pod)
	pod = nil // Don't stop twice.

	// Add node anti-affinity so the reader lands on a different node.
	secondNode := node
	e2epod.SetAntiAffinity(&secondNode, actualNodeName)
	ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
	command = "grep 'hello world' /mnt/test/data"
	pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
	runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	gomega.Expect(runningPod.Spec.NodeName).ToNot(gomega.Equal(actualNodeName), "second pod should have run on a different node")
	StopPod(ctx, client, pod)
	pod = nil
}

// TestBindingWaitForFirstConsumerMultiPVC tests claim binding with the
// WaitForFirstConsumer volume binding mode across multiple PVCs.
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
	var err error
	gomega.Expect(claims).ToNot(gomega.BeEmpty())
	namespace := claims[0].Namespace

	ginkgo.By("creating claims")
	var claimNames []string
	var createdClaims []*v1.PersistentVolumeClaim
	for _, claim := range claims {
		c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
		claimNames = append(claimNames, c.Name)
		createdClaims = append(createdClaims, c)
		framework.ExpectNoError(err)
	}
	defer func() {
		errors := map[string]error{}
		for _, claim := range createdClaims {
			err := e2epv.DeletePersistentVolumeClaim(ctx, t.Client, claim.Name, claim.Namespace)
			if err != nil {
				errors[claim.Name] = err
			}
		}
		if len(errors) > 0 {
			for claimName, err := range errors {
				framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
			}
		}
	}()

	// The claims must stay pending until a pod consumes them, so the wait for
	// the Bound phase is expected to time out here.
	ginkgo.By("checking the claims are in pending state")
	err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
	framework.ExpectError(err)
	verifyPVCsPending(ctx, t.Client, createdClaims)

	ginkgo.By("creating a pod referring to the claims")
	var pod *v1.Pod
	if expectUnschedulable {
		pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, admissionapi.LevelPrivileged, "")
	} else {
		pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, admissionapi.LevelPrivileged, "")
	}
	framework.ExpectNoError(err)
	ginkgo.DeferCleanup(func(ctx context.Context) error {
		e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
		return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Name, pod.Namespace, t.Timeouts.PodDelete)
	})
	if expectUnschedulable {
		// Verify that no claims are provisioned.
		verifyPVCsPending(ctx, t.Client, createdClaims)
		return nil, nil
	}

	// Collect the node details.
	node, err := t.Client.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("re-checking the claims to see they bound")
	var pvs []*v1.PersistentVolume
	for _, claim := range createdClaims {
		// Get a new copy of the claim.
		claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		// Make sure the claim did bind.
		err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
		framework.ExpectNoError(err)

		pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
		framework.ExpectNoError(err)
		pvs = append(pvs, pv)
	}
	gomega.Expect(pvs).To(gomega.HaveLen(len(createdClaims)))
	return pvs, node
}
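
// Example (sketch): exercising delayed binding with a single claim and no node
// selector; with expectUnschedulable=false the claims must end up bound:
//
//	pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC(ctx, []*v1.PersistentVolumeClaim{claim}, nil, false)
//	framework.Logf("claims bound to %d PVs on node %s", len(pvs), node.Name)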

// RunInPodWithVolume runs a command in a pod with the given claim mounted to
// the /mnt directory. It starts the pod, waits for it to succeed, and returns
// the final pod object.
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
	pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node)
	defer StopPod(ctx, c, pod)
	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
	// Get the latest status of the pod.
	pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)
	return pod
}
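
// Example (sketch): writing data through a claim and reading it back with two
// short-lived pods:
//
//	RunInPodWithVolume(ctx, c, t, ns, claimName, "writer", "echo data > /mnt/test/file", node)
//	RunInPodWithVolume(ctx, c, t, ns, claimName, "reader", "grep data /mnt/test/file", node)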

// StartInPodWithVolume starts a command in a pod with the given claim mounted
// to the /mnt directory. The caller is responsible for checking the pod and
// deleting it.
func StartInPodWithVolume(ctx context.Context, c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
	return StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{
		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
			ClaimName: claimName,
		},
	}, ns, podName, command, node)
}

// StartInPodWithVolumeSource starts a command in a pod with the given volume
// source mounted to the /mnt directory. The caller is responsible for checking
// the pod and deleting it.
func StartInPodWithVolumeSource(ctx context.Context, c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
				"app": podName,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:    "volume-tester",
					Image:   e2epod.GetDefaultTestImage(),
					Command: e2epod.GenerateScriptCmd(command),
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name:         "my-volume",
					VolumeSource: volSrc,
				},
			},
		},
	}

	e2epod.SetNodeSelection(&pod.Spec, node)
	pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Failed to create pod: %v", err)
	return pod
}
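
// Example (sketch): StartInPodWithVolume above is a thin wrapper around this
// function; other volume sources can be mounted the same way, e.g. an
// ephemeral inline CSI volume (the driver name below is illustrative only):
//
//	pod := StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{
//		CSI: &v1.CSIVolumeSource{Driver: "example.csi.driver"},
//	}, ns, "inline-tester", "mount | grep /mnt/test", node)
//	defer StopPod(ctx, c, pod)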

// StopPod first tries to log the output of the pod's container, then deletes
// the pod and waits for that to succeed.
func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
	if pod == nil {
		return
	}
	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
	if err != nil {
		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
	} else {
		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
	}
	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
}

// StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. It also waits for all
// resources owned by the pod to be deleted.
func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
	if pod == nil {
		return
	}
	body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
	if err != nil {
		framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
	} else {
		framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
	}

	// We must wait explicitly for removal of the generic ephemeral volume PVs.
	// For that we must find them first...
	pvs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err, "list PVs")
	var podPVs []v1.PersistentVolume
	for _, pv := range pvs.Items {
		if pv.Spec.ClaimRef == nil ||
			pv.Spec.ClaimRef.Namespace != pod.Namespace {
			continue
		}
		pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{})
		if err != nil && apierrors.IsNotFound(err) {
			// Must have been some unrelated PV, otherwise the PVC should exist.
			continue
		}
		framework.ExpectNoError(err, "get PVC")
		if pv.Spec.ClaimRef.UID == pvc.UID && metav1.IsControlledBy(pvc, pod) {
			podPVs = append(podPVs, pv)
		}
	}

	framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
	deletionPolicy := metav1.DeletePropagationForeground
	err = c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name,
		metav1.DeleteOptions{
			// If the pod is the owner of some resources (like ephemeral inline volumes),
			// then we want to be sure that those are also gone before we return.
			// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
			PropagationPolicy: &deletionPolicy,
		})
	if err != nil {
		if apierrors.IsNotFound(err) {
			return
		}
		framework.Logf("pod Delete API error: %v", err)
	}
	framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
	framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, c, pod.Name, pod.Namespace, timeouts.PodDelete))
	if len(podPVs) > 0 {
		for _, pv := range podPVs {
			// As with CSI inline volumes, we use the pod delete timeout here because
			// conceptually the volume deletion needs to be that fast (whatever "that" is).
			framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
			framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, timeouts.PodDelete))
		}
	}
}

func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
	for _, claim := range pvcs {
		// Get a new copy of the claim.
		claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(claim.Status.Phase).To(gomega.Equal(v1.ClaimPending))
	}
}

func prepareSnapshotDataSourceForProvisioning(
	ctx context.Context,
	f *framework.Framework,
	config e2evolume.TestConfig,
	perTestConfig *storageframework.PerTestConfig,
	pattern storageframework.TestPattern,
	client clientset.Interface,
	dynamicClient dynamic.Interface,
	initClaim *v1.PersistentVolumeClaim,
	class *storagev1.StorageClass,
	sDriver storageframework.SnapshottableTestDriver,
	mode v1.PersistentVolumeMode,
	injectContent string,
) *v1.TypedObjectReference {
	SetupStorageClass(ctx, client, class)

	if initClaim.ResourceVersion != "" {
		ginkgo.By("Skipping creation of PVC, it already exists")
	} else {
		ginkgo.By("[Initialize dataSource]creating an initClaim")
		updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(ctx, initClaim, metav1.CreateOptions{})
		if apierrors.IsAlreadyExists(err) {
			err = nil
		}
		framework.ExpectNoError(err)
		initClaim = updatedClaim
	}

	// Write the expected content to /mnt/test (= the volume).
	tests := []e2evolume.Test{
		{
			Volume:          *storageutils.CreateVolumeSource(initClaim.Name, false /* readOnly */),
			Mode:            mode,
			File:            "index.html",
			ExpectedContent: injectContent,
		},
	}
	e2evolume.InjectContent(ctx, f, config, nil, "", tests)

	parameters := map[string]string{}
	snapshotResource := storageframework.CreateSnapshotResource(ctx, sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters)
	group := "snapshot.storage.k8s.io"
	dataSourceRef := &v1.TypedObjectReference{
		APIGroup: &group,
		Kind:     "VolumeSnapshot",
		Name:     snapshotResource.Vs.GetName(),
	}

	cleanupFunc := func(ctx context.Context) {
		framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name)
		err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(ctx, initClaim.Name, metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
		}

		err = snapshotResource.CleanupResource(ctx, f.Timeouts)
		framework.ExpectNoError(err)
	}
	ginkgo.DeferCleanup(cleanupFunc)

	return dataSourceRef
}

func preparePVCDataSourceForProvisioning(
	ctx context.Context,
	f *framework.Framework,
	config e2evolume.TestConfig,
	client clientset.Interface,
	source *v1.PersistentVolumeClaim,
	class *storagev1.StorageClass,
	mode v1.PersistentVolumeMode,
	injectContent string,
) *v1.TypedObjectReference {
	SetupStorageClass(ctx, client, class)

	if source.ResourceVersion != "" {
		ginkgo.By("Skipping creation of PVC, it already exists")
	} else {
		ginkgo.By("[Initialize dataSource]creating a source PVC")
		var err error
		source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(ctx, source, metav1.CreateOptions{})
		framework.ExpectNoError(err)
	}

	// Write the expected content to the source volume.
	tests := []e2evolume.Test{
		{
			Volume:          *storageutils.CreateVolumeSource(source.Name, false /* readOnly */),
			Mode:            mode,
			File:            "index.html",
			ExpectedContent: injectContent,
		},
	}
	e2evolume.InjectContent(ctx, f, config, nil, "", tests)

	dataSourceRef := &v1.TypedObjectReference{
		Kind: "PersistentVolumeClaim",
		Name: source.GetName(),
	}

	cleanupFunc := func(ctx context.Context) {
		framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name)
		err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(ctx, source.Name, metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err)
		}
	}
	ginkgo.DeferCleanup(cleanupFunc)

	return dataSourceRef
}

// findVolumeMountPath looks for a claim name inside a pod and returns the
// absolute path of its volume mount point, or "" if it is not mounted.
func findVolumeMountPath(pod *v1.Pod, claim *v1.PersistentVolumeClaim) string {
	// Find the volume name that the pod uses for the claim.
	volumeName := ""
	for _, volume := range pod.Spec.Volumes {
		if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == claim.Name {
			volumeName = volume.Name
			break
		}
	}

	// Find where the pod mounted the volume inside a container.
	containerMountPath := ""
	for _, volumeMount := range pod.Spec.Containers[0].VolumeMounts {
		if volumeMount.Name == volumeName {
			containerMountPath = volumeMount.MountPath
			break
		}
	}
	return containerMountPath
}
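
// Example (sketch): locating the mount path of a claim inside a test pod
// before inspecting the filesystem:
//
//	mountPath := findVolumeMountPath(pod, claim)
//	gomega.Expect(mountPath).ShouldNot(gomega.BeEmpty())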

// getFilesystemSizeBytes returns the total size of the filesystem at the given
// mountPath inside a pod; use findVolumeMountPath for the mountPath lookup.
func getFilesystemSizeBytes(pod *v1.Pod, mountPath string) (int, error) {
	// Get the filesystem block size.
	cmd := fmt.Sprintf("stat -f -c %%s %v", mountPath)
	blockSize, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--", "/bin/sh", "-c", cmd)
	if err != nil {
		return 0, err
	}

	// Get the total number of blocks on the filesystem.
	cmd = fmt.Sprintf("stat -f -c %%b %v", mountPath)
	blockCount, err := e2ekubectl.RunKubectl(pod.Namespace, "exec", pod.Name, "--", "/bin/sh", "-c", cmd)
	if err != nil {
		return 0, err
	}

	bs, err := strconv.Atoi(strings.TrimSuffix(blockSize, "\n"))
	if err != nil {
		return 0, err
	}

	bc, err := strconv.Atoi(strings.TrimSuffix(blockCount, "\n"))
	if err != nil {
		return 0, err
	}

	return bs * bc, nil
}
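
// The size computed above is plain filesystem arithmetic: `stat -f -c %s`
// reports the fundamental block size and `stat -f -c %b` the total number of
// blocks, so a filesystem with 4096-byte blocks and 262144 blocks totals
// 4096*262144 = 1073741824 bytes (1Gi). Example (sketch):
//
//	sizeBytes, err := getFilesystemSizeBytes(pod, mountPath)
//	framework.ExpectNoError(err)
//	framework.Logf("filesystem at %s is %d bytes", mountPath, sizeBytes)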

// MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same
// underlying storage can be mounted simultaneously on a single node.
//
// Steps:
//   - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1.
//   - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
//   - Start Pod2 using PVC2, PV2 on node N1.
func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
	pod1Config := e2epod.Config{
		NS:            claim.Namespace,
		NodeSelection: node,
		PVCs:          []*v1.PersistentVolumeClaim{claim},
	}
	pod1, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod1Config, timeouts.PodStart)
	framework.ExpectNoError(err)
	defer func() {
		ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name))
		framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod1))
	}()
	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))

	// Create a new PV that points to the same underlying storage. The Retain
	// policy is used so that deleting the second PVC does not trigger deletion
	// of its bound PV and the underlying storage.
	volume, err := getBoundPV(ctx, client, claim)
	framework.ExpectNoError(err)
	pv2Config := e2epv.PersistentVolumeConfig{
		NamePrefix:       fmt.Sprintf("%s-", "pv"),
		StorageClassName: *claim.Spec.StorageClassName,
		PVSource:         volume.Spec.PersistentVolumeSource,
		AccessModes:      volume.Spec.AccessModes,
		VolumeMode:       volume.Spec.VolumeMode,
		ReclaimPolicy:    v1.PersistentVolumeReclaimRetain,
	}

	pvc2Config := e2epv.PersistentVolumeClaimConfig{
		NamePrefix:       fmt.Sprintf("%s-", "pvc"),
		StorageClassName: claim.Spec.StorageClassName,
		AccessModes:      volume.Spec.AccessModes,
		VolumeMode:       volume.Spec.VolumeMode,
	}

	pv2, pvc2, err := e2epv.CreatePVCPV(ctx, client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
	framework.ExpectNoError(err, "PVC, PV creation failed")
	framework.Logf("Created PVC %s/%s and PV %s", pvc2.Namespace, pvc2.Name, pv2.Name)

	pod2Config := e2epod.Config{
		NS:            pvc2.Namespace,
		NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName, Selector: node.Selector},
		PVCs:          []*v1.PersistentVolumeClaim{pvc2},
	}
	pod2, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod2Config, timeouts.PodStart)
	framework.ExpectNoError(err)
	ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName))

	ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name))
	framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod2))

	err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, pvc2.Namespace)
	framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name)

	err = e2epv.DeletePersistentVolume(ctx, client, pv2.Name)
	framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name)
}