Source file src/k8s.io/kubernetes/test/e2e/storage/testsuites/ephemeral.go

Documentation: k8s.io/kubernetes/test/e2e/storage/testsuites

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

type ephemeralTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

// InitCustomEphemeralTestSuite returns an ephemeralTestSuite that implements
// the TestSuite interface, using the given custom test patterns.
func InitCustomEphemeralTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &ephemeralTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "ephemeral",
			TestPatterns: patterns,
		},
	}
}

// GenericEphemeralTestPatterns returns the test patterns for
// generic ephemeral inline volumes.
func GenericEphemeralTestPatterns() []storageframework.TestPattern {
	genericLateBinding := storageframework.DefaultFsGenericEphemeralVolume
	genericLateBinding.Name += " (late-binding)"
	genericLateBinding.BindingMode = storagev1.VolumeBindingWaitForFirstConsumer

	genericImmediateBinding := storageframework.DefaultFsGenericEphemeralVolume
	genericImmediateBinding.Name += " (immediate-binding)"
	genericImmediateBinding.BindingMode = storagev1.VolumeBindingImmediate

	return []storageframework.TestPattern{
		genericLateBinding,
		genericImmediateBinding,
		storageframework.BlockVolModeGenericEphemeralVolume,
	}
}

// CSIEphemeralTestPatterns returns the test patterns for
// CSI ephemeral inline volumes.
func CSIEphemeralTestPatterns() []storageframework.TestPattern {
	return []storageframework.TestPattern{
		storageframework.DefaultFsCSIEphemeralVolume,
	}
}

// AllEphemeralTestPatterns returns all pre-defined test patterns for
// generic and CSI ephemeral inline volumes.
func AllEphemeralTestPatterns() []storageframework.TestPattern {
	return append(GenericEphemeralTestPatterns(), CSIEphemeralTestPatterns()...)
}

// InitEphemeralTestSuite returns an ephemeralTestSuite that implements
// the TestSuite interface, using the suite's default test patterns.
func InitEphemeralTestSuite() storageframework.TestSuite {
	return InitCustomEphemeralTestSuite(AllEphemeralTestPatterns())
}
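
// A minimal sketch of how a driver's e2e tests might wire this suite in.
// "curDriver" and the Describe wrapper are placeholders for the caller's own
// registration code; DefineTestSuites is the storage framework's helper for
// instantiating suites against a driver:
//
//	var suites = []func() storageframework.TestSuite{
//		InitEphemeralTestSuite,
//	}
//	ginkgo.Describe("MyDriver", func() {
//		storageframework.DefineTestSuites(curDriver, suites)
//	})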

func (p *ephemeralTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return p.tsInfo
}

func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	if pattern.VolMode == v1.PersistentVolumeBlock {
		skipTestIfBlockNotSupported(driver)
	}
}

func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config *storageframework.PerTestConfig

		testCase *EphemeralTest
		resource *storageframework.VolumeResource
	}
	var (
		dInfo   = driver.GetDriverInfo()
		eDriver storageframework.EphemeralTestDriver
		l       local
	)

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	init := func(ctx context.Context) {
		if pattern.VolType == storageframework.CSIInlineVolume {
			eDriver, _ = driver.(storageframework.EphemeralTestDriver)
		}
		if pattern.VolType == storageframework.GenericEphemeralVolume {
			// The GenericEphemeralVolume feature is GA, but
			// perhaps this test is run against an older Kubernetes
			// where the feature might be disabled.
			enabled, err := GenericEphemeralVolumesEnabled(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name)
			framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
			if !enabled {
				e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType)
			}
		}
		// A driver might support the Topology capability, which is incompatible with
		// immediate volume binding: volumes might then be provisioned immediately in a
		// different zone from where the workload is located.
		if pattern.BindingMode == storagev1.VolumeBindingImmediate && len(dInfo.TopologyKeys) > 0 {
			e2eskipper.Skipf("VolumeBindingMode immediate is not compatible with a multi-topology environment.")
		}

		l = local{}

		if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
			pattern.AllowExpansion = false
		}

		// Now do the more expensive test initialization.
		l.config = driver.PrepareTest(ctx, f)
		l.resource = storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, e2evolume.SizeRange{})

		switch pattern.VolType {
		case storageframework.CSIInlineVolume:
			l.testCase = &EphemeralTest{
				Client:     l.config.Framework.ClientSet,
				Timeouts:   f.Timeouts,
				Namespace:  f.Namespace.Name,
				DriverName: eDriver.GetCSIDriverName(l.config),
				Node:       l.config.ClientNodeSelection,
				GetVolume: func(volumeNumber int) (map[string]string, bool, bool) {
					return eDriver.GetVolume(l.config, volumeNumber)
				},
			}
		case storageframework.GenericEphemeralVolume:
			l.testCase = &EphemeralTest{
				Client:    l.config.Framework.ClientSet,
				Timeouts:  f.Timeouts,
				Namespace: f.Namespace.Name,
				Node:      l.config.ClientNodeSelection,
				VolSource: l.resource.VolSource,
			}
		}
	}

	cleanup := func(ctx context.Context) {
		var cleanUpErrs []error
		cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource(ctx))
		err := utilerrors.NewAggregate(cleanUpErrs)
		framework.ExpectNoError(err, "while cleaning up")
	}

	ginkgo.It("should create read-only inline ephemeral volume", func(ctx context.Context) {
		if pattern.VolMode == v1.PersistentVolumeBlock {
			e2eskipper.Skipf("raw block volumes cannot be read-only")
		}

		init(ctx)
		ginkgo.DeferCleanup(cleanup)

		l.testCase.ReadOnly = true
		l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
			command := "mount | grep /mnt/test | grep ro,"
			if framework.NodeOSDistroIs("windows") {
				// Attempt to create a dummy file and expect it not to be created.
				command = "ls /mnt/test* && (touch /mnt/test-0/hello-world || true) && [ ! -f /mnt/test-0/hello-world ]"
			}
			e2evolume.VerifyExecInPodSucceed(f, pod, command)
			return nil
		}
		l.testCase.TestEphemeral(ctx)
	})

	ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) {
		init(ctx)
		ginkgo.DeferCleanup(cleanup)

		l.testCase.ReadOnly = false
		l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
			command := "mount | grep /mnt/test | grep rw,"
			if framework.NodeOSDistroIs("windows") {
				// Attempt to create a dummy file and expect it to be created.
				command = "ls /mnt/test* && touch /mnt/test-0/hello-world && [ -f /mnt/test-0/hello-world ]"
			}
			if pattern.VolMode == v1.PersistentVolumeBlock {
				command = "if ! [ -b /mnt/test-0 ]; then echo /mnt/test-0 is not a block device; exit 1; fi"
			}
			e2evolume.VerifyExecInPodSucceed(f, pod, command)
			return nil
		}
		l.testCase.TestEphemeral(ctx)
	})

	ginkgo.It("should support expansion of pvcs created for ephemeral pvcs", func(ctx context.Context) {
		if pattern.VolType != storageframework.GenericEphemeralVolume {
			e2eskipper.Skipf("Skipping %s test for expansion", pattern.VolType)
		}

		init(ctx)
		ginkgo.DeferCleanup(cleanup)

		if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
			e2eskipper.Skipf("Driver %q does not support online volume expansion - skipping", driver.GetDriverInfo().Name)
		}

		l.testCase.ReadOnly = false
		l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
			podName := pod.Name
			framework.Logf("Running volume expansion checks on pod %s", podName)

			outerPodVolumeSpecName := ""
			for i := range pod.Spec.Volumes {
				volume := pod.Spec.Volumes[i]
				if volume.Ephemeral != nil {
					outerPodVolumeSpecName = volume.Name
					break
				}
			}
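			// The ephemeral volume controller names the PVC it creates for a
			// generic ephemeral volume "<pod name>-<volume name>", which is
			// what the Sprintf below reconstructs.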
			pvcName := fmt.Sprintf("%s-%s", podName, outerPodVolumeSpecName)
			pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
			framework.ExpectNoError(err, "error getting ephemeral pvc")

			ginkgo.By("Expanding current pvc")
			currentPvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("1Gi"))
			framework.Logf("currentPvcSize %s, requested new size %s", currentPvcSize.String(), newSize.String())

			newPVC, err := ExpandPVCSize(ctx, pvc, newSize, f.ClientSet)
			framework.ExpectNoError(err, "while updating pvc to a larger size")
			pvc = newPVC
			gomega.Expect(pvc).NotTo(gomega.BeNil())

			pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]
			if pvcSize.Cmp(newSize) != 0 {
				framework.Failf("error updating pvc %s from %s to %s size", pvc.Name, currentPvcSize.String(), newSize.String())
			}

			ginkgo.By("Waiting for cloudprovider resize to finish")
			err = WaitForControllerVolumeResize(ctx, pvc, f.ClientSet, totalResizeWaitPeriod)
			framework.ExpectNoError(err, "while waiting for pvc resize to finish")

			ginkgo.By("Waiting for file system resize to finish")
			pvc, err = WaitForFSResize(ctx, pvc, f.ClientSet)
			framework.ExpectNoError(err, "while waiting for fs resize to finish")

			pvcConditions := pvc.Status.Conditions
			gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
			return nil
		}
		l.testCase.TestEphemeral(ctx)
	})

	ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) {
		init(ctx)
		ginkgo.DeferCleanup(cleanup)

		// We test in read-only mode if that is all that the driver supports,
		// otherwise read/write. For generic ephemeral volumes (PVCs), both
		// are assumed to be false.
		shared := false
		readOnly := false
		if eDriver != nil {
			_, shared, readOnly = eDriver.GetVolume(l.config, 0)
		}

		l.testCase.RunningPodCheck = func(ctx context.Context, pod *v1.Pod) interface{} {
			// Create another pod with the same inline volume attributes.
			pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
				[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
				readOnly,
				l.testCase.Node)
			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume")

			// If (and only if) we were able to mount
			// read/write and volume data is not shared
			// between pods, then we can check whether
			// data written in one pod is really not
			// visible in the other.
			if pattern.VolMode != v1.PersistentVolumeBlock && !readOnly && !shared {
				ginkgo.By("writing data in one pod and checking the second does not see it (it should get its own volume)")
				e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
				e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
			}

			// TestEphemeral expects the pod to be fully deleted
			// when this function returns, so don't delay this
			// cleanup.
			StopPodAndDependents(ctx, f.ClientSet, f.Timeouts, pod2)

			return nil
		}

		l.testCase.TestEphemeral(ctx)
	})

	ginkgo.It("should support multiple inline ephemeral volumes", func(ctx context.Context) {
		if pattern.BindingMode == storagev1.VolumeBindingImmediate &&
			pattern.VolType == storageframework.GenericEphemeralVolume {
			e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.")
		}

		init(ctx)
		ginkgo.DeferCleanup(cleanup)

		l.testCase.NumInlineVolumes = 2
		l.testCase.TestEphemeral(ctx)
	})
}

// EphemeralTest represents parameters to be used by tests for inline volumes.
// Not all parameters are used by all tests.
type EphemeralTest struct {
	Client     clientset.Interface
	Timeouts   *framework.TimeoutContext
	Namespace  string
	DriverName string
	VolSource  *v1.VolumeSource
	Node       e2epod.NodeSelection

	// GetVolume returns the volume attributes for a
	// certain inline ephemeral volume, enumerated starting with
	// #0. Some tests might require more than one volume. They can
	// all be the same or different, depending on what the driver supports
	// and/or wants to test.
	//
	// For each volume, the test driver can specify the
	// attributes, whether two pods using those attributes will
	// end up sharing the same backend storage (i.e. changes made
	// in one pod will be visible in the other), and whether
	// the volume can be mounted read/write or only read-only.
	GetVolume func(volumeNumber int) (attributes map[string]string, shared bool, readOnly bool)

	// RunningPodCheck is invoked while a pod using an inline volume is running.
	// It can execute additional checks on the pod and its volume(s). Any data
	// returned by it is passed to StoppedPodCheck.
	RunningPodCheck func(ctx context.Context, pod *v1.Pod) interface{}

	// StoppedPodCheck is invoked after ensuring that the pod is gone.
	// It is passed the data gathered by RunningPodCheck, or nil if that
	// isn't defined, and then can do additional checks on the node,
	// like for example verifying that the ephemeral volume was really
	// removed. How to do such a check is driver-specific and not
	// covered by the generic storage test suite.
	StoppedPodCheck func(ctx context.Context, nodeName string, runningPodData interface{})

	// NumInlineVolumes sets the number of ephemeral inline volumes per pod.
	// Unset (= zero) is the same as one.
	NumInlineVolumes int

	// ReadOnly limits mounting to read-only.
	ReadOnly bool
}
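
// A minimal usage sketch for a generic ephemeral volume, assuming an
// f *framework.Framework and a VolumeResource "res" created elsewhere
// (both placeholders here); this mirrors how DefineTests constructs
// the test case above:
//
//	test := EphemeralTest{
//		Client:    f.ClientSet,
//		Timeouts:  f.Timeouts,
//		Namespace: f.Namespace.Name,
//		VolSource: res.VolSource,
//	}
//	test.TestEphemeral(ctx)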

// TestEphemeral tests pod creation with one or more inline ephemeral volumes.
func (t EphemeralTest) TestEphemeral(ctx context.Context) {
	client := t.Client
	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")

	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
	command := "sleep 10000"

	var volumes []v1.VolumeSource
	numVolumes := t.NumInlineVolumes
	if numVolumes == 0 {
		numVolumes = 1
	}
	for i := 0; i < numVolumes; i++ {
		var volume v1.VolumeSource
		switch {
		case t.GetVolume != nil:
			attributes, _, readOnly := t.GetVolume(i)
			if readOnly && !t.ReadOnly {
				e2eskipper.Skipf("inline ephemeral volume #%d is read-only, but the test needs a read/write volume", i)
			}
			volume = v1.VolumeSource{
				CSI: &v1.CSIVolumeSource{
					Driver:           t.DriverName,
					VolumeAttributes: attributes,
				},
			}
		case t.VolSource != nil:
			volume = *t.VolSource
		default:
			framework.Failf("EphemeralTest has neither GetVolume nor VolSource")
		}
		volumes = append(volumes, volume)
	}
	pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
	defer func() {
		// pod might be nil now.
		StopPodAndDependents(ctx, client, t.Timeouts, pod)
	}()
	framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume")
	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err, "get pod")
	actualNodeName := runningPod.Spec.NodeName

	// Run the checker of the running pod.
	var runningPodData interface{}
	if t.RunningPodCheck != nil {
		runningPodData = t.RunningPodCheck(ctx, pod)
	}

	StopPodAndDependents(ctx, client, t.Timeouts, pod)
	pod = nil // Don't stop twice.

	// There should be no dangling PVCs in the namespace now. There might be for
	// generic ephemeral volumes, if something went wrong...
	pvcs, err := client.CoreV1().PersistentVolumeClaims(t.Namespace).List(ctx, metav1.ListOptions{})
	framework.ExpectNoError(err, "list PVCs")
	gomega.Expect(pvcs.Items).Should(gomega.BeEmpty(), "no dangling PVCs")

	if t.StoppedPodCheck != nil {
		t.StoppedPodCheck(ctx, actualNodeName, runningPodData)
	}
}

// StartInPodWithInlineVolume starts a command in a pod with given volume(s) mounted to /mnt/test-<number> directory.
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithInlineVolume(ctx context.Context, c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
				"app": podName,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "csi-volume-tester",
					Image: e2epod.GetDefaultTestImage(),
					// NOTE: /bin/sh works on both agnhost and busybox
					Command: []string{"/bin/sh", "-c", command},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	e2epod.SetNodeSelection(&pod.Spec, node)

	for i, volume := range volumes {
		name := fmt.Sprintf("my-volume-%d", i)
		path := fmt.Sprintf("/mnt/test-%d", i)
		if volume.Ephemeral != nil && volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != nil &&
			*volume.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode == v1.PersistentVolumeBlock {
			pod.Spec.Containers[0].VolumeDevices = append(pod.Spec.Containers[0].VolumeDevices,
				v1.VolumeDevice{
					Name:       name,
					DevicePath: path,
				})
		} else {
			pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
				v1.VolumeMount{
					Name:      name,
					MountPath: path,
					ReadOnly:  readOnly,
				})
		}
		pod.Spec.Volumes = append(pod.Spec.Volumes,
			v1.Volume{
				Name:         name,
				VolumeSource: volume,
			})
	}

	pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "failed to create pod")
	return pod
}
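
// Usage sketch: start a pod with a single CSI inline volume and stop it when
// done. The driver name is a placeholder, "c" is a clientset.Interface, and
// "timeouts" is the caller's *framework.TimeoutContext:
//
//	pod := StartInPodWithInlineVolume(ctx, c, ns, "tester", "sleep 10000",
//		[]v1.VolumeSource{{CSI: &v1.CSIVolumeSource{Driver: "example.csi.driver"}}},
//		false /* readOnly */, e2epod.NodeSelection{})
//	defer StopPodAndDependents(ctx, c, timeouts, pod)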

// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
	return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{
		CSI: &v1.CSIVolumeSource{
			Driver: "no-such-driver.example.com",
		},
	})
}

// GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func GenericEphemeralVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
	storageClassName := "no-such-storage-class"
	return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{
		Ephemeral: &v1.EphemeralVolumeSource{
			VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
				Spec: v1.PersistentVolumeClaimSpec{
					StorageClassName: &storageClassName,
					AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
					Resources: v1.VolumeResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceStorage: resource.MustParse("1Gi"),
						},
					},
				},
			},
		},
	})
}

// VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying
// to create a pod that uses it.
func VolumeSourceEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: "inline-volume-",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "volume-tester",
					Image: "no-such-registry/no-such-image",
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "my-volume",
							MountPath: "/mnt/test",
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
			Volumes: []v1.Volume{
				{
					Name:         "my-volume",
					VolumeSource: volume,
				},
			},
		},
	}

	pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})

	switch {
	case err == nil:
		// Pod was created, feature supported.
		StopPodAndDependents(ctx, c, t, pod)
		return true, nil
	case apierrors.IsInvalid(err):
		// "Invalid" because it uses a feature that isn't supported.
		return false, nil
	default:
		// Unexpected error.
		return false, err
	}
}
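
// Probe sketch: the feature checks above all reduce to VolumeSourceEnabled.
// For example, a test could skip itself when CSI inline volumes are not
// supported, mirroring the GenericEphemeralVolume check in init above
// (f is a placeholder *framework.Framework):
//
//	enabled, err := CSIInlineVolumesEnabled(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name)
//	framework.ExpectNoError(err, "check CSIInlineVolumes feature")
//	if !enabled {
//		e2eskipper.Skipf("cluster does not support CSI inline volumes -- skipping")
//	}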