
Source file src/k8s.io/kubernetes/test/e2e/storage/testsuites/volumemode.go

Documentation: k8s.io/kubernetes/test/e2e/storage/testsuites

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	volevents "k8s.io/kubernetes/pkg/controller/volume/events"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

const (
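	// noProvisioner is the provisioner name Kubernetes reserves for
	// StorageClasses whose PVs are created manually (static provisioning);
	// no dynamic provisioner acts on claims that reference such a class.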
	noProvisioner = "kubernetes.io/no-provisioner"
	pvNamePrefix  = "pv"
)

type volumeModeTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

var _ storageframework.TestSuite = &volumeModeTestSuite{}

// InitCustomVolumeModeTestSuite returns a volumeModeTestSuite that implements the TestSuite interface
// using custom test patterns.
func InitCustomVolumeModeTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &volumeModeTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "volumeMode",
			TestPatterns: patterns,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Mi",
			},
		},
	}
}

// InitVolumeModeTestSuite returns a volumeModeTestSuite that implements the TestSuite interface
// using the testsuite's default patterns.
func InitVolumeModeTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.FsVolModePreprovisionedPV,
		storageframework.FsVolModeDynamicPV,
		storageframework.BlockVolModePreprovisionedPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomVolumeModeTestSuite(patterns)
}
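
// Example (hypothetical caller, not part of this file): a driver that only
// supports raw block volumes could register a narrower suite instead of the
// default patterns above:
//
//	blockOnly := InitCustomVolumeModeTestSuite([]storageframework.TestPattern{
//		storageframework.BlockVolModePreprovisionedPV,
//		storageframework.BlockVolModeDynamicPV,
//	})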

func (t *volumeModeTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *volumeModeTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
}

func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	type local struct {
		config        *storageframework.PerTestConfig
		driverCleanup func()

		cs clientset.Interface
		ns *v1.Namespace
		// VolumeResource contains the pv, pvc, sc, etc. and owns cleaning them up
		storageframework.VolumeResource

		migrationCheck *migrationOpCheck
	}
	var (
		dInfo = driver.GetDriverInfo()
		l     local
	)

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	init := func(ctx context.Context) {
		l = local{}
		l.ns = f.Namespace
		l.cs = f.ClientSet

		// Now do the more expensive test initialization.
		l.config = driver.PrepareTest(ctx, f)
		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
	}

	// manualInit initializes l.VolumeResource without creating the PV & PVC objects.
	manualInit := func(ctx context.Context) {
		init(ctx)

		fsType := pattern.FsType
		volBindMode := storagev1.VolumeBindingImmediate

		var (
			scName             string
			pvSource           *v1.PersistentVolumeSource
			volumeNodeAffinity *v1.VolumeNodeAffinity
		)

		l.VolumeResource = storageframework.VolumeResource{
			Config:  l.config,
			Pattern: pattern,
		}

		// Create volume for pre-provisioned volume tests
		l.Volume = storageframework.CreateVolume(ctx, driver, l.config, pattern.VolType)

		switch pattern.VolType {
		case storageframework.PreprovisionedPV:
			if pattern.VolMode == v1.PersistentVolumeBlock {
				scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
			} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
				scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
			}
			if pDriver, ok := driver.(storageframework.PreprovisionedPVTestDriver); ok {
				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume)
				if pvSource == nil {
					e2eskipper.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
				}

				storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
				l.Sc = storageClass
				l.Pv = e2epv.MakePersistentVolume(pvConfig)
				l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
			}
		case storageframework.DynamicPV:
			if dDriver, ok := driver.(storageframework.DynamicPVTestDriver); ok {
				l.Sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, fsType)
				if l.Sc == nil {
					e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
				}
				l.Sc.VolumeBindingMode = &volBindMode
				testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
				driverVolumeSizeRange := dInfo.SupportedSizeRange
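				// Illustrative (assuming the helper returns the smallest size
				// satisfying both ranges): the suite minimum of 1Mi intersected
				// with a driver range of, say, [1Gi, 10Gi] yields a 1Gi claim.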
				claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
				framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)

				l.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
					ClaimSize:        claimSize,
					StorageClassName: &(l.Sc.Name),
					VolumeMode:       &pattern.VolMode,
				}, l.ns.Name)
			}
		default:
			framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
		}
	}

	cleanup := func(ctx context.Context) {
		var errs []error
		errs = append(errs, l.CleanupResource(ctx))
		errs = append(errs, storageutils.TryFunc(l.driverCleanup))
		l.driverCleanup = nil
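		// errors.NewAggregate drops nil entries and returns nil when nothing
		// actually failed, so this assertion passes on a clean run.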
		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
		l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
	}

	// We register different tests depending on the driver.
	isBlockSupported := dInfo.Capabilities[storageframework.CapBlock]
	switch pattern.VolType {
	case storageframework.PreprovisionedPV:
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			f.It("should fail to create pod by failing to mount volume", f.WithSlow(), func(ctx context.Context) {
				manualInit(ctx)
				ginkgo.DeferCleanup(cleanup)

				var err error

				ginkgo.By("Creating sc")
				l.Sc, err = l.cs.StorageV1().StorageClasses().Create(ctx, l.Sc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create sc")

				ginkgo.By("Creating pv and pvc")
				l.Pv, err = l.cs.CoreV1().PersistentVolumes().Create(ctx, l.Pv, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pv")

				// Prebind pv
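				// Setting spec.volumeName pre-binds the claim to this exact PV,
				// bypassing the PV controller's normal volume matching.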
				l.Pvc.Spec.VolumeName = l.Pv.Name
				l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, l.Pvc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pvc")

				framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")

				ginkgo.By("Creating pod")
				podConfig := e2epod.Config{
					NS:            l.ns.Name,
					PVCs:          []*v1.PersistentVolumeClaim{l.Pvc},
					SeLinuxLabel:  e2epod.GetLinuxLabel(),
					NodeSelection: l.config.ClientNodeSelection,
					ImageID:       e2epod.GetDefaultTestImageID(),
				}
				pod, err := e2epod.MakeSecPod(&podConfig)
				framework.ExpectNoError(err, "Failed to create pod")

				pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pod")
				defer func() {
					framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod), "Failed to delete pod")
				}()

				eventSelector := fields.Set{
					"involvedObject.kind":      "Pod",
					"involvedObject.name":      pod.Name,
					"involvedObject.namespace": l.ns.Name,
					"reason":                   events.FailedMountVolume,
				}.AsSelector().String()
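				// The resulting selector string looks like (illustrative):
				//   involvedObject.kind=Pod,involvedObject.name=<pod-name>,involvedObject.namespace=<ns>,reason=<events.FailedMountVolume>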
				msg := "Unable to attach or mount volumes"

				err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
				// Events are unreliable; don't depend on the event. It's used only to speed up the test.
				if err != nil {
					framework.Logf("Warning: did not get event about FailedMountVolume")
				}

				// Check the pod is still not running
				p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
				gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
			})
		}

	case storageframework.DynamicPV:
		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
			f.It("should fail in binding dynamic provisioned PV to PVC", f.WithSlow(), "[LinuxOnly]", func(ctx context.Context) {
				manualInit(ctx)
				ginkgo.DeferCleanup(cleanup)

				var err error

				ginkgo.By("Creating sc")
				l.Sc, err = l.cs.StorageV1().StorageClasses().Create(ctx, l.Sc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create sc")

				ginkgo.By("Creating pv and pvc")
				l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(ctx, l.Pvc, metav1.CreateOptions{})
				framework.ExpectNoError(err, "Failed to create pvc")

				eventSelector := fields.Set{
					"involvedObject.kind":      "PersistentVolumeClaim",
					"involvedObject.name":      l.Pvc.Name,
					"involvedObject.namespace": l.ns.Name,
					"reason":                   volevents.ProvisioningFailed,
				}.AsSelector().String()
				// The error message is different for each storage driver
				msg := ""

				err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
				// Events are unreliable; don't depend on the event. It's used only to speed up the test.
				if err != nil {
					framework.Logf("Warning: did not get event about provisioning failed")
				}

				// Check the pvc is still pending
				pvc, err := l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Get(ctx, l.Pvc.Name, metav1.GetOptions{})
				framework.ExpectNoError(err, "Failed to re-read the pvc after event (or timeout)")
				gomega.Expect(pvc.Status.Phase).To(gomega.Equal(v1.ClaimPending), "PVC phase isn't pending")
			})
		}
	default:
		framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType)
	}

	f.It("should fail to use a volume in a pod with mismatched mode", f.WithSlow(), func(ctx context.Context) {
		skipTestIfBlockNotSupported(driver)
		init(ctx)
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.VolumeResource = *storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange)
		ginkgo.DeferCleanup(cleanup)

		ginkgo.By("Creating pod")
		var err error
		podConfig := e2epod.Config{
			NS:           l.ns.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
			SeLinuxLabel: e2epod.GetLinuxLabel(),
			ImageID:      e2epod.GetDefaultTestImageID(),
		}
		pod, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err)

		// Change volumeMounts to volumeDevices and the other way around
		pod = swapVolumeMode(pod)

		// Run the pod
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create pod")
		defer func() {
			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod), "Failed to delete pod")
		}()

		ginkgo.By("Waiting for the pod to fail")
		// Wait for an event that the pod is invalid.
		eventSelector := fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      pod.Name,
			"involvedObject.namespace": l.ns.Name,
			"reason":                   events.FailedMountVolume,
		}.AsSelector().String()

		var msg string
		if pattern.VolMode == v1.PersistentVolumeBlock {
			msg = "has volumeMode Block, but is specified in volumeMounts"
		} else {
			msg = "has volumeMode Filesystem, but is specified in volumeDevices"
		}
		err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
		// Events are unreliable; don't depend on them. They're used only to speed up the test.
		if err != nil {
			framework.Logf("Warning: did not get event about mismatched volume use")
		}

		// Check the pod is still not running
		p, err := l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "could not re-read the pod after event (or timeout)")
		gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodPending), "Pod phase isn't pending")
	})

	ginkgo.It("should not mount / map unused volumes in a pod [LinuxOnly]", func(ctx context.Context) {
		if pattern.VolMode == v1.PersistentVolumeBlock {
			skipTestIfBlockNotSupported(driver)
		}
		init(ctx)
		testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
		l.VolumeResource = *storageframework.CreateVolumeResource(ctx, driver, l.config, pattern, testVolumeSizeRange)
		ginkgo.DeferCleanup(cleanup)

		ginkgo.By("Creating pod")
		var err error
		podConfig := e2epod.Config{
			NS:           l.ns.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
			SeLinuxLabel: e2epod.GetLinuxLabel(),
			ImageID:      e2epod.GetDefaultTestImageID(),
		}
		pod, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err)

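		// Drop every volumeMount and volumeDevice: the PVC stays listed in
		// pod.Spec.Volumes, but no container consumes it, so kubelet should
		// neither mount nor map the volume on the node.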
		for i := range pod.Spec.Containers {
			pod.Spec.Containers[i].VolumeDevices = nil
			pod.Spec.Containers[i].VolumeMounts = nil
		}

		// Run the pod
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(ctx, pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
		defer func() {
			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, l.cs, pod))
		}()

		err = e2epod.WaitForPodNameRunningInNamespace(ctx, l.cs, pod.Name, pod.Namespace)
		framework.ExpectNoError(err)

		// Reload the pod to get its node
		pod, err = l.cs.CoreV1().Pods(l.ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		gomega.Expect(pod.Spec.NodeName).ToNot(gomega.BeEmpty(), "pod should be scheduled to a node")
		node, err := l.cs.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("Listing mounted volumes in the pod")
		hostExec := storageutils.NewHostExec(f)
		ginkgo.DeferCleanup(hostExec.Cleanup)
		volumePaths, devicePaths, err := listPodVolumePluginDirectory(ctx, hostExec, pod, node)
		framework.ExpectNoError(err)

		driverInfo := driver.GetDriverInfo()
		volumePlugin := driverInfo.InTreePluginName
		if len(volumePlugin) == 0 {
			// TODO: check if it's a CSI volume first
			volumePlugin = "kubernetes.io/csi"
		}
		ginkgo.By(fmt.Sprintf("Checking that volume plugin %s is not used in pod directory", volumePlugin))
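		// kubelet escapes "/" in plugin names as "~" when creating on-disk
		// directories (e.g. kubernetes.io/csi -> kubernetes.io~csi); compare
		// the sample paths in listPodVolumePluginDirectory's comment below.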
		safeVolumePlugin := strings.ReplaceAll(volumePlugin, "/", "~")
		for _, path := range volumePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be mounted into pod directory", volumePlugin))
		}
		for _, path := range devicePaths {
			gomega.Expect(path).NotTo(gomega.ContainSubstring(safeVolumePlugin), fmt.Sprintf("no %s volume should be symlinked into pod directory", volumePlugin))
		}
	})
}

func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode,
	volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource, volumeNodeAffinity *v1.VolumeNodeAffinity) (*storagev1.StorageClass,
	e2epv.PersistentVolumeConfig, e2epv.PersistentVolumeClaimConfig) {
	// StorageClass
	scConfig := &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: scName,
		},
		Provisioner:       noProvisioner,
		VolumeBindingMode: &volBindMode,
	}
	// PV
	pvConfig := e2epv.PersistentVolumeConfig{
		PVSource:         pvSource,
		NodeAffinity:     volumeNodeAffinity,
		NamePrefix:       pvNamePrefix,
		StorageClassName: scName,
		VolumeMode:       &volMode,
	}
	// PVC
	pvcConfig := e2epv.PersistentVolumeClaimConfig{
		AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		StorageClassName: &scName,
		VolumeMode:       &volMode,
	}

	return scConfig, pvConfig, pvcConfig
}

// swapVolumeMode changes volumeMounts to volumeDevices and the other way around
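// For example, {Name: "vol1", MountPath: "/mnt/vol1"} in a container's
// volumeMounts comes back as {Name: "vol1", DevicePath: "/mnt/vol1"} in
// volumeDevices, and vice versa.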
func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
	pod := podTemplate.DeepCopy()
	for c := range pod.Spec.Containers {
		container := &pod.Spec.Containers[c]
		container.VolumeDevices = []v1.VolumeDevice{}
		container.VolumeMounts = []v1.VolumeMount{}

		// Change VolumeMounts to VolumeDevices
		for _, volumeMount := range podTemplate.Spec.Containers[c].VolumeMounts {
			container.VolumeDevices = append(container.VolumeDevices, v1.VolumeDevice{
				Name:       volumeMount.Name,
				DevicePath: volumeMount.MountPath,
			})
		}
		// Change VolumeDevices to VolumeMounts
		for _, volumeDevice := range podTemplate.Spec.Containers[c].VolumeDevices {
			container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
				Name:      volumeDevice.Name,
				MountPath: volumeDevice.DevicePath,
			})
		}
	}
	return pod
}

// listPodVolumePluginDirectory returns all volumes in /var/lib/kubelet/pods/<pod UID>/volumes/* and
// /var/lib/kubelet/pods/<pod UID>/volumeDevices/*
// Sample output:
//
//	/var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
//	/var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
func listPodVolumePluginDirectory(ctx context.Context, h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
	mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
	devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")

	mounts, err = listPodDirectory(ctx, h, mountPath, node)
	if err != nil {
		return nil, nil, err
	}
	devices, err = listPodDirectory(ctx, h, devicePath, node)
	if err != nil {
		return nil, nil, err
	}
	return mounts, devices, nil
}

func listPodDirectory(ctx context.Context, h storageutils.HostExec, path string, node *v1.Node) ([]string, error) {
	// Return no error if the directory does not exist (e.g. there are no block volumes used)
	_, err := h.IssueCommandWithResult(ctx, "test ! -d "+path, node)
	if err == nil {
		// The directory does not exist
		return nil, nil
	}
	// The directory either exists or a real error happened (e.g. "access denied").
   504  

	// Inside /var/lib/kubelet/pods/<pod>/volumes, look for <volume_plugin>/<volume-name>, hence depth 2
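	// Illustrative expansion: "find /var/lib/kubelet/pods/<uid>/volumes -mindepth 2 -maxdepth 2",
	// which prints one "<plugin>/<volume-name>" path per line.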
	cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
	out, err := h.IssueCommandWithResult(ctx, cmd, node)
	if err != nil {
		return nil, fmt.Errorf("error checking directory %s on node %s: %w", path, node.Name, err)
	}
	return strings.Split(out, "\n"), nil
}
