...

Source file src/k8s.io/kubernetes/test/e2e/storage/persistent_volumes.go

Documentation: k8s.io/kubernetes/test/e2e/storage

     1  /*
     2  Copyright 2015 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package storage
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"strings"
    23  	"time"
    24  
    25  	appsv1 "k8s.io/api/apps/v1"
    26  	v1 "k8s.io/api/core/v1"
    27  	storagev1 "k8s.io/api/storage/v1"
    28  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    29  	"k8s.io/apimachinery/pkg/labels"
    30  	"k8s.io/apimachinery/pkg/runtime"
    31  	"k8s.io/apimachinery/pkg/runtime/schema"
    32  	types "k8s.io/apimachinery/pkg/types"
    33  	utilerrors "k8s.io/apimachinery/pkg/util/errors"
    34  	"k8s.io/apimachinery/pkg/util/uuid"
    35  	clientset "k8s.io/client-go/kubernetes"
    36  	"k8s.io/client-go/util/retry"
    37  	"k8s.io/kubernetes/test/e2e/feature"
    38  	"k8s.io/kubernetes/test/e2e/framework"
    39  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    40  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    41  	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
    42  	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    43  	"k8s.io/kubernetes/test/e2e/storage/utils"
    44  	imageutils "k8s.io/kubernetes/test/utils/image"
    45  	admissionapi "k8s.io/pod-security-admission/api"
    46  
    47  	"github.com/onsi/ginkgo/v2"
    48  	"github.com/onsi/gomega"
    49  	"github.com/onsi/gomega/gstruct"
    50  )
    51  
    52  // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
    53  // phase. Note: the PV is deleted in the AfterEach, not here.
    54  func completeTest(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
    55  	// 1. verify that the PV and PVC have bound correctly
    56  	ginkgo.By("Validating the PV-PVC binding")
    57  	framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc))
    58  
    59  	// 2. create the nfs writer pod, test if the write was successful,
    60  	//    then delete the pod and verify that it was deleted
    61  	ginkgo.By("Checking pod has write access to PersistentVolume")
    62  	framework.ExpectNoError(createWaitAndDeletePod(ctx, c, f.Timeouts, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
    63  
    64  	// 3. delete the PVC, wait for PV to become "Released"
    65  	ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
    66  	framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased))
    67  }
    68  
    69  // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
    70  // PV. Ensure each step succeeds.
    71  // Note: the PV is deleted in the AfterEach, not here.
    72  // Note: this func is serialized, we wait for each pod to be deleted before creating the
    73  //
    74  //	next pod. Adding concurrency is a TODO item.
    75  func completeMultiTest(ctx context.Context, f *framework.Framework, c clientset.Interface, ns string, pvols e2epv.PVMap, claims e2epv.PVCMap, expectPhase v1.PersistentVolumePhase) error {
    76  	var err error
    77  
    78  	// 1. verify each PV permits write access to a client pod
    79  	ginkgo.By("Checking pod has write access to PersistentVolumes")
    80  	for pvcKey := range claims {
    81  		pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{})
    82  		if err != nil {
    83  			return fmt.Errorf("error getting pvc %q: %w", pvcKey.Name, err)
    84  		}
    85  		if len(pvc.Spec.VolumeName) == 0 {
    86  			continue // claim is not bound
    87  		}
    88  		// sanity test to ensure our maps are in sync
    89  		_, found := pvols[pvc.Spec.VolumeName]
    90  		if !found {
    91  			return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
    92  		}
    93  		// TODO: currently a serialized test of each PV
    94  		if err = createWaitAndDeletePod(ctx, c, f.Timeouts, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
    95  			return err
    96  		}
    97  	}
    98  
    99  	// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
   100  	ginkgo.By("Deleting PVCs to invoke reclaim policy")
   101  	if err = e2epv.DeletePVCandValidatePVGroup(ctx, c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil {
   102  		return err
   103  	}
   104  	return nil
   105  }
   106  
   107  var _ = utils.SIGDescribe("PersistentVolumes", func() {
   108  
   109  	// global vars for the ginkgo.Context()s and ginkgo.It()'s below
   110  	f := framework.NewDefaultFramework("pv")
   111  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   112  	var (
   113  		c         clientset.Interface
   114  		ns        string
   115  		pvConfig  e2epv.PersistentVolumeConfig
   116  		pvcConfig e2epv.PersistentVolumeClaimConfig
   117  		volLabel  labels.Set
   118  		selector  *metav1.LabelSelector
   119  		pv        *v1.PersistentVolume
   120  		pvc       *v1.PersistentVolumeClaim
   121  		err       error
   122  	)
   123  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   124  
   125  	ginkgo.BeforeEach(func() {
   126  		c = f.ClientSet
   127  		ns = f.Namespace.Name
   128  		// Enforce binding only within test space via selector labels
   129  		volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
   130  		selector = metav1.SetAsLabelSelector(volLabel)
   131  	})
   132  
   133  	// Testing configurations of a single a PV/PVC pair, multiple evenly paired PVs/PVCs,
   134  	// and multiple unevenly paired PV/PVCs
   135  	ginkgo.Describe("NFS", func() {
   136  
   137  		var (
   138  			nfsServerPod *v1.Pod
   139  			serverHost   string
   140  		)
   141  
   142  		ginkgo.BeforeEach(func(ctx context.Context) {
   143  			_, nfsServerPod, serverHost = e2evolume.NewNFSServer(ctx, c, ns, []string{"-G", "777", "/exports"})
   144  			pvConfig = e2epv.PersistentVolumeConfig{
   145  				NamePrefix: "nfs-",
   146  				Labels:     volLabel,
   147  				PVSource: v1.PersistentVolumeSource{
   148  					NFS: &v1.NFSVolumeSource{
   149  						Server:   serverHost,
   150  						Path:     "/exports",
   151  						ReadOnly: false,
   152  					},
   153  				},
   154  			}
   155  			emptyStorageClass := ""
   156  			pvcConfig = e2epv.PersistentVolumeClaimConfig{
   157  				Selector:         selector,
   158  				StorageClassName: &emptyStorageClass,
   159  			}
   160  		})
   161  
   162  		ginkgo.AfterEach(func(ctx context.Context) {
   163  			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, nfsServerPod), "AfterEach: Failed to delete pod ", nfsServerPod.Name)
   164  			pv, pvc = nil, nil
   165  			pvConfig, pvcConfig = e2epv.PersistentVolumeConfig{}, e2epv.PersistentVolumeClaimConfig{}
   166  		})
   167  
   168  		ginkgo.Context("with Single PV - PVC pairs", func() {
   169  			// Note: this is the only code where the pv is deleted.
   170  			ginkgo.AfterEach(func(ctx context.Context) {
   171  				framework.Logf("AfterEach: Cleaning up test resources.")
   172  				if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv, pvc); len(errs) > 0 {
   173  					framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
   174  				}
   175  			})
   176  
   177  			// Individual tests follow:
   178  			//
   179  			// Create an nfs PV, then a claim that matches the PV, and a pod that
   180  			// contains the claim. Verify that the PV and PVC bind correctly, and
   181  			// that the pod can write to the nfs volume.
   182  			ginkgo.It("should create a non-pre-bound PV and PVC: test write access ", func(ctx context.Context) {
   183  				pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false)
   184  				framework.ExpectNoError(err)
   185  				completeTest(ctx, f, c, ns, pv, pvc)
   186  			})
   187  
   188  			// Create a claim first, then a nfs PV that matches the claim, and a
   189  			// pod that contains the claim. Verify that the PV and PVC bind
   190  			// correctly, and that the pod can write to the nfs volume.
   191  			ginkgo.It("create a PVC and non-pre-bound PV: test write access", func(ctx context.Context) {
   192  				pv, pvc, err = e2epv.CreatePVCPV(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false)
   193  				framework.ExpectNoError(err)
   194  				completeTest(ctx, f, c, ns, pv, pvc)
   195  			})
   196  
   197  			// Create a claim first, then a pre-bound nfs PV that matches the claim,
   198  			// and a pod that contains the claim. Verify that the PV and PVC bind
   199  			// correctly, and that the pod can write to the nfs volume.
   200  			ginkgo.It("create a PVC and a pre-bound PV: test write access", func(ctx context.Context) {
   201  				pv, pvc, err = e2epv.CreatePVCPV(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true)
   202  				framework.ExpectNoError(err)
   203  				completeTest(ctx, f, c, ns, pv, pvc)
   204  			})
   205  
   206  			// Create a nfs PV first, then a pre-bound PVC that matches the PV,
   207  			// and a pod that contains the claim. Verify that the PV and PVC bind
   208  			// correctly, and that the pod can write to the nfs volume.
   209  			ginkgo.It("create a PV and a pre-bound PVC: test write access", func(ctx context.Context) {
   210  				pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true)
   211  				framework.ExpectNoError(err)
   212  				completeTest(ctx, f, c, ns, pv, pvc)
   213  			})
   214  
   215  			// Create new PV without claim, verify it's in Available state and LastPhaseTransitionTime is set.
   216  			f.It("create a PV: test phase transition timestamp is set and phase is Available", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) {
   217  				pvObj := e2epv.MakePersistentVolume(pvConfig)
   218  				pv, err = e2epv.CreatePV(ctx, c, f.Timeouts, pvObj)
   219  				framework.ExpectNoError(err)
   220  
   221  				// The new PV should transition phase to: Available
   222  				err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeAvailable, c, pv.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
   223  				framework.ExpectNoError(err)
   224  
   225  				// Verify that new PV has phase transition timestamp set.
   226  				pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
   227  				framework.ExpectNoError(err)
   228  				if pv.Status.LastPhaseTransitionTime == nil {
   229  					framework.Failf("New persistent volume %v should have LastPhaseTransitionTime value set, but it's nil.", pv.GetName())
   230  				}
   231  			})
   232  
   233  			// Create PV and pre-bound PVC that matches the PV, verify that when PV and PVC bind
   234  			// the LastPhaseTransitionTime filed of the PV is updated.
   235  			f.It("create a PV and a pre-bound PVC: test phase transition timestamp is set", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) {
   236  				pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true)
   237  				framework.ExpectNoError(err)
   238  
   239  				// The claim should transition phase to: Bound
   240  				err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
   241  				framework.ExpectNoError(err)
   242  				pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
   243  				framework.ExpectNoError(err)
   244  				pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
   245  				framework.ExpectNoError(err)
   246  				if pv.Status.LastPhaseTransitionTime == nil {
   247  					framework.Failf("Persistent volume %v should have LastPhaseTransitionTime value set after transitioning phase, but it's nil.", pv.GetName())
   248  				}
   249  				completeTest(ctx, f, c, ns, pv, pvc)
   250  			})
   251  
   252  			// Create PV and pre-bound PVC that matches the PV, verify that when PV and PVC bind
   253  			// the LastPhaseTransitionTime field of the PV is set, then delete the PVC to change PV phase to
   254  			// released and validate PV LastPhaseTransitionTime correctly updated timestamp.
   255  			f.It("create a PV and a pre-bound PVC: test phase transition timestamp multiple updates", feature.PersistentVolumeLastPhaseTransitionTime, func(ctx context.Context) {
   256  				pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, true)
   257  				framework.ExpectNoError(err)
   258  
   259  				// The claim should transition phase to: Bound.
   260  				err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
   261  				framework.ExpectNoError(err)
   262  				pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
   263  				framework.ExpectNoError(err)
   264  				pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
   265  				framework.ExpectNoError(err)
   266  
   267  				// Save first phase transition time.
   268  				firstPhaseTransition := pv.Status.LastPhaseTransitionTime
   269  
   270  				// Let test finish and delete PVC.
   271  				completeTest(ctx, f, c, ns, pv, pvc)
   272  
   273  				// The claim should transition phase to: Released.
   274  				err = e2epv.WaitForPersistentVolumePhase(ctx, v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
   275  				framework.ExpectNoError(err)
   276  
   277  				// Verify the phase transition timestamp got updated chronologically *after* first phase transition.
   278  				pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{})
   279  				if !firstPhaseTransition.Before(pv.Status.LastPhaseTransitionTime) {
   280  					framework.Failf("Persistent volume %v should have LastPhaseTransitionTime value updated to be chronologically after previous phase change: %v, but it's %v.", pv.GetName(), firstPhaseTransition, pv.Status.LastPhaseTransitionTime)
   281  				}
   282  			})
   283  		})
   284  
   285  		// Create multiple pvs and pvcs, all in the same namespace. The PVs-PVCs are
   286  		// verified to bind, though it's not known in advanced which PV will bind to
   287  		// which claim. For each pv-pvc pair create a pod that writes to the nfs mount.
   288  		// Note: when the number of PVs exceeds the number of PVCs the max binding wait
   289  		//   time will occur for each PV in excess. This is expected but the delta
   290  		//   should be kept small so that the tests aren't unnecessarily slow.
   291  		// Note: future tests may wish to incorporate the following:
   292  		//   a) pre-binding, b) create pvcs before pvs, c) create pvcs and pods
   293  		//   in different namespaces.
   294  		ginkgo.Context("with multiple PVs and PVCs all in same ns", func() {
   295  
   296  			// scope the pv and pvc maps to be available in the AfterEach
   297  			// note: these maps are created fresh in CreatePVsPVCs()
   298  			var pvols e2epv.PVMap
   299  			var claims e2epv.PVCMap
   300  
   301  			ginkgo.AfterEach(func(ctx context.Context) {
   302  				framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
   303  				errs := e2epv.PVPVCMapCleanup(ctx, c, ns, pvols, claims)
   304  				if len(errs) > 0 {
   305  					errmsg := []string{}
   306  					for _, e := range errs {
   307  						errmsg = append(errmsg, e.Error())
   308  					}
   309  					framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
   310  				}
   311  			})
   312  
   313  			// Create 2 PVs and 4 PVCs.
   314  			// Note: PVs are created before claims and no pre-binding
   315  			ginkgo.It("should create 2 PVs and 4 PVCs: test write access", func(ctx context.Context) {
   316  				numPVs, numPVCs := 2, 4
   317  				pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
   318  				framework.ExpectNoError(err)
   319  				framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true))
   320  				framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased))
   321  			})
   322  
   323  			// Create 3 PVs and 3 PVCs.
   324  			// Note: PVs are created before claims and no pre-binding
   325  			ginkgo.It("should create 3 PVs and 3 PVCs: test write access", func(ctx context.Context) {
   326  				numPVs, numPVCs := 3, 3
   327  				pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
   328  				framework.ExpectNoError(err)
   329  				framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true))
   330  				framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased))
   331  			})
   332  
   333  			// Create 4 PVs and 2 PVCs.
   334  			// Note: PVs are created before claims and no pre-binding.
   335  			f.It("should create 4 PVs and 2 PVCs: test write access", f.WithSlow(), func(ctx context.Context) {
   336  				numPVs, numPVCs := 4, 2
   337  				pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvConfig, pvcConfig)
   338  				framework.ExpectNoError(err)
   339  				framework.ExpectNoError(e2epv.WaitAndVerifyBinds(ctx, c, f.Timeouts, ns, pvols, claims, true))
   340  				framework.ExpectNoError(completeMultiTest(ctx, f, c, ns, pvols, claims, v1.VolumeReleased))
   341  			})
   342  		})
   343  
   344  		// This Context isolates and tests the "Recycle" reclaim behavior.  On deprecation of the
   345  		// Recycler, this entire context can be removed without affecting the test suite or leaving behind
   346  		// dead code.
   347  		ginkgo.Context("when invoking the Recycle reclaim policy", func() {
   348  			ginkgo.BeforeEach(func(ctx context.Context) {
   349  				pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
   350  				pv, pvc, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig, pvcConfig, ns, false)
   351  				framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
   352  				framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
   353  			})
   354  
   355  			ginkgo.AfterEach(func(ctx context.Context) {
   356  				framework.Logf("AfterEach: Cleaning up test resources.")
   357  				if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv, pvc); len(errs) > 0 {
   358  					framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
   359  				}
   360  			})
   361  
   362  			// This ginkgo.It() tests a scenario where a PV is written to by a Pod, recycled, then the volume checked
   363  			// for files. If files are found, the checking Pod fails, failing the test.  Otherwise, the pod
   364  			// (and test) succeed.
   365  			ginkgo.It("should test that a PV becomes Available and is clean after the PVC is deleted.", func(ctx context.Context) {
   366  				ginkgo.By("Writing to the volume.")
   367  				pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
   368  				pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
   369  				framework.ExpectNoError(err)
   370  				framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
   371  
   372  				ginkgo.By("Deleting the claim")
   373  				framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
   374  				framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
   375  
   376  				ginkgo.By("Re-mounting the volume.")
   377  				pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
   378  				pvc, err = e2epv.CreatePVC(ctx, c, ns, pvc)
   379  				framework.ExpectNoError(err)
   380  				framework.ExpectNoError(e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, 60*time.Second), "Failed to reach 'Bound' for PVC ", pvc.Name)
   381  
   382  				// If a file is detected in /mnt, fail the pod and do not restart it.
   383  				ginkgo.By("Verifying the mount has been cleaned.")
   384  				mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
   385  				pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, f.NamespacePodSecurityLevel, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
   386  				pod, err = c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
   387  				framework.ExpectNoError(err)
   388  				framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, f.Timeouts.PodStart))
   389  
   390  				framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
   391  				framework.Logf("Pod exited without failure; the volume has been recycled.")
   392  
   393  				// Delete the PVC and wait for the recycler to finish before the NFS server gets shutdown during cleanup.
   394  				framework.Logf("Removing second PVC, waiting for the recycler to finish before cleanup.")
   395  				framework.ExpectNoError(e2epv.DeletePVCandValidatePV(ctx, c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
   396  				pvc = nil
   397  			})
   398  		})
   399  	})
   400  
   401  	ginkgo.Describe("CSI Conformance", func() {
   402  
   403  		var pvols e2epv.PVMap
   404  		var claims e2epv.PVCMap
   405  
   406  		ginkgo.AfterEach(func(ctx context.Context) {
   407  			framework.Logf("AfterEach: deleting %v PVCs and %v PVs...", len(claims), len(pvols))
   408  			errs := e2epv.PVPVCMapCleanup(ctx, c, ns, pvols, claims)
   409  			if len(errs) > 0 {
   410  				errmsg := []string{}
   411  				for _, e := range errs {
   412  					errmsg = append(errmsg, e.Error())
   413  				}
   414  				framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; "))
   415  			}
   416  		})
   417  
   418  		/*
   419  			Release: v1.29
   420  			Testname: PersistentVolumes(Claims), lifecycle
   421  			Description: Creating PV and PVC MUST succeed. Listing PVs with a labelSelector
   422  			MUST succeed. Listing PVCs in a namespace MUST succeed. Patching a PV MUST succeed
   423  			with its new label found. Patching a PVC MUST succeed with its new label found.
   424  			Reading a PV and PVC MUST succeed with required UID retrieved. Deleting a PVC
   425  			and PV MUST succeed and it MUST be confirmed. Replacement PV and PVC MUST be created.
   426  			Updating a PV MUST succeed with its new label found. Updating a PVC MUST succeed
   427  			with its new label found. Deleting the PVC and PV via deleteCollection MUST succeed
   428  			and it MUST be confirmed.
   429  		*/
   430  		framework.ConformanceIt("should run through the lifecycle of a PV and a PVC", func(ctx context.Context) {
   431  
   432  			pvClient := c.CoreV1().PersistentVolumes()
   433  			pvcClient := c.CoreV1().PersistentVolumeClaims(ns)
   434  
   435  			ginkgo.By("Creating initial PV and PVC")
   436  
   437  			// Configure csiDriver
   438  			defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
   439  			csiDriverLabel := map[string]string{"e2e-test": f.UniqueName}
   440  			csiDriver := &storagev1.CSIDriver{
   441  				ObjectMeta: metav1.ObjectMeta{
   442  					Name:   "inline-driver-" + string(uuid.NewUUID()),
   443  					Labels: csiDriverLabel,
   444  				},
   445  
   446  				Spec: storagev1.CSIDriverSpec{
   447  					VolumeLifecycleModes: []storagev1.VolumeLifecycleMode{
   448  						storagev1.VolumeLifecyclePersistent,
   449  					},
   450  					FSGroupPolicy: &defaultFSGroupPolicy,
   451  				},
   452  			}
   453  
   454  			pvNamePrefix := ns + "-"
   455  			pvHostPathConfig := e2epv.PersistentVolumeConfig{
   456  				NamePrefix:       pvNamePrefix,
   457  				Labels:           volLabel,
   458  				StorageClassName: ns,
   459  				PVSource: v1.PersistentVolumeSource{
   460  					CSI: &v1.CSIPersistentVolumeSource{
   461  						Driver:       csiDriver.Name,
   462  						VolumeHandle: "e2e-conformance",
   463  					},
   464  				},
   465  			}
   466  			pvcConfig := e2epv.PersistentVolumeClaimConfig{
   467  				StorageClassName: &ns,
   468  			}
   469  
   470  			numPVs, numPVCs := 1, 1
   471  			pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvHostPathConfig, pvcConfig)
   472  			framework.ExpectNoError(err, "Failed to create the requested storage resources")
   473  
   474  			ginkgo.By(fmt.Sprintf("Listing all PVs with the labelSelector: %q", volLabel.AsSelector().String()))
   475  			pvList, err := pvClient.List(ctx, metav1.ListOptions{LabelSelector: volLabel.AsSelector().String()})
   476  			framework.ExpectNoError(err, "Failed to list PVs with the labelSelector: %q", volLabel.AsSelector().String())
   477  			gomega.Expect(pvList.Items).To(gomega.HaveLen(1))
   478  			initialPV := pvList.Items[0]
   479  
   480  			ginkgo.By(fmt.Sprintf("Listing PVCs in namespace %q", ns))
   481  			pvcList, err := pvcClient.List(ctx, metav1.ListOptions{})
   482  			framework.ExpectNoError(err, "Failed to list PVCs with the labelSelector: %q", volLabel.AsSelector().String())
   483  			gomega.Expect(pvcList.Items).To(gomega.HaveLen(1))
   484  			initialPVC := pvcList.Items[0]
   485  
   486  			ginkgo.By(fmt.Sprintf("Patching the PV %q", initialPV.Name))
   487  			payload := "{\"metadata\":{\"labels\":{\"" + initialPV.Name + "\":\"patched\"}}}"
   488  			patchedPV, err := pvClient.Patch(ctx, initialPV.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
   489  			framework.ExpectNoError(err, "Failed to patch PV %q", initialPV.Name)
   490  			gomega.Expect(patchedPV.Labels).To(gomega.HaveKeyWithValue(patchedPV.Name, "patched"), "Checking that patched label has been applied")
   491  
   492  			ginkgo.By(fmt.Sprintf("Patching the PVC %q", initialPVC.Name))
   493  			payload = "{\"metadata\":{\"labels\":{\"" + initialPVC.Name + "\":\"patched\"}}}"
   494  			patchedPVC, err := pvcClient.Patch(ctx, initialPVC.Name, types.StrategicMergePatchType, []byte(payload), metav1.PatchOptions{})
   495  			framework.ExpectNoError(err, "Failed to patch PVC %q", initialPVC.Name)
   496  			gomega.Expect(patchedPVC.Labels).To(gomega.HaveKeyWithValue(patchedPVC.Name, "patched"), "Checking that patched label has been applied")
   497  
   498  			ginkgo.By(fmt.Sprintf("Getting PV %q", patchedPV.Name))
   499  			retrievedPV, err := pvClient.Get(ctx, patchedPV.Name, metav1.GetOptions{})
   500  			framework.ExpectNoError(err, "Failed to get PV %q", patchedPV.Name)
   501  			gomega.Expect(retrievedPV.UID).To(gomega.Equal(patchedPV.UID))
   502  
   503  			ginkgo.By(fmt.Sprintf("Getting PVC %q", patchedPVC.Name))
   504  			retrievedPVC, err := pvcClient.Get(ctx, patchedPVC.Name, metav1.GetOptions{})
   505  			framework.ExpectNoError(err, "Failed to get PVC %q", patchedPVC.Name)
   506  			gomega.Expect(retrievedPVC.UID).To(gomega.Equal(patchedPVC.UID))
   507  
   508  			ginkgo.By(fmt.Sprintf("Deleting PVC %q", retrievedPVC.Name))
   509  			err = pvcClient.Delete(ctx, retrievedPVC.Name, metav1.DeleteOptions{})
   510  			framework.ExpectNoError(err, "Failed to delete PVC %q", retrievedPVC.Name)
   511  
   512  			ginkgo.By(fmt.Sprintf("Confirm deletion of PVC %q", retrievedPVC.Name))
   513  
   514  			type state struct {
   515  				PersistentVolumes      []v1.PersistentVolume
   516  				PersistentVolumeClaims []v1.PersistentVolumeClaim
   517  			}
   518  
   519  			err = framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
   520  				pvcList, err := pvcClient.List(ctx, metav1.ListOptions{})
   521  				if err != nil {
   522  					return nil, fmt.Errorf("failed to list pvc: %w", err)
   523  				}
   524  				return &state{
   525  					PersistentVolumeClaims: pvcList.Items,
   526  				}, nil
   527  			})).WithTimeout(30 * time.Second).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
   528  				if len(s.PersistentVolumeClaims) == 0 {
   529  					return nil, nil
   530  				}
   531  				return func() string {
   532  					return fmt.Sprintf("Expected pvc to be deleted, found %q", s.PersistentVolumeClaims[0].Name)
   533  				}, nil
   534  			}))
   535  			framework.ExpectNoError(err, "Timeout while waiting to confirm PVC %q deletion", retrievedPVC.Name)
   536  
   537  			ginkgo.By(fmt.Sprintf("Deleting PV %q", retrievedPV.Name))
   538  			err = pvClient.Delete(ctx, retrievedPV.Name, metav1.DeleteOptions{})
   539  			framework.ExpectNoError(err, "Failed to delete PV %q", retrievedPV.Name)
   540  
   541  			ginkgo.By(fmt.Sprintf("Confirm deletion of PV %q", retrievedPV.Name))
   542  			err = framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
   543  				pvList, err := pvClient.List(ctx, metav1.ListOptions{LabelSelector: volLabel.AsSelector().String()})
   544  				if err != nil {
   545  					return nil, fmt.Errorf("failed to list pv: %w", err)
   546  				}
   547  				return &state{
   548  					PersistentVolumes: pvList.Items,
   549  				}, nil
   550  			})).WithTimeout(30 * time.Second).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
   551  				if len(s.PersistentVolumes) == 0 {
   552  					return nil, nil
   553  				}
   554  				return func() string {
   555  					return fmt.Sprintf("Expected pv to be deleted, found %q", s.PersistentVolumes[0].Name)
   556  				}, nil
   557  			}))
   558  			framework.ExpectNoError(err, "Timeout while waiting to confirm PV %q deletion", retrievedPV.Name)
   559  
   560  			ginkgo.By("Recreating another PV & PVC")
   561  			pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvHostPathConfig, pvcConfig)
   562  			framework.ExpectNoError(err, "Failed to create the requested storage resources")
   563  
   564  			var pvName string
   565  			for key := range pvols {
   566  				pvName = key
   567  			}
   568  
   569  			var pvcName string
   570  			for key := range claims {
   571  				pvcName = key.Name
   572  			}
   573  
   574  			ginkgo.By(fmt.Sprintf("Updating the PV %q", pvName))
   575  			var updatedPV *v1.PersistentVolume
   576  			pvSelector := labels.Set{pvName: "updated"}.AsSelector().String()
   577  
   578  			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   579  				pv, err := pvClient.Get(ctx, pvName, metav1.GetOptions{})
   580  				framework.ExpectNoError(err, "Unable to get PV %q", pvName)
   581  				pv.Labels[pvName] = "updated"
   582  				updatedPV, err = pvClient.Update(ctx, pv, metav1.UpdateOptions{})
   583  
   584  				return err
   585  			})
   586  			framework.ExpectNoError(err, "failed to update PV %q", pvName)
   587  			gomega.Expect(updatedPV.Labels).To(gomega.HaveKeyWithValue(updatedPV.Name, "updated"), "Checking that updated label has been applied")
   588  
   589  			ginkgo.By(fmt.Sprintf("Updating the PVC %q", pvcName))
   590  			var updatedPVC *v1.PersistentVolumeClaim
   591  			pvcSelector := labels.Set{pvcName: "updated"}.AsSelector().String()
   592  
   593  			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   594  				pvc, err := pvcClient.Get(ctx, pvcName, metav1.GetOptions{})
   595  				framework.ExpectNoError(err, "Unable to get PVC %q", pvcName)
   596  				pvc.Labels = map[string]string{
   597  					pvcName: "updated",
   598  				}
   599  				updatedPVC, err = pvcClient.Update(ctx, pvc, metav1.UpdateOptions{})
   600  
   601  				return err
   602  			})
   603  			framework.ExpectNoError(err, "failed to update PVC %q", pvcName)
   604  			gomega.Expect(updatedPVC.Labels).To(gomega.HaveKeyWithValue(updatedPVC.Name, "updated"), "Checking that updated label has been applied")
   605  
   606  			ginkgo.By(fmt.Sprintf("Listing PVCs in all namespaces with the labelSelector: %q", pvcSelector))
   607  			pvcList, err = c.CoreV1().PersistentVolumeClaims("").List(ctx, metav1.ListOptions{LabelSelector: pvcSelector})
   608  			framework.ExpectNoError(err, "Failed to list PVCs in all namespaces with the labelSelector: %q", pvcSelector)
   609  			gomega.Expect(pvcList.Items).To(gomega.HaveLen(1))
   610  
   611  			ginkgo.By(fmt.Sprintf("Deleting PVC %q via DeleteCollection", pvcName))
   612  			err = pvcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: pvcSelector})
   613  			framework.ExpectNoError(err, "Failed to delete PVC %q", retrievedPVC.Name)
   614  
   615  			ginkgo.By(fmt.Sprintf("Confirm deletion of PVC %q", pvcName))
   616  			err = framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
   617  				pvcList, err := pvcClient.List(ctx, metav1.ListOptions{LabelSelector: pvcSelector})
   618  				if err != nil {
   619  					return nil, fmt.Errorf("failed to list pvc: %w", err)
   620  				}
   621  				return &state{
   622  					PersistentVolumeClaims: pvcList.Items,
   623  				}, nil
   624  			})).WithTimeout(30 * time.Second).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
   625  				if len(s.PersistentVolumeClaims) == 0 {
   626  					return nil, nil
   627  				}
   628  				return func() string {
   629  					return fmt.Sprintf("Expected pvc to be deleted, found %q", s.PersistentVolumeClaims[0].Name)
   630  				}, nil
   631  			}))
   632  			framework.ExpectNoError(err, "Timeout while waiting to confirm PVC %q deletion", pvcName)
   633  
   634  			ginkgo.By(fmt.Sprintf("Deleting PV %q via DeleteCollection", pvName))
   635  			err = pvClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: pvSelector})
   636  			framework.ExpectNoError(err, "Failed to delete PV %q", retrievedPVC.Name)
   637  
   638  			ginkgo.By(fmt.Sprintf("Confirm deletion of PV %q", pvName))
   639  			err = framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
   640  				pvList, err := pvClient.List(ctx, metav1.ListOptions{LabelSelector: pvSelector})
   641  				if err != nil {
   642  					return nil, fmt.Errorf("failed to list pv: %w", err)
   643  				}
   644  				return &state{
   645  					PersistentVolumes: pvList.Items,
   646  				}, nil
   647  			})).WithTimeout(30 * time.Second).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
   648  				if len(s.PersistentVolumes) == 0 {
   649  					return nil, nil
   650  				}
   651  				return func() string {
   652  					return fmt.Sprintf("Expected pv to be deleted, found %q", s.PersistentVolumes[0].Name)
   653  				}, nil
   654  			}))
   655  			framework.ExpectNoError(err, "Timeout while waiting to confirm PV %q deletion", retrievedPV.Name)
   656  		})
   657  
   658  		/*
   659  			Release: v1.29
   660  			Testname: PersistentVolumes(Claims), apply changes to a pv/pvc status
   661  			Description: Creating PV and PVC MUST succeed. Listing PVs with a labelSelector
   662  			 MUST succeed. Listing PVCs in a namespace MUST succeed. Reading PVC status MUST
   663  			 succeed with a valid phase found. Reading PV status MUST succeed with a valid
   664  			 phase found. Patching the PVC status MUST succeed with its new condition found.
   665  			 Patching the PV status MUST succeed with the new reason/message found. Updating
   666  			 the PVC status MUST succeed with its new condition found. Updating the PV status
   667  			 MUST succeed with the new reason/message found.
   668  		*/
   669  		framework.ConformanceIt("should apply changes to a pv/pvc status", func(ctx context.Context) {
   670  
   671  			pvClient := c.CoreV1().PersistentVolumes()
   672  			pvcClient := c.CoreV1().PersistentVolumeClaims(ns)
   673  
   674  			ginkgo.By("Creating initial PV and PVC")
   675  
   676  			pvHostPathConfig := e2epv.PersistentVolumeConfig{
   677  				NamePrefix:       ns + "-",
   678  				Labels:           volLabel,
   679  				StorageClassName: ns,
   680  				PVSource: v1.PersistentVolumeSource{
   681  					CSI: &v1.CSIPersistentVolumeSource{
   682  						Driver:       "e2e-driver-" + string(uuid.NewUUID()),
   683  						VolumeHandle: "e2e-status-conformance",
   684  					},
   685  				},
   686  			}
   687  
   688  			pvcConfig := e2epv.PersistentVolumeClaimConfig{
   689  				StorageClassName: &ns,
   690  			}
   691  
   692  			numPVs, numPVCs := 1, 1
   693  			pvols, claims, err = e2epv.CreatePVsPVCs(ctx, numPVs, numPVCs, c, f.Timeouts, ns, pvHostPathConfig, pvcConfig)
   694  			framework.ExpectNoError(err, "Failed to create the requested storage resources")
   695  
   696  			ginkgo.By(fmt.Sprintf("Listing all PVs with the labelSelector: %q", volLabel.AsSelector().String()))
   697  			pvList, err := pvClient.List(ctx, metav1.ListOptions{LabelSelector: volLabel.AsSelector().String()})
   698  			framework.ExpectNoError(err, "Failed to list PVs with the labelSelector: %q", volLabel.AsSelector().String())
   699  			gomega.Expect(pvList.Items).To(gomega.HaveLen(1))
   700  			initialPV := pvList.Items[0]
   701  
   702  			ginkgo.By(fmt.Sprintf("Listing PVCs in namespace %q", ns))
   703  			pvcList, err := pvcClient.List(ctx, metav1.ListOptions{})
   704  			framework.ExpectNoError(err, "Failed to list PVCs with the labelSelector: %q", volLabel.AsSelector().String())
   705  			gomega.Expect(pvcList.Items).To(gomega.HaveLen(1))
   706  			initialPVC := pvcList.Items[0]
   707  
   708  			ginkgo.By(fmt.Sprintf("Reading %q Status", initialPVC.Name))
   709  			pvcResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}
   710  			pvcUnstructured, err := f.DynamicClient.Resource(pvcResource).Namespace(ns).Get(ctx, initialPVC.Name, metav1.GetOptions{}, "status")
   711  			framework.ExpectNoError(err, "Failed to fetch the status of PVC %s in namespace %s", initialPVC.Name, ns)
   712  			retrievedPVC := &v1.PersistentVolumeClaim{}
   713  			err = runtime.DefaultUnstructuredConverter.FromUnstructured(pvcUnstructured.UnstructuredContent(), &retrievedPVC)
   714  			framework.ExpectNoError(err, "Failed to retrieve %q status.", initialPV.Name)
   715  			gomega.Expect(string(retrievedPVC.Status.Phase)).To(gomega.Or(gomega.Equal("Pending"), gomega.Equal("Bound")), "Checking that the PVC status has been read")
   716  
   717  			ginkgo.By(fmt.Sprintf("Reading %q Status", initialPV.Name))
   718  			pvResource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumes"}
   719  			pvUnstructured, err := f.DynamicClient.Resource(pvResource).Get(ctx, initialPV.Name, metav1.GetOptions{}, "status")
   720  			framework.ExpectNoError(err, "Failed to fetch the status of PV %s in namespace %s", initialPV.Name, ns)
   721  			retrievedPV := &v1.PersistentVolume{}
   722  			err = runtime.DefaultUnstructuredConverter.FromUnstructured(pvUnstructured.UnstructuredContent(), &retrievedPV)
   723  			framework.ExpectNoError(err, "Failed to retrieve %q status.", initialPV.Name)
   724  			gomega.Expect(string(retrievedPV.Status.Phase)).To(gomega.Or(gomega.Equal("Available"), gomega.Equal("Bound"), gomega.Equal("Pending")), "Checking that the PV status has been read")
   725  
   726  			ginkgo.By(fmt.Sprintf("Patching %q Status", initialPVC.Name))
   727  			payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True", "reason":"E2E patchedStatus", "message":"Set from e2e test"}]}}`)
   728  
   729  			patchedPVC, err := pvcClient.Patch(ctx, initialPVC.Name, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
   730  			framework.ExpectNoError(err, "Failed to patch status.")
   731  
   732  			gomega.Expect(patchedPVC.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
   733  				"StatusPatched": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
   734  					"Message": gomega.ContainSubstring("Set from e2e test"),
   735  					"Reason":  gomega.ContainSubstring("E2E patchedStatus"),
   736  				}),
   737  			}), "Checking that patched status has been applied")
   738  
   739  			ginkgo.By(fmt.Sprintf("Patching %q Status", retrievedPV.Name))
   740  			payload = []byte(`{"status":{"message": "StatusPatched", "reason": "E2E patchStatus"}}`)
   741  
   742  			patchedPV, err := pvClient.Patch(ctx, retrievedPV.Name, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
   743  			framework.ExpectNoError(err, "Failed to patch %q status.", retrievedPV.Name)
   744  			gomega.Expect(patchedPV.Status.Reason).To(gomega.Equal("E2E patchStatus"), "Checking that patched status has been applied")
   745  			gomega.Expect(patchedPV.Status.Message).To(gomega.Equal("StatusPatched"), "Checking that patched status has been applied")
   746  
   747  			ginkgo.By(fmt.Sprintf("Updating %q Status", patchedPVC.Name))
   748  			var statusToUpdate, updatedPVC *v1.PersistentVolumeClaim
   749  
   750  			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   751  				statusToUpdate, err = pvcClient.Get(ctx, patchedPVC.Name, metav1.GetOptions{})
   752  				framework.ExpectNoError(err, "Unable to retrieve pvc %s", patchedPVC.Name)
   753  
   754  				statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, v1.PersistentVolumeClaimCondition{
   755  					Type:    "StatusUpdated",
   756  					Status:  "True",
   757  					Reason:  "E2E updateStatus",
   758  					Message: "Set from e2e test",
   759  				})
   760  
   761  				updatedPVC, err = pvcClient.UpdateStatus(ctx, statusToUpdate, metav1.UpdateOptions{})
   762  				return err
   763  			})
   764  			framework.ExpectNoError(err, "Failed to update status.")
   765  			gomega.Expect(updatedPVC.Status.Conditions).To(gstruct.MatchElements(conditionType, gstruct.IgnoreExtras, gstruct.Elements{
   766  				"StatusUpdated": gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
   767  					"Message": gomega.ContainSubstring("Set from e2e test"),
   768  					"Reason":  gomega.ContainSubstring("E2E updateStatus"),
   769  				}),
   770  			}), "Checking that updated status has been applied")
   771  
   772  			ginkgo.By(fmt.Sprintf("Updating %q Status", patchedPV.Name))
   773  			var pvToUpdate, updatedPV *v1.PersistentVolume
   774  
   775  			err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
   776  				pvToUpdate, err = pvClient.Get(ctx, patchedPV.Name, metav1.GetOptions{})
   777  				framework.ExpectNoError(err, "Unable to retrieve pv %s", patchedPV.Name)
   778  
   779  				pvToUpdate.Status.Reason = "E2E updateStatus"
   780  				pvToUpdate.Status.Message = "StatusUpdated"
   781  				updatedPV, err = pvClient.UpdateStatus(ctx, pvToUpdate, metav1.UpdateOptions{})
   782  				return err
   783  			})
   784  			framework.ExpectNoError(err, "Failed to update status.")
   785  			gomega.Expect(updatedPV.Status.Reason).To(gomega.Equal("E2E updateStatus"), "Checking that updated status has been applied")
   786  			gomega.Expect(updatedPV.Status.Message).To(gomega.Equal("StatusUpdated"), "Checking that updated status has been applied")
   787  		})
   788  	})
   789  
	// testsuites/multivolume tests can now run with windows nodes
	// This test is not compatible with windows because the default StorageClass
	// doesn't have the ntfs parameter, we can't change the status of the cluster
	// to add a StorageClass that's compatible with windows which is also the
	// default StorageClass
	ginkgo.Describe("Default StorageClass [LinuxOnly]", func() {
		ginkgo.Context("pods that use multiple volumes", func() {

			// Remove any StatefulSets (and their pods) left behind by a test.
			ginkgo.AfterEach(func(ctx context.Context) {
				e2estatefulset.DeleteAllStatefulSets(ctx, c, ns)
			})

			// Verifies that a StatefulSet pod using several PVCs can be deleted
			// and recreated while the data written to its volumes survives.
			f.It("should be reschedulable", f.WithSlow(), func(ctx context.Context) {
				// Only run on providers with default storageclass
				e2epv.SkipIfNoDefaultStorageClass(ctx, c)

				numVols := 4

				ginkgo.By("Creating a StatefulSet pod to initialize data")
				// Shell command: touch one marker file per volume, then sleep so
				// the pod stays Running while readiness is polled.
				writeCmd := "true"
				for i := 0; i < numVols; i++ {
					writeCmd += fmt.Sprintf("&& touch %v", getVolumeFile(i))
				}
				writeCmd += "&& sleep 10000"

				// Readiness only succeeds once the marker file on the last
				// volume exists, i.e. every volume was mounted and written.
				probe := &v1.Probe{
					ProbeHandler: v1.ProbeHandler{
						Exec: &v1.ExecAction{
							// Check that the last file got created
							Command: []string{"test", "-f", getVolumeFile(numVols - 1)},
						},
					},
					InitialDelaySeconds: 1,
					PeriodSeconds:       1,
				}

				// One claim (and matching mount) per volume, named vol0..vol3.
				mounts := []v1.VolumeMount{}
				claims := []v1.PersistentVolumeClaim{}

				for i := 0; i < numVols; i++ {
					pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{}, ns)
					pvc.Name = getVolName(i)
					mounts = append(mounts, v1.VolumeMount{Name: pvc.Name, MountPath: getMountPath(i)})
					claims = append(claims, *pvc)
				}

				spec := makeStatefulSetWithPVCs(ns, writeCmd, mounts, claims, probe)
				ss, err := c.AppsV1().StatefulSets(ns).Create(ctx, spec, metav1.CreateOptions{})
				framework.ExpectNoError(err)
				e2estatefulset.WaitForRunningAndReady(ctx, c, 1, ss)

				ginkgo.By("Deleting the StatefulSet but not the volumes")
				// Scale down to 0 first so that the Delete is quick
				ss, err = e2estatefulset.Scale(ctx, c, ss, 0)
				framework.ExpectNoError(err)
				e2estatefulset.WaitForStatusReplicas(ctx, c, ss, 0)
				err = c.AppsV1().StatefulSets(ns).Delete(ctx, ss.Name, metav1.DeleteOptions{})
				framework.ExpectNoError(err)

				ginkgo.By("Creating a new Statefulset and validating the data")
				// The replacement pod's readiness depends on every marker file
				// still existing, which proves the volume data was retained.
				validateCmd := "true"
				for i := 0; i < numVols; i++ {
					validateCmd += fmt.Sprintf("&& test -f %v", getVolumeFile(i))
				}
				validateCmd += "&& sleep 10000"

				spec = makeStatefulSetWithPVCs(ns, validateCmd, mounts, claims, probe)
				ss, err = c.AppsV1().StatefulSets(ns).Create(ctx, spec, metav1.CreateOptions{})
				framework.ExpectNoError(err)
				e2estatefulset.WaitForRunningAndReady(ctx, c, 1, ss)
			})
		})
	})
   863  })
   864  
   865  func getVolName(i int) string {
   866  	return fmt.Sprintf("vol%v", i)
   867  }
   868  
   869  func getMountPath(i int) string {
   870  	return fmt.Sprintf("/mnt/%v", getVolName(i))
   871  }
   872  
   873  func getVolumeFile(i int) string {
   874  	return fmt.Sprintf("%v/data%v", getMountPath(i), i)
   875  }
   876  
   877  func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v1.PersistentVolumeClaim, readyProbe *v1.Probe) *appsv1.StatefulSet {
   878  	ssReplicas := int32(1)
   879  
   880  	labels := map[string]string{"app": "many-volumes-test"}
   881  	return &appsv1.StatefulSet{
   882  		ObjectMeta: metav1.ObjectMeta{
   883  			Name:      "many-volumes-test",
   884  			Namespace: ns,
   885  		},
   886  		Spec: appsv1.StatefulSetSpec{
   887  			Selector: &metav1.LabelSelector{
   888  				MatchLabels: map[string]string{"app": "many-volumes-test"},
   889  			},
   890  			Replicas: &ssReplicas,
   891  			Template: v1.PodTemplateSpec{
   892  				ObjectMeta: metav1.ObjectMeta{
   893  					Labels: labels,
   894  				},
   895  				Spec: v1.PodSpec{
   896  					Containers: []v1.Container{
   897  						{
   898  							Name:           "nginx",
   899  							Image:          e2epod.GetTestImage(imageutils.Nginx),
   900  							Command:        e2epod.GenerateScriptCmd(cmd),
   901  							VolumeMounts:   mounts,
   902  							ReadinessProbe: readyProbe,
   903  						},
   904  					},
   905  				},
   906  			},
   907  			VolumeClaimTemplates: claims,
   908  		},
   909  	}
   910  }
   911  
   912  // createWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
   913  // Note: need named return value so that the err assignment in the defer sets the returned error.
   914  //
   915  //	Has been shown to be necessary using Go 1.7.
   916  func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
   917  	framework.Logf("Creating nfs test pod")
   918  	pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelPrivileged, command)
   919  	runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
   920  	if err != nil {
   921  		return fmt.Errorf("pod Create API error: %w", err)
   922  	}
   923  	defer func() {
   924  		delErr := e2epod.DeletePodWithWait(ctx, c, runPod)
   925  		if err == nil { // don't override previous err value
   926  			err = delErr // assign to returned err, can be nil
   927  		}
   928  	}()
   929  
   930  	err = testPodSuccessOrFail(ctx, c, t, ns, runPod)
   931  	if err != nil {
   932  		return fmt.Errorf("pod %q did not exit with Success: %w", runPod.Name, err)
   933  	}
   934  	return // note: named return value
   935  }
   936  
   937  // testPodSuccessOrFail tests whether the pod's exit code is zero.
   938  func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
   939  	framework.Logf("Pod should terminate with exitcode 0 (success)")
   940  	if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil {
   941  		return fmt.Errorf("pod %q failed to reach Success: %w", pod.Name, err)
   942  	}
   943  	framework.Logf("Pod %v succeeded ", pod.Name)
   944  	return nil
   945  }
   946  
   947  func conditionType(condition interface{}) string {
   948  	return string(condition.(v1.PersistentVolumeClaimCondition).Type)
   949  }
   950  

View as plain text