package storage

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/version"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	"k8s.io/kubernetes/test/e2e/upgrades"

	"github.com/onsi/ginkgo/v2"
)
// devicePath is the path inside the test pod at which the raw block device
// backing the PVC is checked (present before the downgrade, absent after).
const devicePath = "/mnt/volume1"
// VolumeModeDowngradeTest verifies that a raw-block PV/PVC consumed by a pod
// before a cluster downgrade is no longer accessible as a block device at
// devicePath afterwards. The PV, PVC, and pod created in Setup are retained
// here so Test and Teardown can operate on them.
type VolumeModeDowngradeTest struct {
	pv  *v1.PersistentVolume      // bound PV backing the claim; looked up in Setup
	pvc *v1.PersistentVolumeClaim // block-mode claim created in Setup
	pod *v1.Pod                   // pod consuming the claim across the downgrade
}
47
48
49 func (VolumeModeDowngradeTest) Name() string {
50 return "[sig-storage] volume-mode-downgrade"
51 }
52
53
54 func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
55 if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
56 return true
57 }
58
59
60 blockVersion := version.MustParseSemantic("1.13.0-alpha.0")
61 if upgCtx.Versions[0].Version.LessThan(blockVersion) {
62 return true
63 }
64 if !upgCtx.Versions[1].Version.LessThan(blockVersion) {
65 return true
66 }
67
68 return false
69 }
// Setup creates a raw-block PVC, waits for it to bind, records the bound PV,
// then starts a pod consuming the claim and verifies block-mode access works
// before the downgrade. Failures abort the test via framework.ExpectNoError.
func (t *VolumeModeDowngradeTest) Setup(ctx context.Context, f *framework.Framework) {

	var err error

	cs := f.ClientSet
	ns := f.Namespace.Name

	ginkgo.By("Creating a PVC")
	block := v1.PersistentVolumeBlock
	pvcConfig := e2epv.PersistentVolumeClaimConfig{
		// nil StorageClassName leaves class selection to the cluster default.
		StorageClassName: nil,
		VolumeMode:       &block,
	}
	t.pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
	t.pvc, err = e2epv.CreatePVC(ctx, cs, ns, t.pvc)
	framework.ExpectNoError(err)

	err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, cs, ns, t.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
	framework.ExpectNoError(err)

	// Re-fetch the claim so Spec.VolumeName is populated, then record the
	// bound PV so Teardown can wait for its deletion.
	t.pvc, err = cs.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Get(ctx, t.pvc.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	t.pv, err = cs.CoreV1().PersistentVolumes().Get(ctx, t.pvc.Spec.VolumeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Consuming the PVC before downgrade")
	podConfig := e2epod.Config{
		NS:           ns,
		PVCs:         []*v1.PersistentVolumeClaim{t.pvc},
		SeLinuxLabel: e2epv.SELinuxLabel,
	}
	t.pod, err = e2epod.CreateSecPod(ctx, cs, &podConfig, framework.PodStartTimeout)
	framework.ExpectNoError(err)

	ginkgo.By("Checking if PV exists as expected volume mode")
	e2evolume.CheckVolumeModeOfPath(f, t.pod, block, devicePath)

	ginkgo.By("Checking if read/write to PV works properly")
	storageutils.CheckReadWriteToPath(f, t.pod, block, devicePath)
}
113
114
115
116 func (t *VolumeModeDowngradeTest) Test(ctx context.Context, f *framework.Framework, done <-chan struct{}, upgrade upgrades.UpgradeType) {
117 ginkgo.By("Waiting for downgrade to finish")
118 <-done
119
120 ginkgo.By("Verifying that nothing exists at the device path in the pod")
121 e2evolume.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
122 }
123
124
125 func (t *VolumeModeDowngradeTest) Teardown(ctx context.Context, f *framework.Framework) {
126 ginkgo.By("Deleting the pod")
127 framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, f.ClientSet, t.pod))
128
129 ginkgo.By("Deleting the PVC")
130 framework.ExpectNoError(f.ClientSet.CoreV1().PersistentVolumeClaims(t.pvc.Namespace).Delete(ctx, t.pvc.Name, metav1.DeleteOptions{}))
131
132 ginkgo.By("Waiting for the PV to be deleted")
133 framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, f.ClientSet, t.pv.Name, 5*time.Second, 20*time.Minute))
134 }