17 package storage
18
19 import (
20 "context"
21 "fmt"
22 "path/filepath"
23 "time"
24
25 v1 "k8s.io/api/core/v1"
26 storagev1 "k8s.io/api/storage/v1"
27 "k8s.io/apimachinery/pkg/api/resource"
28 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
29 clientset "k8s.io/client-go/kubernetes"
30
31 "github.com/onsi/ginkgo/v2"
32 "github.com/onsi/gomega"
33 "k8s.io/apimachinery/pkg/util/rand"
34 "k8s.io/apimachinery/pkg/util/wait"
35 "k8s.io/kubernetes/test/e2e/framework"
36 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
37 "k8s.io/kubernetes/test/e2e/storage/testsuites"
38 "k8s.io/kubernetes/test/e2e/storage/utils"
39 admissionapi "k8s.io/pod-security-admission/api"
40 )
41
const (
	// csiResizeWaitPeriod bounds how long UpdatePVSize keeps retrying the
	// PV capacity update before giving up.
	csiResizeWaitPeriod = 5 * time.Minute
)
45
// Exercises online expansion of a local (loopback-backed) block volume:
// while a pod is using the volume, the PVC is resized, the backing loop
// device is grown, the PV capacity is raised, and the test waits for the
// file system resize to finish.
var _ = utils.SIGDescribe("PersistentVolumes-expansion", func() {
	f := framework.NewDefaultFramework("persistent-local-volumes-expansion")
	// Local-volume tests need privileged host access (loop devices, mounts).
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	ginkgo.Context("loopback local block volume", func() {
		var (
			config *localTestConfig
			scName string
		)

		testVolType := BlockFsWithFormatLocalVolumeType
		var testVol *localTestVolume
		testMode := immediateMode
		ginkgo.BeforeEach(func(ctx context.Context) {
			nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, f.ClientSet, maxNodes)
			framework.ExpectNoError(err)

			// Namespace-scoped storage class name so parallel runs don't collide.
			scName = fmt.Sprintf("%v-%v", testSCPrefix, f.Namespace.Name)

			// Pin the volume (and later the pod) to one randomly chosen node.
			randomNode := &nodes.Items[rand.Intn(len(nodes.Items))]

			hostExec := utils.NewHostExec(f)
			ltrMgr := utils.NewLocalResourceManager("local-volume-test", hostExec, hostBase)
			config = &localTestConfig{
				ns:           f.Namespace.Name,
				client:       f.ClientSet,
				timeouts:     f.Timeouts,
				nodes:        nodes.Items,
				randomNode:   randomNode,
				scName:       scName,
				discoveryDir: filepath.Join(hostBase, f.Namespace.Name),
				hostExec:     hostExec,
				ltrMgr:       ltrMgr,
			}

			// Expandable SC must exist before the PVC/PV pair binds to it.
			setupExpandableLocalStorageClass(ctx, config, &testMode)
			testVols := setupLocalVolumesPVCsPVs(ctx, config, testVolType, config.randomNode, 1, testMode)
			testVol = testVols[0]
		})
		ginkgo.AfterEach(func(ctx context.Context) {
			cleanupLocalVolumes(ctx, config, []*localTestVolume{testVol})
			cleanupStorageClass(ctx, config)
		})

		ginkgo.It("should support online expansion on node", func(ctx context.Context) {
			var (
				pod1    *v1.Pod
				pod1Err error
			)
			// The pod stays running for the whole test: this is what makes the
			// expansion "online" rather than offline.
			ginkgo.By("Creating pod1")
			pod1, pod1Err = createLocalPod(ctx, config, testVol, nil)
			framework.ExpectNoError(pod1Err)
			verifyLocalPod(ctx, config, testVol, pod1, config.randomNode.Name)

			ginkgo.By("Expanding current pvc")
			currentPvcSize := testVol.pvc.Spec.Resources.Requests[v1.ResourceStorage]
			newSize := currentPvcSize.DeepCopy()
			newSize.Add(resource.MustParse("10Mi"))
			framework.Logf("currentPvcSize %s, newSize %s", currentPvcSize.String(), newSize.String())
			newPVC, err := testsuites.ExpandPVCSize(ctx, testVol.pvc, newSize, f.ClientSet)
			framework.ExpectNoError(err, "While updating pvc for more size")
			testVol.pvc = newPVC
			gomega.Expect(testVol.pvc).NotTo(gomega.BeNil())

			// Confirm the apiserver accepted the larger request.
			pvcSize := testVol.pvc.Spec.Resources.Requests[v1.ResourceStorage]
			if pvcSize.Cmp(newSize) != 0 {
				framework.Failf("error updating pvc size %q", testVol.pvc.Name)
			}

			// Grow the backing loop device to actually provide the new space.
			// NOTE(review): the bare "10" presumably means 10Mi to match the
			// PVC delta above — confirm against ExpandBlockDevice's contract.
			err = config.ltrMgr.ExpandBlockDevice(ctx, testVol.ltr, 10)
			framework.ExpectNoError(err, "while expanding loopback device")

			// No provisioner for local PVs, so the PV capacity is bumped by hand.
			pv, err := UpdatePVSize(ctx, testVol.pv, newSize, f.ClientSet)
			framework.ExpectNoError(err, "while updating pv to more size")
			gomega.Expect(pv).NotTo(gomega.BeNil())
			testVol.pv = pv

			ginkgo.By("Waiting for file system resize to finish")
			testVol.pvc, err = testsuites.WaitForFSResize(ctx, testVol.pvc, f.ClientSet)
			framework.ExpectNoError(err, "while waiting for fs resize to finish")

			// A fully completed resize clears all PVC conditions.
			pvcConditions := testVol.pvc.Status.Conditions
			gomega.Expect(pvcConditions).To(gomega.BeEmpty(), "pvc should not have conditions")
		})

	})

})
136
137 func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Quantity, c clientset.Interface) (*v1.PersistentVolume, error) {
138 pvName := pv.Name
139 pvToUpdate := pv.DeepCopy()
140
141 var lastError error
142 waitErr := wait.PollUntilContextTimeout(ctx, 5*time.Second, csiResizeWaitPeriod, true, func(ctx context.Context) (bool, error) {
143 var err error
144 pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
145 if err != nil {
146 return false, fmt.Errorf("error fetching pv %s: %w", pvName, err)
147 }
148 pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
149 pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{})
150 if err != nil {
151 framework.Logf("error updating PV %s: %v", pvName, err)
152 lastError = err
153 return false, nil
154 }
155 return true, nil
156 })
157 if wait.Interrupted(waitErr) {
158 return nil, fmt.Errorf("timed out attempting to update PV size. last update error: %v", lastError)
159 }
160 if waitErr != nil {
161 return nil, fmt.Errorf("failed to expand PV size: %v", waitErr)
162 }
163 return pvToUpdate, nil
164 }
165
166 func setupExpandableLocalStorageClass(ctx context.Context, config *localTestConfig, mode *storagev1.VolumeBindingMode) {
167 enableExpansion := true
168 sc := &storagev1.StorageClass{
169 ObjectMeta: metav1.ObjectMeta{
170 Name: config.scName,
171 },
172 Provisioner: "kubernetes.io/no-provisioner",
173 VolumeBindingMode: mode,
174 AllowVolumeExpansion: &enableExpansion,
175 }
176
177 _, err := config.client.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{})
178 framework.ExpectNoError(err)
179 }
180