package storage

import (
	"context"
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)

const (
	// These counts size the wrapped-volume race tests below: each iteration
	// creates a replication controller whose pods each mount this many
	// volumes.
	wrappedVolumeRaceConfigMapVolumeCount    = 50
	wrappedVolumeRaceConfigMapPodCount       = 5
	wrappedVolumeRaceConfigMapIterationCount = 3
	wrappedVolumeRaceGitRepoVolumeCount      = 50
	wrappedVolumeRaceGitRepoPodCount         = 5
	wrappedVolumeRaceGitRepoIterationCount   = 3
	wrappedVolumeRaceRCNamePrefix            = "wrapped-volume-race-"
)

var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
	f := framework.NewDefaultFramework("emptydir-wrapper")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
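
	// Mounts a Secret volume and a ConfigMap volume that share the same object
	// name in a single pod and verifies the pod starts, i.e. the two
	// emptyDir-wrapped volumes do not conflict with each other.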
	framework.ConformanceIt("should not conflict", func(ctx context.Context) {
		name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		var err error
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
		}

		configMapVolumeName := "configmap-volume"
		configMapVolumeMountPath := "/etc/configmap-volume"

		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			BinaryData: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
					{
						Name: configMapVolumeName,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "secret-test",
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args:  []string{"test-webserver"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      configMapVolumeName,
								MountPath: configMapVolumeMountPath,
							},
						},
					},
				},
			},
		}
		pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
		ginkgo.DeferCleanup(func(ctx context.Context) {
			ginkgo.By("Cleaning up the secret")
			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}); err != nil {
				framework.Failf("unable to delete secret %v: %v", secret.Name, err)
			}
			ginkgo.By("Cleaning up the configmap")
			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}); err != nil {
				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
			}
			ginkgo.By("Cleaning up the pod")
			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
			}
		})
	})
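
	// The following tests create pods that mount many wrapped volumes at once
	// to check for races in volume setup. They run serially (and the gitRepo
	// variant is additionally marked Slow) because they create a large number
	// of volumes, pods, and container volume mounts, which takes considerable
	// time and could interfere with other tests.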
	framework.ConformanceIt("should not cause race condition when used for configmaps", f.WithSerial(), func(ctx context.Context) {
		configMapNames := createConfigmapsForRace(ctx, f)
		ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames)
		volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
		for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
			testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
		}
	})
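
	// Same race check using gitRepo volumes, each cloned from an in-cluster
	// git server started by createGitServer below.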
	f.It("should not cause race condition when used for git_repo", f.WithSerial(), f.WithSlow(), func(ctx context.Context) {
		gitURL, gitRepo, cleanup := createGitServer(ctx, f)
		defer cleanup()
		volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
		for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
			testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
		}
	})
})
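
// createGitServer starts an agnhost "fake-gitserver" pod and a Service in
// front of it, and returns the git URL, the repository directory to clone,
// and a cleanup function that deletes both objects.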
func createGitServer(ctx context.Context, f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := int32(8000)

	labels := map[string]string{"name": gitServerPodName}

	gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver")
	gitServerPod.ObjectMeta.Labels = labels
	e2epod.NewPodClient(f).CreateSync(ctx, gitServerPod)

	// Service port fronting the git server's container port.
	httpPort := 2345

	gitServerSvc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt32(containerPort),
				},
			},
		},
	}

	if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, gitServerSvc, metav1.CreateOptions{}); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}

	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		ginkgo.By("Cleaning up the git server pod")
		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		ginkgo.By("Cleaning up the git server svc")
		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, gitServerSvc.Name, metav1.DeleteOptions{}); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}
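
// makeGitRepoVolumes builds wrappedVolumeRaceGitRepoVolumeCount gitRepo
// volumes pointing at the given git URL and repository directory, together
// with matching volume mounts.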
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
		volumeName := fmt.Sprintf("racey-git-repo-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				GitRepo: &v1.GitRepoVolumeSource{
					Repository: gitURL,
					Directory:  gitRepo,
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
		})
	}
	return
}
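
// createConfigmapsForRace creates wrappedVolumeRaceConfigMapVolumeCount
// ConfigMaps in the test namespace and returns their names.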
func createConfigmapsForRace(ctx context.Context, f *framework.Framework) (configMapNames []string) {
	ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
	for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
		configMapName := fmt.Sprintf("racey-configmap-%d", i)
		configMapNames = append(configMapNames, configMapName)
		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      configMapName,
			},
			Data: map[string]string{
				"data-1": "value-1",
			},
		}
		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
		framework.ExpectNoError(err)
	}
	return
}
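
// deleteConfigMaps removes the ConfigMaps created by createConfigmapsForRace.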
func deleteConfigMaps(ctx context.Context, f *framework.Framework, configMapNames []string) {
	ginkgo.By("Cleaning up the configMaps")
	for _, configMapName := range configMapNames {
		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMapName, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
	}
}
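
// makeConfigMapVolumes builds one ConfigMap volume (projecting key "data-1")
// and a matching volume mount for each ConfigMap name.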
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i, configMapName := range configMapNames {
		volumeName := fmt.Sprintf("racey-configmap-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: configMapName,
					},
					Items: []v1.KeyToPath{
						{
							Key:  "data-1",
							Path: "data-1",
						},
					},
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/config-%d", i),
		})
	}
	return
}
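
// testNoWrappedVolumeRace creates a replication controller that schedules
// podCount pods, all pinned to a single node and all mounting the given
// volumes, and then verifies that every pod reaches the Running state.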
func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	const nodeHostnameLabelKey = "kubernetes.io/hostname"

	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	targetNode, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
	framework.ExpectNoError(err)

	ginkgo.By("Creating RC which spawns configmap-volume pods")
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      nodeHostnameLabelKey,
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Labels[nodeHostnameLabelKey]},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:         "test-container",
							Image:        imageutils.GetE2EImage(imageutils.Pause),
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rc, metav1.CreateOptions{})
	framework.ExpectNoError(err, "error creating replication controller")

	ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)

	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount)
	framework.ExpectNoError(err, "error creating pods")

	ginkgo.By("Ensuring each pod is running")

	// Pods that are already being deleted are skipped; every remaining pod
	// must reach the Running state.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
	}
}