package csi_mock

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

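// These tests configure the mock CSI driver with a volume attach limit, then
// verify that the limit is published in the CSINode object and that pods
// requesting volumes beyond the limit end up with the "max volume count"
// condition instead of running.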
var _ = utils.SIGDescribe("CSI Mock volume limit", func() {
	f := framework.NewDefaultFramework("csi-mock-volumes-limit")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	m := newMockDriverSetup(f)

	ginkgo.Context("CSI volume limit information using mock driver", func() {
		f.It("should report attach limit when limit is bigger than 0", f.WithSlow(), func(ctx context.Context) {
			var err error
			m.init(ctx, testParameters{attachLimit: 2})
			ginkgo.DeferCleanup(m.cleanup)

			nodeName := m.config.ClientNodeSelection.Name
			driverName := m.config.GetUniqueDriverName()

			csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
			framework.ExpectNoError(err, "while checking limits in CSINode: %v", err)

			gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))

			_, _, pod1 := m.createPod(ctx, pvcReference)
			gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")

			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace)
			framework.ExpectNoError(err, "Failed to start pod1: %v", err)

			_, _, pod2 := m.createPod(ctx, pvcReference)
			gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating second pod")

			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod2.Name, pod2.Namespace)
			framework.ExpectNoError(err, "Failed to start pod2: %v", err)

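			// With an attach limit of 2 and two volumes already attached, the
			// third pod cannot get its volume attached and is expected to
			// report the "max volume count" condition.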
			_, _, pod3 := m.createPod(ctx, pvcReference)
			gomega.Expect(pod3).NotTo(gomega.BeNil(), "while creating third pod")
			err = waitForMaxVolumeCondition(pod3, m.cs)
			framework.ExpectNoError(err, "while waiting for max volume condition on pod: %+v", pod3)
		})

		f.It("should report attach limit for generic ephemeral volume when persistent volume is attached", f.WithSlow(), func(ctx context.Context) {
			var err error
			m.init(ctx, testParameters{attachLimit: 1})
			ginkgo.DeferCleanup(m.cleanup)

			nodeName := m.config.ClientNodeSelection.Name
			driverName := m.config.GetUniqueDriverName()

			csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
			framework.ExpectNoError(err, "while checking limits in CSINode: %v", err)

			gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 1))

			_, _, pod1 := m.createPod(ctx, pvcReference)
			gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating pod with persistent volume")

			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace)
			framework.ExpectNoError(err, "Failed to start pod1: %v", err)

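			// The single attach slot is already consumed by the persistent
			// volume, so a pod using a generic ephemeral volume should hit the
			// limit and report the "max volume count" condition.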
			_, _, pod2 := m.createPod(ctx, genericEphemeral)
			gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod with ephemeral volume")
			err = waitForMaxVolumeCondition(pod2, m.cs)
			framework.ExpectNoError(err, "while waiting for max volume condition on pod: %+v", pod2)
		})

		f.It("should report attach limit for persistent volume when generic ephemeral volume is attached", f.WithSlow(), func(ctx context.Context) {
			var err error
			m.init(ctx, testParameters{attachLimit: 1})
			ginkgo.DeferCleanup(m.cleanup)

			nodeName := m.config.ClientNodeSelection.Name
			driverName := m.config.GetUniqueDriverName()

			csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
			framework.ExpectNoError(err, "while checking limits in CSINode: %v", err)

			gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 1))

			_, _, pod1 := m.createPod(ctx, genericEphemeral)
			gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating pod with ephemeral volume")

			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod1.Name, pod1.Namespace)
			framework.ExpectNoError(err, "Failed to start pod1: %v", err)

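			// The single attach slot is already consumed by the ephemeral
			// volume, so a pod using a persistent volume should hit the limit
			// and report the "max volume count" condition.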
			_, _, pod2 := m.createPod(ctx, pvcReference)
			gomega.Expect(pod2).NotTo(gomega.BeNil(), "while creating pod with persistent volume")
			err = waitForMaxVolumeCondition(pod2, m.cs)
			framework.ExpectNoError(err, "while waiting for max volume condition on pod: %+v", pod2)
		})
	})
})

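// checkCSINodeForLimits polls the CSINode object of the given node until the
// named driver reports a non-zero allocatable volume count, then returns that
// count. It returns an error if no limit shows up within
// csiNodeLimitUpdateTimeout.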
func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Interface) (int32, error) {
	var attachLimit int32

	waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
		csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			return false, err
		}
		attachLimit = getVolumeLimitFromCSINode(csiNode, driverName)
		if attachLimit > 0 {
			return true, nil
		}
		return false, nil
	})
	if waitErr != nil {
		return 0, fmt.Errorf("error waiting for non-zero volume limit of driver %s on node %s: %v", driverName, nodeName, waitErr)
	}
	return attachLimit, nil
}

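// getVolumeLimitFromCSINode returns the allocatable volume count reported by
// the named driver in the given CSINode spec, or 0 if the driver is not listed
// or does not report a limit.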
func getVolumeLimitFromCSINode(csiNode *storagev1.CSINode, driverName string) int32 {
	for _, d := range csiNode.Spec.Drivers {
		if d.Name != driverName {
			continue
		}
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			return *d.Allocatable.Count
		}
	}
	return 0
}