package cm

import (
	"errors"
	"os"
	"path"
	"testing"
	"time"

	"github.com/golang/mock/gomock"
	cadvisorapiv2 "github.com/google/cadvisor/info/v2"

	"github.com/opencontainers/runc/libcontainer/cgroups"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"

	"k8s.io/mount-utils"
)

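// fakeContainerMgrMountInt simulates a cgroup v1 host by faking mount points
// for the cpuset, cpu, cpuacct, and memory subsystems, each on its own
// hierarchy.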
func fakeContainerMgrMountInt() mount.Interface {
	return mount.NewFakeMounter(
		[]mount.MountPoint{
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuset"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpu"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuacct"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "memory"},
			},
		})
}

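// TestCgroupMountValidationSuccess verifies that validateSystemRequirements
// accepts a complete set of cgroup v1 mounts. CPU hardcapping is only
// reported on cgroup v2; on v1 the fake cpu mount has no path exposing
// cpu.cfs_* files, so hardcapping is reported as unavailable.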
func TestCgroupMountValidationSuccess(t *testing.T) {
	f, err := validateSystemRequirements(fakeContainerMgrMountInt())
	assert.NoError(t, err)
	if cgroups.IsCgroup2UnifiedMode() {
		assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled")
	} else {
		assert.False(t, f.cpuHardcapping, "cpu hardcapping is expected to be disabled")
	}
}

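// TestCgroupMountValidationMemoryMissing verifies that validation fails on
// cgroup v1 when the memory subsystem is not mounted.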
func TestCgroupMountValidationMemoryMissing(t *testing.T) {
	if cgroups.IsCgroup2UnifiedMode() {
		t.Skip("skipping cgroup v1 test on a cgroup v2 system")
	}
	mountInt := mount.NewFakeMounter(
		[]mount.MountPoint{
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuset"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpu"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuacct"},
			},
		})
	_, err := validateSystemRequirements(mountInt)
	assert.Error(t, err)
}

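// TestCgroupMountValidationMultipleSubsystem verifies that validation accepts
// subsystems co-mounted on a single hierarchy (here cpuset and memory).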
func TestCgroupMountValidationMultipleSubsystem(t *testing.T) {
	if cgroups.IsCgroup2UnifiedMode() {
		t.Skip("skipping cgroup v1 test on a cgroup v2 system")
	}
	mountInt := mount.NewFakeMounter(
		[]mount.MountPoint{
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuset", "memory"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpu"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuacct"},
			},
		})
	_, err := validateSystemRequirements(mountInt)
	assert.NoError(t, err)
}

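// TestGetCpuWeight checks the conversion from cgroup v1 CPU shares to a
// cgroup v2 CPU weight: nil yields 0, and the result is clamped to the
// [1, 10000] weight range.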
func TestGetCpuWeight(t *testing.T) {
	assert.Equal(t, uint64(0), getCPUWeight(nil))

	v := uint64(2)
	assert.Equal(t, uint64(1), getCPUWeight(&v))

	v = uint64(262144)
	assert.Equal(t, uint64(10000), getCPUWeight(&v))

	v = uint64(1000000000)
	assert.Equal(t, uint64(10000), getCPUWeight(&v))
}

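// TestSoftRequirementsValidationSuccess verifies that CPU hardcapping is
// detected on cgroup v1 when the cpu hierarchy exposes cpu.cfs_period_us and
// cpu.cfs_quota_us files.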
func TestSoftRequirementsValidationSuccess(t *testing.T) {
	if cgroups.IsCgroup2UnifiedMode() {
		t.Skip("skipping cgroup v1 test on a cgroup v2 system")
	}
	req := require.New(t)
	tempDir, err := os.MkdirTemp("", "")
	req.NoError(err)
	defer os.RemoveAll(tempDir)
	req.NoError(os.WriteFile(path.Join(tempDir, "cpu.cfs_period_us"), []byte("0"), os.ModePerm))
	req.NoError(os.WriteFile(path.Join(tempDir, "cpu.cfs_quota_us"), []byte("0"), os.ModePerm))
	mountInt := mount.NewFakeMounter(
		[]mount.MountPoint{
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuset"},
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpu"},
				Path:   tempDir,
			},
			{
				Device: "cgroup",
				Type:   "cgroup",
				Opts:   []string{"rw", "relatime", "cpuacct", "memory"},
			},
		})
	f, err := validateSystemRequirements(mountInt)
	assert.NoError(t, err)
	assert.True(t, f.cpuHardcapping, "cpu hardcapping is expected to be enabled")
}

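// TestGetCapacity verifies how GetCapacity reports ephemeral-storage
// capacity: taken from the capacity list when present, read from cAdvisor's
// root filesystem info as a fallback, and omitted when cAdvisor errors or is
// nil, or when local storage capacity isolation is disabled.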
func TestGetCapacity(t *testing.T) {
	ephemeralStorageFromCapacity := int64(2000)
	ephemeralStorageFromCadvisor := int64(8000)
	mockCtrl := gomock.NewController(t)
	defer mockCtrl.Finish()
	mockCtrlError := gomock.NewController(t)
	defer mockCtrlError.Finish()

	mockCadvisor := cadvisortest.NewMockInterface(mockCtrl)
	rootfs := cadvisorapiv2.FsInfo{
		Capacity: 8000,
	}
	mockCadvisor.EXPECT().RootFsInfo().Return(rootfs, nil)
	mockCadvisorError := cadvisortest.NewMockInterface(mockCtrlError)
	mockCadvisorError.EXPECT().RootFsInfo().Return(cadvisorapiv2.FsInfo{}, errors.New("Unable to get rootfs data from cAdvisor interface"))
	cases := []struct {
		name                                 string
		cm                                   *containerManagerImpl
		expectedResourceQuantity             *resource.Quantity
		expectedNoEphemeralStorage           bool
		disableLocalStorageCapacityIsolation bool
	}{
		{
			name: "capacity property has ephemeral-storage",
			cm: &containerManagerImpl{
				cadvisorInterface: mockCadvisor,
				capacity: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorageFromCapacity, resource.BinarySI),
				},
			},
			expectedResourceQuantity:   resource.NewQuantity(ephemeralStorageFromCapacity, resource.BinarySI),
			expectedNoEphemeralStorage: false,
		},
		{
			name: "capacity property does not have ephemeral-storage",
			cm: &containerManagerImpl{
				cadvisorInterface: mockCadvisor,
				capacity:          v1.ResourceList{},
			},
			expectedResourceQuantity:   resource.NewQuantity(ephemeralStorageFromCadvisor, resource.BinarySI),
			expectedNoEphemeralStorage: false,
		},
		{
			name: "capacity property does not have ephemeral-storage, error from rootfs",
			cm: &containerManagerImpl{
				cadvisorInterface: mockCadvisorError,
				capacity:          v1.ResourceList{},
			},
			expectedNoEphemeralStorage: true,
		},
		{
			name: "capacity property does not have ephemeral-storage, cadvisor interface is nil",
			cm: &containerManagerImpl{
				cadvisorInterface: nil,
				capacity:          v1.ResourceList{},
			},
			expectedNoEphemeralStorage: true,
		},
		{
			name: "capacity property has ephemeral-storage, but localStorageCapacityIsolation is disabled",
			cm: &containerManagerImpl{
				cadvisorInterface: mockCadvisor,
				capacity: v1.ResourceList{
					v1.ResourceEphemeralStorage: *resource.NewQuantity(ephemeralStorageFromCapacity, resource.BinarySI),
				},
			},
			expectedResourceQuantity:             resource.NewQuantity(ephemeralStorageFromCapacity, resource.BinarySI),
			expectedNoEphemeralStorage:           true,
			disableLocalStorageCapacityIsolation: true,
		},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			ret := c.cm.GetCapacity(!c.disableLocalStorageCapacityIsolation)
			if v, exists := ret[v1.ResourceEphemeralStorage]; !exists {
				if !c.expectedNoEphemeralStorage {
					t.Errorf("did not get any ephemeral storage data")
				}
			} else {
				if v.Value() != c.expectedResourceQuantity.Value() {
					t.Errorf("got unexpected %s value, expected %d, got %d", v1.ResourceEphemeralStorage, c.expectedResourceQuantity.Value(), v.Value())
				}
			}
		})
	}
}

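// TestNewPodContainerManager verifies that NewPodContainerManager returns a
// *podContainerManagerImpl when per-QOS cgroups are enabled and a
// *podContainerManagerNoop otherwise, under both the cgroupfs and systemd
// drivers.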
func TestNewPodContainerManager(t *testing.T) {
	info := QOSContainersInfo{
		Guaranteed: CgroupName{"guaranteed"},
		BestEffort: CgroupName{"besteffort"},
		Burstable:  CgroupName{"burstable"},
	}
	QosEnabled := NodeConfig{
		CgroupsPerQOS: true,
	}
	QosDisabled := NodeConfig{
		CgroupsPerQOS: false,
	}

	cases := []struct {
		name string
		cm   *containerManagerImpl
	}{
		{
			name: "CgroupsPerQOS is disabled, return *podContainerManagerNoop",
			cm: &containerManagerImpl{
				qosContainerManager: &qosContainerManagerImpl{
					qosContainersInfo: info,
					cgroupManager:     NewCgroupManager(&CgroupSubsystems{}, ""),
				},

				NodeConfig: QosDisabled,
			},
		},
		{
			name: "CgroupsPerQOS is enabled, return *podContainerManagerImpl",
			cm: &containerManagerImpl{
				qosContainerManager: &qosContainerManagerImpl{
					qosContainersInfo: info,
					cgroupManager:     NewCgroupManager(&CgroupSubsystems{}, ""),
				},

				NodeConfig: QosEnabled,
			},
		},
		{
			name: "CgroupsPerQOS is enabled, use systemd",
			cm: &containerManagerImpl{
				qosContainerManager: &qosContainerManagerImpl{
					qosContainersInfo: info,
					cgroupManager:     NewCgroupManager(&CgroupSubsystems{}, "systemd"),
				},

				NodeConfig: QosEnabled,
			},
		},
		{
			name: "CgroupsPerQOS is disabled, use systemd",
			cm: &containerManagerImpl{
				qosContainerManager: &qosContainerManagerImpl{
					qosContainersInfo: info,
					cgroupManager:     NewCgroupManager(&CgroupSubsystems{}, "systemd"),
				},

				NodeConfig: QosDisabled,
			},
		},
	}

	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			t.Parallel()
			pcm := c.cm.NewPodContainerManager()
			if c.cm.NodeConfig.CgroupsPerQOS {
				assert.IsType(t, &podContainerManagerImpl{}, pcm)
				got := pcm.(*podContainerManagerImpl)
				assert.Equal(t, c.cm.subsystems, got.subsystems)
				assert.Equal(t, c.cm.cgroupManager, got.cgroupManager)
				assert.Equal(t, c.cm.PodPidsLimit, got.podPidsLimit)
				assert.Equal(t, c.cm.EnforceCPULimits, got.enforceCPULimits)
				assert.Equal(t, uint64(c.cm.CPUCFSQuotaPeriod/time.Microsecond), got.cpuCFSQuotaPeriod)
			} else {
				assert.IsType(t, &podContainerManagerNoop{}, pcm)
				got := pcm.(*podContainerManagerNoop)
				assert.Equal(t, c.cm.cgroupRoot, got.cgroupRoot)
			}
		})
	}
}