#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

# Runs tests related to kubectl apply.
run_kubectl_apply_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply"
  ## kubectl apply should create the resource that doesn't exist yet
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command: applying pod "test-pod" (which doesn't exist yet) should create it
  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
  # Post-Condition: pod "test-pod" has the last-applied-configuration annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
  # pod has the field manager for kubectl client-side apply
  output_message=$(kubectl get --show-managed-fields -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-client-side-apply'
  # Clean up
  kubectl delete pods test-pod "${kube_flags[@]:?}"
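
  # Note: client-side apply stores the full applied object in the
  # kubectl.kubernetes.io/last-applied-configuration annotation and tags the
  # fields it owns with the "kubectl-client-side-apply" field manager, which is
  # what the two asserts above check. Illustrative way to inspect the
  # annotation (not executed by this test):
  #   kubectl get pod test-pod -o jsonpath='{.metadata.annotations.kubectl\.kubernetes\.io/last-applied-configuration}'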

  ### set-last-applied
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command: create "test-pod" (doesn't exist yet); this should create the pod without the last-applied annotation
  kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
  # Pre-Condition: pod "test-pod" does not have the last-applied annotation
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1
  # Dry-run set-last-applied
  kubectl apply set-last-applied --dry-run=client -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
  kubectl apply set-last-applied --dry-run=server -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")" || exit 1
  # Command
  kubectl apply set-last-applied -f hack/testdata/pod.yaml --create-annotation=true "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" has the last-applied annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
  # Clean up
  kubectl delete pods test-pod "${kube_flags[@]:?}"
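
  # For reference, `kubectl apply view-last-applied` is the read counterpart of
  # set-last-applied; illustrative usage (not executed here):
  #   kubectl apply view-last-applied pod/test-pod -o yaml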

  ## kubectl apply should be able to clear defaulted fields.
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command: applying deployment "test-deployment-retainkeys" (which doesn't exist yet) should create it
  kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]:?}"
  # Post-Condition: deployment "test-deployment-retainkeys" is created
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys'
  # Post-Condition: deployment "test-deployment-retainkeys" has the defaulted fields
  grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  grep -q maxSurge <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  grep -q maxUnavailable <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  # Command: applying the updated deployment should clear the defaulted fields
  # and successfully update the deployment
  [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
  # Post-Condition: deployment "test-deployment-retainkeys" has the updated fields
  grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  ! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
  grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
  ! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
  # Clean up
  kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"
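
  # Background: switching the deployment strategy from RollingUpdate to
  # Recreate exercises the retainKeys patch strategy, where setting one member
  # of a union (.spec.strategy.type: Recreate) drops the sibling rollingUpdate
  # block that the server had defaulted. The before/after fixtures differ in
  # exactly that stanza (and swap an emptyDir volume for a hostPath one), which
  # is what the greps above assert.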

  ## kubectl apply -f with label selector should only apply matching objects
  # Pre-Condition: no POD exists
  kube::test::wait_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply
  kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]:?}"
  # check right pod exists
  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field:?}.name}}" 'selector-test-pod'
  # check wrong pod doesn't exist
  output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
  # cleanup
  kubectl delete pods selector-test-pod

  ## kubectl apply --dry-run=client and --dry-run=server
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # apply dry-run
  kubectl apply --dry-run=client -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  kubectl apply --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # No pod exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply without dry-run creates the pod
  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  initialResourceVersion=$(kubectl get "${kube_flags[@]:?}" -f hack/testdata/pod.yaml -o go-template='{{ .metadata.resourceVersion }}')
  # apply changes
  kubectl apply --dry-run=client -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
  kubectl apply --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
  # Post-Condition: label still has the initial value
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
  # Ensure dry-run doesn't persist changes
  resourceVersion=$(kubectl get "${kube_flags[@]:?}" -f hack/testdata/pod.yaml -o go-template='{{ .metadata.resourceVersion }}')
  kube::test::if_has_string "${resourceVersion}" "${initialResourceVersion}"

  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
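
  # Note: --dry-run=client validates and prints locally without sending a
  # mutating request, while --dry-run=server submits the request and runs it
  # through admission and validation but skips persistence. Comparing
  # resourceVersion before and after, as above, is a cheap way to prove that
  # neither variant persisted anything.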

  ## kubectl apply dry-run on CR
  # Create CRD
  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1",
  "metadata": {
    "name": "resources.mygroup.example.com"
  },
  "spec": {
    "group": "mygroup.example.com",
    "scope": "Namespaced",
    "names": {
      "plural": "resources",
      "singular": "resource",
      "kind": "Kind",
      "listKind": "KindList"
    },
    "versions": [
      {
        "name": "v1alpha1",
        "served": true,
        "storage": true,
        "schema": {
          "openAPIV3Schema": {
            "x-kubernetes-preserve-unknown-fields": true,
            "type": "object"
          }
        }
      }
    ]
  }
}
__EOF__

  # Ensure the API server has recognized and started serving the associated CR API
  local tries=5
  for i in $(seq 1 "${tries}"); do
    local output
    output=$(kubectl "${kube_flags[@]:?}" api-resources --api-group mygroup.example.com -oname || true)
    if kube::test::if_has_string "${output}" resources.mygroup.example.com; then
      break
    fi
    echo "${i}: Waiting for CR API to be available"
    sleep "$i"
  done
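
  # Note: serving a just-created CRD's API is asynchronous, hence the bounded
  # retry loop with increasing sleeps instead of a single check. An alternative
  # used later in this file is `kubectl wait --for=condition=Established` on
  # the CRD itself.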

  # Dry-run create the CR
  kubectl apply --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
  # Make sure that the CR doesn't exist
  ! kubectl "${kube_flags[@]:?}" get resource/myobj 2>/dev/null || exit 1

  # clean-up
  kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com

  ## kubectl apply --prune
  # Pre-Condition: namespace nsb exists; no POD exists
  kubectl create ns nsb
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply pod a into namespace nsb
  kubectl apply --namespace nsb -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pods a -n nsb' "{{${id_field:?}}}" 'a'
  # apply pod b with --prune into namespace nsb; pod a should be pruned
  kubectl apply --namespace nsb --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
  # check the right pod exists and the wrong pod doesn't exist
  kube::test::wait_object_assert 'pods -n nsb' "{{range.items}}{{${id_field:?}}}:{{end}}" 'b:'

  # cleanup
  kubectl delete pods b -n nsb

  # same thing without prune, as a sanity check
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # apply pod a
  kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
  # check the right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
  # check the wrong pod doesn't exist
  kube::test::wait_object_assert 'pods -n nsb' "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # apply pod b
  kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
  # check both pods exist
  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
  kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b'

  # cleanup
  kubectl delete pod/a
  kubectl delete pod/b -n nsb

  ## kubectl apply --prune requires a --all flag to select everything
  output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" \
    'all resources selected for prune without explicitly passing --all'
  # should apply everything
  kubectl apply --all --prune -f hack/testdata/prune
  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
  kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b'
  kubectl delete pod/a
  kubectl delete pod/b -n nsb
  kubectl delete ns nsb
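
  # Note: --prune deletes previously applied objects that match the given
  # selector (or --all) but are missing from the current input. Requiring an
  # explicit --all is a guardrail: without a label selector, every object of
  # the allowlisted types in scope would be a pruning candidate.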

  ## kubectl apply --prune should fall back to delete for non-reapable types
  kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pvc a-pvc' "{{${id_field:?}}}" 'a-pvc'
  kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pvc b-pvc' "{{${id_field:?}}}" 'b-pvc'
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]:?}"
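
  # Note: applying b.yml with --prune both creates b-pvc and prunes a-pvc; for
  # types without dedicated deletion ("reaper") logic, pruning falls back to a
  # plain delete, which is why only b-pvc is asserted to exist afterwards.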

  ## kubectl apply --prune --prune-allowlist
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply pod a
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
  # check the right pod exists
  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
  # apply svc and don't prune pod a by overriding the allowlist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-allowlist core/v1/Service 2>&1 "${kube_flags[@]:?}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
  # apply svc and prune pod a with the default allowlist
  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]:?}"
  kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # cleanup
  kubectl delete svc prune-svc 2>&1 "${kube_flags[@]:?}"
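
  # Note: --prune-allowlist entries use the <group>/<version>/<Kind> form, with
  # the legacy core group spelled "core", e.g. core/v1/Service. Restricting the
  # allowlist to Services above is why pod a survived the first prune pass and
  # was only removed once the default allowlist (which covers core/v1/Pod) was
  # back in effect.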

  ## kubectl apply --prune can prune resources not in the defaulted namespace
  # Pre-Condition: namespace nsb exists; no POD exists
  kubectl create ns nsb
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply pod a into namespace nsb
  kubectl apply --namespace nsb -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pods a -n nsb' "{{${id_field:?}}}" 'a'
  # apply pod b into namespace nsb
  kubectl apply --namespace nsb -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pods b -n nsb' "{{${id_field:?}}}" 'b'
  # apply --prune must prune pod a
  kubectl apply --prune --all -f hack/testdata/prune/b.yaml
  # check the wrong pod doesn't exist and the right pod exists
  kube::test::wait_object_assert 'pods -n nsb' "{{range.items}}{{${id_field:?}}}:{{end}}" 'b:'

  # cleanup
  kubectl delete ns nsb

  ## kubectl apply -n must fail if the input file contains a namespace other than the one given with -n
  output_message=$(! kubectl apply -n foo -f hack/testdata/prune/b.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'the namespace from the provided object "nsb" does not match the namespace "foo".'

  ## kubectl apply -f some.yml --force
  # Pre-condition: no service exists
  kube::test::get_object_assert services "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply service a
  kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]:?}"
  # check the right service exists
  kube::test::get_object_assert 'services a' "{{${id_field:?}}}" 'a'
  # change an immutable field and apply service a
  output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'may not change once set'
  # apply --force to recreate resources when immutable fields change
  kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]:?}"
  # check the immutable field has the new value
  kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12'
  # cleanup
  kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]:?}"
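
  # Note: --force does not bypass the immutability check; when the patch is
  # rejected, kubectl deletes the object and recreates it from the manifest,
  # which is why the new clusterIP can be observed afterwards.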

  ## kubectl apply -k somedir
  kubectl apply -k hack/testdata/kustomize
  kube::test::get_object_assert 'configmap test-the-map' "{{${id_field:?}}}" 'test-the-map'
  kube::test::get_object_assert 'deployment test-the-deployment' "{{${id_field:?}}}" 'test-the-deployment'
  kube::test::get_object_assert 'service test-the-service' "{{${id_field:?}}}" 'test-the-service'
  # cleanup
  kubectl delete -k hack/testdata/kustomize

  ## kubectl apply --kustomize somedir (long form of -k)
  kubectl apply --kustomize hack/testdata/kustomize
  kube::test::get_object_assert 'configmap test-the-map' "{{${id_field:?}}}" 'test-the-map'
  kube::test::get_object_assert 'deployment test-the-deployment' "{{${id_field:?}}}" 'test-the-deployment'
  kube::test::get_object_assert 'service test-the-service' "{{${id_field:?}}}" 'test-the-service'
  # cleanup
  kubectl delete --kustomize hack/testdata/kustomize
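
  # For context, a kustomization directory like hack/testdata/kustomize is
  # driven by a kustomization.yaml listing the manifests to build and apply.
  # A minimal sketch (illustrative only; the real fixture may differ):
  #   resources:
  #     - configmap.yaml
  #     - deployment.yaml
  #     - service.yaml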

  ## kubectl apply multiple resources with one failure during the apply phase.
  # Pre-Condition: namespace does not exist and no POD exists
  output_message=$(! kubectl get namespace multi-resource-ns 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
  kube::test::wait_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # First pass: the namespace is created, but the pod is not (since the namespace did not exist yet).
  output_message=$(! kubectl apply -f hack/testdata/multi-resource-1.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'namespaces "multi-resource-ns" not found'
  output_message=$(! kubectl get pods test-pod -n multi-resource-ns 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'pods "test-pod" not found'
  # Second pass: the pod is created (now that the namespace exists).
  kubectl apply -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'pods test-pod -n multi-resource-ns' "{{${id_field:?}}}" 'test-pod'
  # cleanup
  kubectl delete -f hack/testdata/multi-resource-1.yaml "${kube_flags[@]:?}"

  ## kubectl apply multiple resources with one failure during the builder phase.
  # Pre-Condition: no configmaps with name=foo exist
  kube::test::get_object_assert 'configmaps --field-selector=metadata.name=foo' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Apply a configmap and a bogus custom resource.
  output_message=$(! kubectl apply -f hack/testdata/multi-resource-2.yaml 2>&1 "${kube_flags[@]:?}")
  # Should get an error message from the bogus custom resource.
  kube::test::if_has_string "${output_message}" 'no matches for kind "Bogus" in version "example.com/v1"'
  # The ConfigMap should have been created even with the custom resource error.
  kube::test::get_object_assert 'configmaps foo' "{{${id_field:?}}}" 'foo'
  # cleanup
  kubectl delete configmaps foo "${kube_flags[@]:?}"
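
  # Note: the "builder" phase resolves each document against API discovery
  # before any request is sent; a kind with no registered mapping (Bogus above)
  # fails there, while the other well-formed documents are still applied.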

  ## kubectl apply multiple resources with one failure during the builder phase.
  # Pre-Condition: no pods exist.
  # Apply three pods: one is invalid (POD-B), the other two succeed (pod-a, pod-c).
  output_message=$(! kubectl apply -f hack/testdata/multi-resource-3.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'The Pod "POD-B" is invalid'
  kube::test::get_object_assert 'pods pod-a' "{{${id_field:?}}}" 'pod-a'
  kube::test::get_object_assert 'pods pod-c' "{{${id_field:?}}}" 'pod-c'
  # cleanup
  kubectl delete pod pod-a pod-c "${kube_flags[@]:?}"
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ## kubectl apply multiple resources with one failure during the apply phase.
  # Pre-Condition: the crd does not exist, and the custom resource does not exist.
  kube::test::get_object_assert crds "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # First pass: the custom resource fails, but the crd apply succeeds.
  output_message=$(! kubectl apply -f hack/testdata/multi-resource-4.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'no matches for kind "Widget" in version "example.com/v1"'
  kubectl wait --timeout=2s --for=condition=Established=true crd/widgets.example.com
  output_message=$(! kubectl get widgets foo 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'widgets.example.com "foo" not found'
  kube::test::get_object_assert 'crds widgets.example.com' "{{${id_field:?}}}" 'widgets.example.com'
  # Second pass: the custom resource is created (now that the crd exists).
  kubectl apply -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'widget foo' "{{${id_field:?}}}" 'foo'
  # cleanup
  kubectl delete -f hack/testdata/multi-resource-4.yaml "${kube_flags[@]:?}"
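
  # Note: within a single apply invocation the CRD and its custom resource are
  # processed together, and the new Widget kind is not yet in discovery, so the
  # first pass necessarily fails for the CR; the second pass succeeds once the
  # CRD is established.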

  set +o nounset
  set +o errexit
}

# Runs tests related to kubectl apply (server-side).
run_kubectl_server_side_apply_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply --server-side"
  ## kubectl apply --server-side should create the resource that doesn't exist yet
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command: applying pod "test-pod" (which doesn't exist yet) should create it
  kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" is created
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
  # pod has the field manager for kubectl server-side apply
  output_message=$(kubectl get --show-managed-fields -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl'
  # pod has a custom field manager
  kubectl apply --server-side --field-manager=my-field-manager --force-conflicts -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  output_message=$(kubectl get -f hack/testdata/pod.yaml -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'my-field-manager'
  # Clean up
  kubectl delete pods test-pod "${kube_flags[@]:?}"
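
  # Note: with server-side apply, field ownership is tracked per manager in
  # .metadata.managedFields; --field-manager overrides the default manager name
  # and --force-conflicts takes ownership of fields already owned by another
  # manager. Illustrative way to inspect ownership (not executed here):
  #   kubectl get pod test-pod --show-managed-fields -o yaml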

  ## kubectl apply --server-side --dry-run=server
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # apply dry-run
  kubectl apply --server-side --dry-run=server -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # No pod exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply without dry-run creates the pod
  kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  initialResourceVersion=$(kubectl get "${kube_flags[@]:?}" -f hack/testdata/pod.yaml -o go-template='{{ .metadata.resourceVersion }}')
  # apply changes
  kubectl apply --server-side --dry-run=server -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
  # Post-Condition: label still has the initial value
  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
  # Ensure dry-run doesn't persist changes
  resourceVersion=$(kubectl get "${kube_flags[@]:?}" -f hack/testdata/pod.yaml -o go-template='{{ .metadata.resourceVersion }}')
  kube::test::if_has_string "${resourceVersion}" "${initialResourceVersion}"

  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"

  ## kubectl apply upgrade (client-side apply to server-side apply) and downgrade
  # Pre-Condition: no POD exists
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  kube::log::status "Testing upgrade kubectl client-side apply to server-side apply"
  # run client-side apply
  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # the upgrade must fail with a non-standard server-side apply field manager
  ! kubectl apply --server-side --field-manager="not-kubectl" -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}" || exit 1
  # upgrade from client-side apply to server-side apply
  kubectl apply --server-side -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" has the last-applied annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
  output_message=$(kubectl apply view-last-applied pod/test-pod -o json 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" '"name": "test-pod-applied"'

  kube::log::status "Testing downgrade kubectl server-side apply to client-side apply"
  # downgrade from server-side apply to client-side apply
  kubectl apply --server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
  # Post-Condition: pod "test-pod" has the last-applied annotation
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
  output_message=$(kubectl apply view-last-applied pod/test-pod -o json 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" '"name": "test-pod-label"'
  # client-side apply works again after the downgrade
  kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"

  # clean-up
  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
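
  # Note: the upgrade path works because server-side apply with the default
  # kubectl manager keeps the last-applied-configuration annotation in sync,
  # so a later client-side apply still has an accurate merge base; the
  # view-last-applied checks above assert exactly that. A custom field manager
  # breaks this linkage, which is why the --field-manager="not-kubectl"
  # upgrade is expected to fail.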

  # Test apply migration

  # Create a configmap in the cluster with client-side apply:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test created'

  # Apply the same manifest with --server-side flag, as per server-side-apply migration instructions:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  legacy: unused
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Apply the object a third time using server-side apply, but this time
  # removing one field and adding another. Old versions of kubectl would not
  # allow the removed field to actually be dropped:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)

  kube::test::if_has_string "${output_message}" 'configmap/test serverside-applied'

  # Fetch the object and check to see that it does not have a field 'legacy'
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.legacy }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # CSA the object after it has been server-side-applied and had a field removed:
  # add a new key with client-side apply; this also removes the field owned by
  # server-side apply (ssaKey)
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  newKey: newValue
__EOF__
)

  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" 'newValue'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" '<no value>'

  # SSA the object without the field added above by CSA. Show that the object
  # on the server has had the field removed:
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side -f - << __EOF__
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
data:
  key: value
  ssaKey: ssaValue
__EOF__
)

  # Fetch the object and check to see that it does not have a field 'newKey'
  kube::test::get_object_assert "configmap test" "{{ .data.key }}" 'value'
  kube::test::get_object_assert "configmap test" "{{ .data.newKey }}" '<no value>'
  kube::test::get_object_assert "configmap test" "{{ .data.ssaKey }}" 'ssaValue'

  # Show that kubectl diff --server-side also functions after a migration
  output_message=$(kubectl diff "${kube_flags[@]:?}" --server-side -f - << __EOF__ || test $? -eq 1
apiVersion: v1
kind: ConfigMap
metadata:
  name: test
  annotations:
    newAnnotation: newValue
data:
  key: value
  newKey: newValue
__EOF__
)
  kube::test::if_has_string "${output_message}" '+ newKey: newValue'
  kube::test::if_has_string "${output_message}" '+ newAnnotation: newValue'
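
  # Note: kubectl diff exits 0 when there are no differences and 1 when it
  # finds some, so the `|| test $? -eq 1` above accepts "has differences" as
  # success while still failing on real errors (exit codes greater than 1).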

  # clean-up
  kubectl "${kube_flags[@]:?}" delete configmap test

  ## Test to show that supplying a custom field manager to kubectl apply
  # does not prevent migration from client-side apply to server-side apply
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=false --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "${output_message}" "configmap/ssa-test created"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # show that after client-side applying with a custom field manager, the
  # last-applied annotation is present
  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")"

  # Migrate to server-side apply by applying the same object
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value1
  legacy: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "${output_message}" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value1'

  # show that after migrating to SSA with a custom field manager, the
  # last-applied annotation is dropped
  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get configmap ssa-test -o yaml "${kube_flags[@]:?}")" || exit 1

  # Change a field without any conflict and drop a field in the same patch
  output_message=$(kubectl "${kube_flags[@]:?}" apply --server-side=true --field-manager=myfm -f - << __EOF__
apiVersion: v1
data:
  key: value2
kind: ConfigMap
metadata:
  name: ssa-test
__EOF__
)
  kube::test::if_has_string "${output_message}" "configmap/ssa-test serverside-applied"
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.key }}" 'value2'
  kube::test::get_object_assert "configmap ssa-test" "{{ .data.legacy }}" '<no value>'

  # Clean up
  kubectl delete configmap ssa-test
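
  # Note: client-side apply records its ownership in managedFields with
  # operation "Update", while server-side apply records operation "Apply"; the
  # asserts above show that with a matching manager name ("myfm") the switch
  # produces no conflicts and the last-applied annotation is cleaned up as
  # part of the migration.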

  ## kubectl apply --server-side dry-run on CR
  # Create CRD
  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
{
  "kind": "CustomResourceDefinition",
  "apiVersion": "apiextensions.k8s.io/v1",
  "metadata": {
    "name": "resources.mygroup.example.com"
  },
  "spec": {
    "group": "mygroup.example.com",
    "scope": "Namespaced",
    "names": {
      "plural": "resources",
      "singular": "resource",
      "kind": "Kind",
      "listKind": "KindList"
    },
    "versions": [
      {
        "name": "v1alpha1",
        "served": true,
        "storage": true,
        "schema": {
          "openAPIV3Schema": {
            "x-kubernetes-preserve-unknown-fields": true,
            "type": "object"
          }
        }
      }
    ]
  }
}
__EOF__

  # Ensure the API server has recognized and started serving the associated CR API
  local tries=5
  for i in $(seq 1 "${tries}"); do
    local output
    output=$(kubectl "${kube_flags[@]:?}" api-resources --api-group mygroup.example.com -oname || true)
    if kube::test::if_has_string "${output}" resources.mygroup.example.com; then
      break
    fi
    echo "${i}: Waiting for CR API to be available"
    sleep "$i"
  done

  # Dry-run create the CR
  kubectl apply --server-side --dry-run=server -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
  # Make sure that the CR doesn't exist
  ! kubectl "${kube_flags[@]:?}" get resource/myobj 2>/dev/null || exit 1

  # clean-up
  kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com

  set +o nounset
  set +o errexit
}