#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

run_daemonset_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets)"

  ### Create a rolling update DaemonSet
  # Pre-condition: no DaemonSet exists
  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
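  # Note: kube::test::get_object_assert compares the output of a Go-template
  # query against an expected string. As a rough sketch (the helper's exact
  # flags are an assumption here, and ${id_field} conventionally expands to
  # ".metadata.name"), the assertion above behaves like:
  #   kubectl get daemonsets -o go-template='{{range.items}}{{.metadata.name}}:{{end}}'
  # which prints an empty string while no DaemonSet exists.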
  # Command
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
  # Template Generation should be 1
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
  # Template Generation should stay 1
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '1'
  # Test set commands
  kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=registry.k8s.io/pause:test-cmd"
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '2'
  kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '3'
  kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '4'
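  # Each set command above mutates the pod template, and every spec change
  # bumps .metadata.generation by one, which is why the expected value climbs
  # from 1 to 4 (re-applying an identical manifest earlier left it at 1).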
  # DaemonSet has field for kubectl set field manager
  output_message=$(kubectl get daemonsets bind --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-set'
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert daemonsets pods,events
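  # The chunk-size assertion checks that describe paginates its list calls.
  # A hand-run equivalent would be roughly (a sketch; the verbosity level and
  # the exact request log format are assumptions):
  #   kubectl describe daemonsets --chunk-size=2 -v=6
  # and then looking for "limit=2" on the pods and events list requests.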

  # Rollout restart should change generation
  kubectl rollout restart daemonset/bind "${kube_flags[@]:?}"
  kube::test::get_object_assert 'daemonsets bind' "{{${generation_field:?}}}" '5'

  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}

run_daemonset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:daemonsets, v1:controllerrevisions)"

  ### Test rolling back a DaemonSet
  # Pre-condition: no DaemonSet or its pods exist
  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  # Create a DaemonSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
  # Update the DaemonSet (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
  # Get rollout history
  output_message=$(kubectl rollout history daemonset)
  kube::test::if_has_string "${output_message}" "daemonset.apps/bind"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "1 kubectl apply"
  kube::test::if_has_string "${output_message}" "2 kubectl apply"
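  # For orientation, the history output asserted on above looks roughly like
  # the following (reconstructed from the assertions; column spacing and the
  # recorded change-cause text are approximations):
  #   daemonset.apps/bind
  #   REVISION  CHANGE-CAUSE
  #   1         kubectl apply --filename=hack/testdata/rollingupdate-daemonset.yaml --record=true ...
  #   2         kubectl apply --filename=hack/testdata/rollingupdate-daemonset-rv2.yaml --record=true ...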
  # Get rollout history for a single revision
  output_message=$(kubectl rollout history daemonset --revision=1)
  kube::test::if_has_string "${output_message}" "daemonset.apps/bind with revision #1"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_PAUSE_V2}"
  # Get rollout history for a different single revision
  output_message=$(kubectl rollout history daemonset --revision=2)
  kube::test::if_has_string "${output_message}" "daemonset.apps/bind with revision #2"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_DAEMONSET_R2}"
  kube::test::if_has_string "${output_message}" "${IMAGE_DAEMONSET_R2_2}"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo daemonset --dry-run=client "${kube_flags[@]:?}"
  kubectl rollout undo daemonset --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
  # Get rollout history
  output_message=$(kubectl rollout history daemonset)
  kube::test::if_has_string "${output_message}" "daemonset.apps/bind"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "2 kubectl apply"
  kube::test::if_has_string "${output_message}" "3 kubectl apply"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
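  # Note on the "$(! kubectl ...)" pattern above: the leading "!" inverts
  # kubectl's exit status, so the assignment succeeds exactly when kubectl
  # fails as expected (and errexit aborts the test if it unexpectedly
  # succeeds), while 2>&1 still captures the error text for the assertions.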
  # Rollback to last revision
  kubectl rollout undo daemonset "${kube_flags[@]:?}"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  # Get rollout history
  output_message=$(kubectl rollout history daemonset)
  kube::test::if_has_string "${output_message}" "daemonset.apps/bind"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "3 kubectl apply"
  kube::test::if_has_string "${output_message}" "4 kubectl apply"
  # Clean up
  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}

run_kubectl_apply_deployments_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl apply deployments"
  ## kubectl apply should propagate user-defined null values
  # Pre-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply base deployment
  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]:?}"
  # check the right deployment exists
  kube::test::get_object_assert 'deployments my-depl' "{{${id_field:?}}}" 'my-depl'
  # check the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'

  # apply new deployment with new template labels
  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}"
  # check the right labels exist
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
  kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" '<no value>'
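  # '<no value>' is what a Go template prints for a missing key: applying
  # deployment-l2.yaml removes .metadata.labels.l1 while leaving the template
  # and selector labels intact. A minimal sketch of the idea (the actual
  # testdata contents are not shown here, so this fragment is an assumption)
  # is a manifest that sets the label to null so that apply deletes it:
  #   metadata:
  #     labels:
  #       l1: null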

  # cleanup
  # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
  kubectl delete deployments,rs,pods --all --cascade=orphan --grace-period=0
  # Post-Condition: no Deployments, ReplicaSets, Pods exist
  kube::test::wait_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::wait_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  # kubectl apply deployment --overwrite=true --force=true
  # Pre-Condition: no deployment exists
  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # apply deployment nginx
  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]:?}"
  # check the right deployment exists
  kube::test::get_object_assert 'deployment nginx' "{{${id_field:?}}}" 'nginx'
  # apply deployment with new labels and a conflicting resourceVersion
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
  # applying the deployment with --force and --overwrite should succeed
  kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
  # check the changed deployment
  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]:?}" |grep nginx2)
  kube::test::if_has_string "${output_message}" '"name": "nginx2"'
  # applying a resource (with --force) that is both conflicting and invalid will
  # cause the server to only return a "Conflict" error when we attempt to patch.
  # This means that we will delete the existing resource after receiving 5 conflict
  # errors in a row from the server, and will attempt to create the modified
  # resource that we are passing to "apply". Since the modified resource is also
  # invalid, we will receive an invalid error when we attempt to create it, after
  # having deleted the old resource. Ensure that when this case is reached, the
  # old resource is restored once again, and the validation error is printed.
  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'Invalid value'
  # Ensure that the old object has been restored
  kube::test::get_object_assert 'deployment nginx' "{{${template_labels:?}}}" 'nginx2'
  # cleanup
  kubectl delete deployments --all --grace-period=10

  set +o nounset
  set +o errexit
}

run_deployment_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing deployments"
  # Test kubectl create deployment (using default - old generator)
  kubectl create deployment test-nginx-extensions --image=registry.k8s.io/nginx:test-cmd
  # Post-Condition: Deployment "test-nginx-extensions" is created.
  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{${container_name_field:?}}}" 'nginx'
  # and the old generator was used, i.e. old defaults are applied
  output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_not_string "${output_message}" '2'
  # Ensure we can interact with deployments through apps endpoints
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Clean up
  kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}"

  # Test kubectl create deployment
  kubectl create deployment test-nginx-apps --image=registry.k8s.io/nginx:test-cmd
  # Post-Condition: Deployment "test-nginx-apps" is created.
  kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx'
  # and the new generator was used, i.e. new defaults are applied
  output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
  kube::test::if_has_string "${output_message}" '10'
  # Ensure we can interact with deployments through apps endpoints
  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
  kube::test::if_has_string "${output_message}" 'apps/v1'
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert deployments replicasets,events
  # Clean up
  kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}"

  ### Test kubectl create deployment with image and command
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Dry-run command
  kubectl create deployment nginx-with-command --dry-run=client --image=registry.k8s.io/nginx:test-cmd -- /bin/sleep infinity
  kubectl create deployment nginx-with-command --dry-run=server --image=registry.k8s.io/nginx:test-cmd -- /bin/sleep infinity
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create deployment nginx-with-command --image=registry.k8s.io/nginx:test-cmd -- /bin/sleep infinity
  # Post-Condition: Deployment "nginx-with-command" is created.
  kube::test::get_object_assert 'deploy nginx-with-command' "{{${container_name_field:?}}}" 'nginx'
  # Clean up
  kubectl delete deployment nginx-with-command "${kube_flags[@]:?}"

  ### Test kubectl create deployment should not fail validation
  # Pre-Condition: No deployment exists.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]:?}"
  # Post-Condition: Deployment "deployment-with-unixuserid" is created.
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'deployment-with-unixuserid:'
  # Clean up
  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]:?}"

  ### Test cascading deletion
  ## Test that rs is deleted when deployment is deleted.
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create deployment
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '3'
  # Deleting the deployment should delete the rs.
  # Pass the cascade flag with no value to verify backward compatibility.
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade
  kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ## Test that rs is not deleted when deployment is deleted with cascading strategy set to orphan.
  # Pre-condition: no deployment and rs exist
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create deployment
  kubectl create deployment nginx-deployment --image=registry.k8s.io/nginx:test-cmd
  # Wait for rs to come up.
  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
  # Delete the deployment with cascading strategy set to orphan.
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade=orphan
  # Wait for the deployment to be deleted and then verify that rs is not
  # deleted.
  kube::test::wait_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  kube::test::get_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
  # Cleanup
  # Find the name of the rs to be deleted.
  output_message=$(kubectl get rs "${kube_flags[@]:?}" -o template --template="{{range.items}}{{${id_field:?}}}{{end}}")
  kubectl delete rs "${output_message}" "${kube_flags[@]:?}"

  ### Auto scale deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Pre-condition: no hpa exists
  kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
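  # The template above prints "found" for an HPA named nginx-deployment and a
  # trailing ":" either way, so expecting ':' asserts that no such HPA exists
  # yet; the same idiom is reused after the dry-run autoscale calls below.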
  # Command
  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  # Dry-run autoscale
  kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3
  kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3
  kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
  # autoscale 2~3 pods, no CPU utilization specified
  kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
  kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
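  # The asserted '2 3 80' reflects min=2, max=3, and the default CPU
  # utilization target of 80% that is applied when --cpu-percent is omitted.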
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert horizontalpodautoscalers events
  # Clean up
  # Note that we should delete hpa first, otherwise it may fight with the deployment reaper.
  kubectl delete hpa nginx-deployment "${kube_flags[@]:?}"
  kubectl delete deployment.apps nginx-deployment "${kube_flags[@]:?}"

  ### Rollback a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  # Create a deployment (revision 1)
  kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx:'
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Update the deployment (revision 2)
  kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo deployment nginx --dry-run=client "${kube_flags[@]:?}" | grep "test-cmd"
  kubectl rollout undo deployment nginx --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Rollback to revision 1
  kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to revision 1000000 - should fail and leave the deployment unchanged
  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" || exit 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Rollback to last revision
  kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
  sleep 1
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  # Pause the deployment
  kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
  # A paused deployment cannot be rolled back
  ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" || exit 1
  # A paused deployment cannot be restarted
  ! kubectl rollout restart deployment nginx "${kube_flags[@]:?}" || exit 1
  # Resume the deployment
  kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
  # The resumed deployment can now be rolled back
  kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
  # Check that the new replica set has all old revisions stored in an annotation
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
  # Check that trying to watch the status of a superseded revision returns an error
  ! kubectl rollout status deployment/nginx --revision=3 || exit 1
  # Restarting the deployment creates a new replicaset
  kubectl rollout restart deployment/nginx
  sleep 1
  newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
  rs="$(kubectl get rs "${newrs}" -o yaml)"
  kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\""
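  # Why revision "6": roughly, create (1), apply revision2 (2), undo to
  # revision 1 (3), undo back to the newer template (4), undo again after
  # resume (5), and the restart above rolls out one more change (6). The
  # revision-history annotation checked earlier lists the superseded revisions
  # that the surviving ReplicaSet has represented.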
  # Deployment has field for kubectl rollout field manager
  output_message=$(kubectl get deployment nginx --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-rollout'
  # Create second deployment
  ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}"
  # Deletion of both deployments should not be blocked
  kubectl delete deployment nginx2 "${kube_flags[@]:?}"
  # Clean up
  kubectl delete deployment nginx "${kube_flags[@]:?}"

  ### Set image of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Dry-run set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=client "${kube_flags[@]:?}"
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Setting the image of a non-existing container should fail
  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" || exit 1
  # Set image of deployments without specifying name
  kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a deployment specified by file
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of a local file without talking to the server
  kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" --local -o yaml
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set image of all containers of the deployment
  kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Set image of all containers of the deployment again when the image does not change
  kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"

  ### Set env of a deployment
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  # The configmap is checked by name here because the controller automatically creates a kube-root-ca.crt configmap in each namespace.
  kube::test::get_object_assert 'configmaps/test-set-env-config' "{{${id_field:?}}}" 'test-set-env-config'
  kube::test::get_object_assert secret "{{range.items}}{{${id_field:?}}}:{{end}}" 'test-set-env-secret:'
  # Set env of deployments by configmap from keys
  kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  # Assert correct value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2'
  # Assert single value in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Dry-run set env
  kubectl set env deployment nginx-deployment --dry-run=client --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  kubectl set env deployment nginx-deployment --dry-run=server --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1'
  # Set env of deployments by configmap
  kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}"
  # Assert all values in deployment env
  kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2'
  # Set env of deployments for all containers
  kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]:?}"
  # Set env of deployments for a specific container
  kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]:?}"
  # Set env of deployments by secret from keys
  kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]:?}"
  # Set env of deployments by secret
  kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]:?}"
  # Remove specific env of deployment
  kubectl set env deployment nginx-deployment env-
  # Assert that we cannot use standard input for both resource and environment variable
  output_message="$(echo SOME_ENV_VAR_KEY=SOME_ENV_VAR_VAL | kubectl set env -f - - "${kube_flags[@]:?}" 2>&1 || true)"
  kube::test::if_has_string "${output_message}" 'standard input cannot be used for multiple arguments'
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
  kubectl delete configmap test-set-env-config "${kube_flags[@]:?}"
  kubectl delete secret test-set-env-secret "${kube_flags[@]:?}"

  ### Get rollout history
  # Pre-condition: no deployment exists
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Create a deployment
  kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Set the deployment's image
  kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
  kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
  # Get rollout history
  output_message=$(kubectl rollout history deployment nginx-deployment)
  kube::test::if_has_string "${output_message}" "deployment.apps/nginx-deployment"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "1 <none>"
  kube::test::if_has_string "${output_message}" "2 <none>"
  kube::test::if_has_not_string "${output_message}" "3 <none>"
  # Get rollout history for a single revision
  output_message=$(kubectl rollout history deployment nginx-deployment --revision=1)
  kube::test::if_has_string "${output_message}" "deployment.apps/nginx-deployment with revision #1"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_DEPLOYMENT_R1}"
  kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
  # Get rollout history for a different single revision
  output_message=$(kubectl rollout history deployment nginx-deployment --revision=2)
  kube::test::if_has_string "${output_message}" "deployment.apps/nginx-deployment with revision #2"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_DEPLOYMENT_R2}"
  kube::test::if_has_string "${output_message}" "${IMAGE_PERL}"
  # Clean up
  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"

  set +o nounset
  set +o errexit
}

run_statefulset_history_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets, v1:controllerrevisions)"

  ### Test rolling back a StatefulSet
  # Pre-condition: no statefulset or its pods exist
  kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  # Create a StatefulSet (revision 1)
  kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*"
  # Rollback to revision 1 - should be no-op
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
  # Update the statefulset (revision 2)
  kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*"
  # Get rollout history
  output_message=$(kubectl rollout history statefulset)
  kube::test::if_has_string "${output_message}" "statefulset.apps/nginx"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "1 kubectl apply"
  kube::test::if_has_string "${output_message}" "2 kubectl apply"
  # Get rollout history for a single revision
  output_message=$(kubectl rollout history statefulset --revision=1)
  kube::test::if_has_string "${output_message}" "statefulset.apps/nginx with revision #1"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_STATEFULSET_R1}"
  # Get rollout history for a different single revision
  output_message=$(kubectl rollout history statefulset --revision=2)
  kube::test::if_has_string "${output_message}" "statefulset.apps/nginx with revision #2"
  kube::test::if_has_string "${output_message}" "Pod Template:"
  kube::test::if_has_string "${output_message}" "${IMAGE_STATEFULSET_R2}"
  kube::test::if_has_string "${output_message}" "${IMAGE_PAUSE_V2}"
  # Rollback to revision 1 with dry-run - should be no-op
  kubectl rollout undo statefulset --dry-run=client "${kube_flags[@]:?}"
  kubectl rollout undo statefulset --dry-run=server "${kube_flags[@]:?}"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  # Rollback to revision 1
  kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
  # Get rollout history
  output_message=$(kubectl rollout history statefulset)
  kube::test::if_has_string "${output_message}" "statefulset.apps/nginx"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "2 kubectl apply"
  kube::test::if_has_string "${output_message}" "3 kubectl apply"
  # Rollback to revision 1000000 - should fail
  output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" "unable to find specified revision"
  kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
  # Rollback to last revision
  kubectl rollout undo statefulset "${kube_flags[@]:?}"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:"
  kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
  kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
  # Get rollout history
  output_message=$(kubectl rollout history statefulset)
  kube::test::if_has_string "${output_message}" "statefulset.apps/nginx"
  kube::test::if_has_string "${output_message}" "REVISION CHANGE-CAUSE"
  kube::test::if_has_string "${output_message}" "3 kubectl apply"
  kube::test::if_has_string "${output_message}" "4 kubectl apply"
  # Clean up - delete newest configuration
  kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]:?}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit
}

run_stateful_set_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:statefulsets)"

  ### Create and stop statefulset, make sure it doesn't leak pods
  # Pre-condition: no statefulset exists
  kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command: create statefulset
  kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"

  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert statefulsets pods,events

  ### Scale statefulset test with current-replicas and replicas
  # Pre-condition: 0 replicas
  kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '0'
  kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '1'
  # Command: Scale up
  kubectl scale --current-replicas=0 --replicas=1 statefulset nginx "${kube_flags[@]:?}"
  # Post-condition: 1 replica, named nginx-0
  kube::test::get_object_assert 'statefulset nginx' "{{${statefulset_replicas_field:?}}}" '1'
  kube::test::wait_object_assert 'statefulset nginx' "{{${statefulset_observed_generation:?}}}" '2'
  # Typically we'd wait and confirm that N>1 replicas are up, but this framework
  # doesn't start the scheduler, so nginx-0 will block all others.
  # TODO: test robust scaling in an e2e.
  wait-for-pods-with-label "app=nginx-statefulset" "nginx-0"

  # Rollout restart should change generation
  kubectl rollout restart statefulset nginx "${kube_flags[@]}"
  kube::test::get_object_assert 'statefulset nginx' "{{$statefulset_observed_generation}}" '3'

  ### Clean up
  kubectl delete -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}"
  # Post-condition: no pods from statefulset controller
  wait-for-pods-with-label "app=nginx-statefulset" ""

  set +o nounset
  set +o errexit

}

run_rs_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing kubectl(v1:replicasets)"

  ### Create and stop a replica set, make sure it doesn't leak pods
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
  kube::log::status "Deleting rs"
  kubectl delete rs frontend "${kube_flags[@]:?}"
  # Post-condition: no pods from frontend replica set
  kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ### Create and then delete a replica set with cascading strategy set to orphan, make sure it doesn't delete pods.
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
  # wait for all 3 pods to be set up
  kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
  kube::log::status "Deleting rs"
  kubectl delete rs frontend "${kube_flags[@]:?}" --cascade=orphan
  # Wait for the rs to be deleted.
  kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Post-condition: All 3 pods still remain from frontend replica set
  kube::test::get_object_assert "pods -l tier=frontend" "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
  # Cleanup
  kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ### Create replica set frontend from YAML
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
  # Post-condition: frontend replica set is created
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
  # Describe command should print detailed information
  kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_object_events_assert rs 'frontend'
  # Describe command should not print events information when show-events=false
  kube::test::describe_object_events_assert rs 'frontend' false
  # Describe command should print events information when show-events=true
  kube::test::describe_object_events_assert rs 'frontend' true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
  # Describe command should print events information by default
  kube::test::describe_resource_events_assert rs
  # Describe command should not print events information when show-events=false
  kube::test::describe_resource_events_assert rs false
  # Describe command should print events information when show-events=true
  kube::test::describe_resource_events_assert rs true
  # Describe command (resource only) should print detailed information
  kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
  # Describe command should respect the chunk size parameter
  kube::test::describe_resource_chunk_size_assert replicasets pods,events

  ### Scale replica set frontend with current-replicas and replicas
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
  # Dry-run Command
  kubectl scale --dry-run=client --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
  kubectl scale --dry-run=server --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
  # Command
  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
  # Post-condition: 2 replicas
  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '2'
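  # --current-replicas acts as a precondition: the scale request only goes
  # through if the object's current replica count matches the given value, so
  # a stale expectation fails instead of silently overwriting another change.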

  # Set up three deployments; two of them share the same label
  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]:?}"
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all with dry run
  kubectl scale deploy --replicas=3 --all --dry-run=client
  kubectl scale deploy --replicas=3 --all --dry-run=server
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --selector
  kubectl scale deploy --replicas=2 -l run=hello
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '2'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
  # Test kubectl scale --all
  kubectl scale deploy --replicas=3 --all
  kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
  kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
  # Clean-up
  kubectl delete rs frontend "${kube_flags[@]:?}"
  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]:?}"

  ### Expose replica set as service
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
  # Pre-condition: 3 replicas
  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
  # Command
  kubectl expose rs frontend --port=80 "${kube_flags[@]:?}"
  # Post-condition: service exists and the port is unnamed
  kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{${port_field:?}}}" '<no value> 80'
  # Cleanup services
  kubectl delete service frontend "${kube_flags[@]:?}"

  # Test set commands
  # Pre-condition: frontend replica set exists at generation 1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '1'
  kubectl set image rs/frontend "${kube_flags[@]:?}" "*=registry.k8s.io/pause:test-cmd"
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2'
  kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
  kubectl set resources rs/frontend --dry-run=client "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
  kubectl set resources rs/frontend --dry-run=server "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
  kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
  kubectl set serviceaccount rs/frontend --dry-run=client "${kube_flags[@]:?}" serviceaccount1
  kubectl set serviceaccount rs/frontend --dry-run=server "${kube_flags[@]:?}" serviceaccount1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
  kubectl set serviceaccount rs/frontend "${kube_flags[@]:?}" serviceaccount1
  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '5'

  # RS has field for kubectl set field manager
  output_message=$(kubectl get rs frontend --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-set'

  ### Delete replica set with id
  # Pre-condition: frontend replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
  # Command
  kubectl delete rs frontend "${kube_flags[@]:?}"
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  ### Create two replica sets
  # Pre-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
  # Command
  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]:?}"
  # Post-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'

  ### Delete multiple replica sets at once
  # Pre-condition: frontend and redis-slave
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
  # Command
  kubectl delete rs frontend redis-slave "${kube_flags[@]:?}" # delete multiple replica sets at once
  # Post-condition: no replica set exists
  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''

  if kube::test::if_supports_resource "horizontalpodautoscalers" ; then
    ### Auto scale replica set
    # Pre-condition: no replica set exists
    kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
    # Command
    kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
    kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
    # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file
    kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" --max=2 --cpu-percent=70
    kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70'
    kubectl delete hpa frontend "${kube_flags[@]:?}"
    # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name
    kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3
    kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
    # HorizontalPodAutoscaler has field for kubectl autoscale field manager
    output_message=$(kubectl get hpa frontend -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
    kube::test::if_has_string "${output_message}" 'kubectl-autoscale'
    # Clean up
    kubectl delete hpa frontend "${kube_flags[@]:?}"
    # autoscale without specifying --max should fail
    ! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
    # Clean up
    kubectl delete rs frontend "${kube_flags[@]:?}"
  fi

  set +o nounset
  set +o errexit
}