#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail
create_test_pods(){
  # create test pods we can work with
  kubectl create -f - "${kube_flags[@]:?}" << __EOF__
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "test-pod-1",
    "labels": {
      "e": "f",
      "type": "test-pod"
    }
  },
  "spec": {
    "nodeName": "127.0.0.1",
    "containers": [
      {
        "name": "container-1",
        "resources": {},
        "image": "test-image"
      }
    ]
  }
}
__EOF__

  kubectl create -f - "${kube_flags[@]}" << __EOF__
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "test-pod-2",
    "labels": {
      "c": "d",
      "type": "test-pod"
    }
  },
  "spec": {
    "nodeName": "127.0.0.1",
    "containers": [
      {
        "name": "container-1",
        "resources": {},
        "image": "test-image"
      }
    ]
  }
}
__EOF__
}
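
# Note: both manifests above pin spec.nodeName to 127.0.0.1, so the pods are
# bound to the node without going through the scheduler and have no managing
# controller. This is why the drain and delete commands below need --force to
# act on them.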

delete_test_pods() {
  # need to use --force because node is unready
  kubectl delete pod/test-pod-1 --force --ignore-not-found
  kubectl delete pod/test-pod-2 --force --ignore-not-found
}

run_cluster_management_tests() {
  set -o nounset
  set -o errexit

  create_and_use_new_namespace
  kube::log::status "Testing cluster-management commands"

  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'

  create_test_pods

  # taint/untaint
  # Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output
  # Dry-run
  kubectl taint node 127.0.0.1 --dry-run=client dedicated=foo:PreferNoSchedule
  kubectl taint node 127.0.0.1 --dry-run=server dedicated=foo:PreferNoSchedule
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output
  # taint can add a taint (<key>=<value>:<effect>)
  kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=foo:PreferNoSchedule"
  # taint can remove a taint
  kubectl taint node 127.0.0.1 dedicated-
  # taint can add a taint (<key>:<effect>)
  kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=<no value>:PreferNoSchedule"
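  # (A taint added as <key>:<effect> is stored with an empty value, roughly
  #  {"key": "dedicated", "effect": "PreferNoSchedule"}, which the go-template
  #  above renders as "dedicated=<no value>:PreferNoSchedule".)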
  # Node has field manager for kubectl taint
  output_message=$(kubectl get node 127.0.0.1 --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
  kube::test::if_has_string "${output_message}" 'kubectl-taint'
  # Dry-run remove a taint
  kubectl taint node 127.0.0.1 --dry-run=client dedicated-
  kubectl taint node 127.0.0.1 --dry-run=server dedicated-
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=<no value>:PreferNoSchedule"
  # taint can remove a taint
  kubectl taint node 127.0.0.1 dedicated-
  # Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
  kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output

  ### kubectl cordon update with --dry-run does not mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl cordon "127.0.0.1" --dry-run=client
  kubectl cordon "127.0.0.1" --dry-run=server
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain update with --dry-run does not mark node unschedulable
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  kubectl drain "127.0.0.1" --dry-run=client --force
  kubectl drain "127.0.0.1" --dry-run=server --force
  # Post-condition: node still exists, node is still schedulable
  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain with --pod-selector only evicts pods that match the given selector
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # Pre-condition: test-pod-1 and test-pod-2 exist
  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
  # dry-run command
  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=client --force
  kubectl drain "127.0.0.1" --pod-selector 'e in (f)' --dry-run=server --force
  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
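  # ('e in (f)' is set-based label-selector syntax, equivalent here to the
  #  equality selector e=f, so it matches only test-pod-1.)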
  # command - need --force because pod is unmanaged and --skip-wait-for-delete-timeout because node is unready
  response=$(kubectl drain "127.0.0.1" --force --pod-selector 'e in (f)' --skip-wait-for-delete-timeout=1)
  kube::test::if_has_string "${response}" "evicting pod .*/test-pod-1"
  # only "test-pod-1" should have been matched and deleted - test-pod-2 should not have a deletion timestamp
  kube::test::get_object_assert "pods/test-pod-2" "{{.metadata.deletionTimestamp}}" '<no value>'
  # Post-condition: recreate test pods -- they have deletionTimestamp set but will not go away because node is unready
  delete_test_pods
  create_test_pods
  # Post-condition: node is schedulable
  kubectl uncordon "127.0.0.1"
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl uncordon update with --dry-run is a no-op
  # Pre-condition: node is already schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  response=$(kubectl uncordon "127.0.0.1" --dry-run=client)
  kube::test::if_has_string "${response}" 'already uncordoned'
  response=$(kubectl uncordon "127.0.0.1" --dry-run=server)
  kube::test::if_has_string "${response}" 'already uncordoned'
  # Post-condition: node is still schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### kubectl drain command fails when both --selector and a node argument are given
  # Pre-condition: node exists and contains label test=label
  kubectl label node "127.0.0.1" "test=label"
  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  response=$(! kubectl drain "127.0.0.1" --selector test=label 2>&1)
  kube::test::if_has_string "${response}" 'cannot specify both a node name'

  ### Test kubectl drain chunk size
  # Pre-condition: node exists and contains label test=label
  kube::test::get_object_assert "nodes 127.0.0.1" '{{.metadata.labels.test}}' 'label'
  # Pre-condition: node is schedulable
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'
  # Pre-condition: test-pod-1 and test-pod-2 exist
  kube::test::get_object_assert "pods" "{{range .items}}{{.metadata.name}},{{end}}" 'test-pod-1,test-pod-2,'
  # command - need to use force because pods are unmanaged, dry run (or skip-wait) because node is unready
  output_message=$(kubectl --v=6 drain --force --pod-selector type=test-pod --selector test=label --chunk-size=1 --dry-run=client 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get a limit on node, and both limit and continue on pods
  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=1 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?continue=.*&fieldSelector=spec.nodeName%3D127.0.0.1&labelSelector=type%3Dtest-pod&limit=1 200 OK"
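  # (Chunked LIST requests page through results with a "limit" query parameter
  #  plus an opaque server-issued "continue" token; the --v=6 log above records
  #  each request URL, which is what these assertions match against.)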
  # Post-condition: Check we evict multiple pages worth of pods
  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-1"
  kube::test::if_has_string "${output_message}" "evicting pod .*/test-pod-2"
  # Post-condition: node is schedulable
  kubectl uncordon "127.0.0.1"
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" '<no value>'

  ### Test kubectl drain chunk size defaults to 500
  output_message=$(kubectl --v=6 drain --force --selector test=label --dry-run=client 2>&1 "${kube_flags[@]}")
  # Post-condition: Check if we get a limit
  kube::test::if_has_string "${output_message}" "/v1/nodes?labelSelector=test%3Dlabel&limit=500 200 OK"
  kube::test::if_has_string "${output_message}" "/v1/pods?fieldSelector=spec.nodeName%3D127.0.0.1&limit=500 200 OK"
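  # (With no --chunk-size flag given, kubectl falls back to its default page
  #  size of 500, so both list requests above carry limit=500.)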

  ### kubectl cordon command fails when no arguments are passed
  # Pre-condition: node exists
  response=$(! kubectl cordon 2>&1)
  kube::test::if_has_string "${response}" 'error\: USAGE\: cordon NODE'

  ### kubectl cordon selects no nodes with an empty --selector=
  # Pre-condition: node "127.0.0.1" is uncordoned
  kubectl uncordon "127.0.0.1"
  response=$(! kubectl cordon --selector= 2>&1)
  kube::test::if_has_string "${response}" 'must provide one or more resources'
  # test=label matches our node
  response=$(kubectl cordon --selector test=label)
  kube::test::if_has_string "${response}" 'node/127.0.0.1 cordoned'
  # invalid=label does not match any nodes
  response=$(kubectl cordon --selector invalid=label)
  kube::test::if_has_not_string "${response}" 'cordoned'
  # Post-condition: node "127.0.0.1" is cordoned
  kube::test::get_object_assert "nodes 127.0.0.1" "{{.spec.unschedulable}}" 'true'

  # Clean up test pods
  delete_test_pods

  set +o nounset
  set +o errexit
}