1#!/usr/bin/env bash
2
3# Copyright 2014 The Kubernetes Authors.
4#
5# Licensed under the Apache License, Version 2.0 (the "License");
6# you may not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# http://www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an "AS IS" BASIS,
13# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
16
# Verbosity > 4 turns on bash command tracing for the whole script.
KUBE_VERBOSE=${KUBE_VERBOSE:-1}
if (( KUBE_VERBOSE > 4 )); then
  set -x
fi

# Repository root, computed relative to this script's location (hack/..).
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
23
# This script builds and runs a local kubernetes cluster. You may need to run
# this as root to allow kubelet to open docker's socket, and to write the test
# CA in /var/run/kubernetes.
# Usage: `hack/local-up-cluster.sh`.

# Every knob below honors a pre-set environment variable and only fills in a
# default otherwise, so callers may override any of them.
DOCKER_OPTS=${DOCKER_OPTS:-""}
# NOTE(review): bash does not propagate exported arrays to child processes;
# the `export` here only affects the current shell — confirm intent.
export DOCKER=(docker "${DOCKER_OPTS[@]}")
DOCKER_ROOT=${DOCKER_ROOT:-""}
# when non-empty, passed through as --allow-privileged
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
# extra --runtime-config value for the apiserver (e.g. "api/all=true")
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
# where the kubelet looks for static pod manifests
POD_MANIFEST_PATH=${POD_MANIFEST_PATH:-"/var/run/kubernetes/static-pods"}
KUBELET_FLAGS=${KUBELET_FLAGS:-""}
KUBELET_IMAGE=${KUBELET_IMAGE:-""}
# many dev environments run with swap on, so we don't fail in this env
FAIL_SWAP_ON=${FAIL_SWAP_ON:-"false"}
# Name of the dns addon, eg: "kube-dns" or "coredns"
DNS_ADDON=${DNS_ADDON:-"coredns"}
# pod and service CIDRs used by the local cluster
CLUSTER_CIDR=${CLUSTER_CIDR:-10.1.0.0/16}
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
FIRST_SERVICE_CLUSTER_IP=${FIRST_SERVICE_CLUSTER_IP:-10.0.0.1}
# if enabled, must set CGROUP_ROOT
CGROUPS_PER_QOS=${CGROUPS_PER_QOS:-true}
# name of the cgroup driver, i.e. cgroupfs or systemd
CGROUP_DRIVER=${CGROUP_DRIVER:-""}
# if cgroups per qos is enabled, optionally change cgroup root
CGROUP_ROOT=${CGROUP_ROOT:-""}
# owner of client certs, default to current user if not specified
USER=${USER:-$(whoami)}
# if true, limited swap is being used instead of unlimited swap (default)
LIMITED_SWAP=${LIMITED_SWAP:-""}

# required for cni installation
CNI_CONFIG_DIR=${CNI_CONFIG_DIR:-/etc/cni/net.d}
CNI_PLUGINS_VERSION=${CNI_PLUGINS_VERSION:-"v1.4.0"}
# The arch of the CNI binary, if not set, will be fetched based on the value of `uname -m`
CNI_TARGETARCH=${CNI_TARGETARCH:-""}
CNI_PLUGINS_URL="https://github.com/containernetworking/plugins/releases/download"
# pinned checksums for the CNI plugin tarballs, one per supported arch
CNI_PLUGINS_AMD64_SHA256SUM=${CNI_PLUGINS_AMD64_SHA256SUM:-"754a71ed60a4bd08726c3af705a7d55ee3df03122b12e389fdba4bea35d7dd7e"}
CNI_PLUGINS_ARM64_SHA256SUM=${CNI_PLUGINS_ARM64_SHA256SUM:-"de7a666fd6ad83a228086bd55756db62ef335a193d1b143d910b69f079e30598"}
CNI_PLUGINS_PPC64LE_SHA256SUM=${CNI_PLUGINS_PPC64LE_SHA256SUM:-"8ceff026f4eccf33c261b4153af6911e10784ac169d08c1d86cf6887b9f4e99b"}
CNI_PLUGINS_S390X_SHA256SUM=${CNI_PLUGINS_S390X_SHA256SUM:-"2f1f65ac33e961bcdc633e14c376656455824e22cc45d3ca7e31eb2750a7ebc4"}

# enables testing eviction scenarios locally.
EVICTION_HARD=${EVICTION_HARD:-"imagefs.available<15%,memory.available<100Mi,nodefs.available<10%,nodefs.inodesFree<5%"}
EVICTION_SOFT=${EVICTION_SOFT:-""}
EVICTION_PRESSURE_TRANSITION_PERIOD=${EVICTION_PRESSURE_TRANSITION_PERIOD:-"1m"}

# This script uses docker0 (or whatever container bridge docker is currently using)
# and we don't know the IP of the DNS pod to pass in as --cluster-dns.
# To set this up by hand, set this flag and change DNS_SERVER_IP.
# Note also that you need API_HOST (defined below) for correct DNS.
KUBE_PROXY_MODE=${KUBE_PROXY_MODE:-""}
ENABLE_CLUSTER_DNS=${KUBE_ENABLE_CLUSTER_DNS:-true}
ENABLE_NODELOCAL_DNS=${KUBE_ENABLE_NODELOCAL_DNS:-false}
DNS_SERVER_IP=${KUBE_DNS_SERVER_IP:-10.0.0.10}
LOCAL_DNS_IP=${KUBE_LOCAL_DNS_IP:-169.254.20.10}
DNS_MEMORY_LIMIT=${KUBE_DNS_MEMORY_LIMIT:-170Mi}
DNS_DOMAIN=${KUBE_DNS_NAME:-"cluster.local"}
# retry budget handed to kube::util::wait_for_url when waiting for /healthz
WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60}
MAX_TIME_FOR_URL_API_SERVER=${MAX_TIME_FOR_URL_API_SERVER:-1}
ENABLE_DAEMON=${ENABLE_DAEMON:-false}
HOSTNAME_OVERRIDE=${HOSTNAME_OVERRIDE:-"127.0.0.1"}
# cloud-provider wiring (external cloud-controller-manager vs in-tree)
EXTERNAL_CLOUD_PROVIDER=${EXTERNAL_CLOUD_PROVIDER:-false}
EXTERNAL_CLOUD_PROVIDER_BINARY=${EXTERNAL_CLOUD_PROVIDER_BINARY:-""}
EXTERNAL_CLOUD_VOLUME_PLUGIN=${EXTERNAL_CLOUD_VOLUME_PLUGIN:-""}
CONFIGURE_CLOUD_ROUTES=${CONFIGURE_CLOUD_ROUTES:-true}
CLOUD_CTLRMGR_FLAGS=${CLOUD_CTLRMGR_FLAGS:-""}
CLOUD_PROVIDER=${CLOUD_PROVIDER:-""}
CLOUD_CONFIG=${CLOUD_CONFIG:-""}
KUBELET_PROVIDER_ID=${KUBELET_PROVIDER_ID:-"$(hostname)"}
FEATURE_GATES=${FEATURE_GATES:-"AllAlpha=false"}
# CPU manager settings handed to the kubelet (see usage() Example 4)
CPUMANAGER_POLICY=${CPUMANAGER_POLICY:-""}
CPUMANAGER_RECONCILE_PERIOD=${CPUMANAGER_RECONCILE_PERIOD:-""}
CPUMANAGER_POLICY_OPTIONS=${CPUMANAGER_POLICY_OPTIONS:-""}
STORAGE_BACKEND=${STORAGE_BACKEND:-"etcd3"}
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-"application/vnd.kubernetes.protobuf"}
# preserve etcd data. you also need to set ETCD_DIR.
PRESERVE_ETCD="${PRESERVE_ETCD:-false}"
ENABLE_TRACING=${ENABLE_TRACING:-false}
# enable Kubernetes-CSI snapshotter
ENABLE_CSI_SNAPSHOTTER=${ENABLE_CSI_SNAPSHOTTER:-false}

# optional bearer token / extra auth flags for the generated kubeconfigs;
# AUTH_ARGS defaults to the generated admin client cert pair later on
KUBECONFIG_TOKEN=${KUBECONFIG_TOKEN:-""}
AUTH_ARGS=${AUTH_ARGS:-""}

# WebHook Authentication and Authorization
AUTHORIZATION_WEBHOOK_CONFIG_FILE=${AUTHORIZATION_WEBHOOK_CONFIG_FILE:-""}
AUTHENTICATION_WEBHOOK_CONFIG_FILE=${AUTHENTICATION_WEBHOOK_CONFIG_FILE:-""}

# Install a default storage class (enabled by default)
DEFAULT_STORAGE_CLASS=${KUBE_DEFAULT_STORAGE_CLASS:-true}

# Do not run the mutation detector by default on a local cluster.
# It is intended for a specific type of testing and inherently leaks memory.
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-false}"
export KUBE_CACHE_MUTATION_DETECTOR

# panic the server on watch decode errors since they are considered coder mistakes
KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
export KUBE_PANIC_WATCH_DECODE_ERROR

# Default list of admission Controllers to invoke prior to persisting objects in cluster
# The order defined here does not matter.
ENABLE_ADMISSION_PLUGINS=${ENABLE_ADMISSION_PLUGINS:-"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,Priority,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota,NodeRestriction"}
DISABLE_ADMISSION_PLUGINS=${DISABLE_ADMISSION_PLUGINS:-""}
ADMISSION_CONTROL_CONFIG_FILE=${ADMISSION_CONTROL_CONFIG_FILE:-""}

# START_MODE can be 'all', 'kubeletonly', 'nokubelet', 'nokubeproxy', or 'nokubelet,nokubeproxy'
# Default: run everything on Linux; on macOS skip kubelet and kube-proxy,
# which cannot run natively there.
if [[ -z "${START_MODE:-}" ]]; then
  case "$(uname -s)" in
    Darwin)
      START_MODE=nokubelet,nokubeproxy
      ;;
    Linux)
      START_MODE=all
      ;;
    *)
      echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
      exit 1
      ;;
  esac
fi

# A list of controllers to enable
KUBE_CONTROLLERS="${KUBE_CONTROLLERS:-"*"}"

# Audit policy
AUDIT_POLICY_FILE=${AUDIT_POLICY_FILE:-""}

# Stop right away if the build fails
set -e

# Load the shared helper libraries (kube::util::*, kube::etcd::*, etc.).
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::util::ensure-gnu-sed
161
# Print invocation examples for the supported modes of this script.
function usage {
  echo "This script starts a local kube cluster. "
  echo "Example 0: hack/local-up-cluster.sh -h (this 'help' usage description)"
  echo "Example 1: hack/local-up-cluster.sh -o _output/dockerized/bin/linux/amd64/ (run from docker output)"
  echo "Example 2: hack/local-up-cluster.sh -O (auto-guess the bin path for your platform)"
  echo "Example 3: hack/local-up-cluster.sh (build a local copy of the source)"
  echo "Example 4: FEATURE_GATES=CPUManagerPolicyOptions=true \\"
  echo "           CPUMANAGER_POLICY=\"static\" \\"
  echo "           CPUMANAGER_POLICY_OPTIONS=full-pcpus-only=\"true\" \\"
  echo "           CPUMANAGER_RECONCILE_PERIOD=\"5s\" \\"
  echo "           KUBELET_FLAGS=\"--kube-reserved=cpu=1,memory=2Gi,ephemeral-storage=1Gi --system-reserved=cpu=1,memory=2Gi,ephemeral-storage=1Gi\" \\"
  echo "           hack/local-up-cluster.sh (build a local copy of the source with full-pcpus-only CPU Management policy)"
}
175
# This function guesses where the existing cached binary build is for the `-O`
# flag. It prints nothing when no kube-apiserver binary can be located.
function guess_built_binary_path {
  local apiserver_path
  apiserver_path=$(kube::util::find-binary "kube-apiserver")
  # No binary found: emit nothing so the caller can detect the failure.
  [[ -n "${apiserver_path}" ]] || return 0
  printf '%s' "$(dirname "${apiserver_path}")"
}
186
### Allow user to supply the source directory.
GO_OUT=${GO_OUT:-}
# -o DIR : use pre-built binaries from DIR (skips the build)
# -O     : auto-detect the built binary directory for this platform
# -h     : print usage and exit
while getopts "ho:O" OPTION
do
  case ${OPTION} in
    o)
      echo "skipping build"
      GO_OUT="${OPTARG}"
      echo "using source ${GO_OUT}"
      ;;
    O)
      GO_OUT=$(guess_built_binary_path)
      if [ "${GO_OUT}" == "" ]; then
        echo "Could not guess the correct output directory to use."
        exit 1
      fi
      ;;
    h)
      usage
      exit
      ;;
    ?)
      usage
      exit
      ;;
  esac
done

# Build only what this START_MODE needs: kubelet and kube-proxy are skipped
# when the corresponding 'no*' token appears in START_MODE.
if [ -z "${GO_OUT}" ]; then
  binaries_to_build="cmd/kubectl cmd/kube-apiserver cmd/kube-controller-manager cmd/cloud-controller-manager cmd/kube-scheduler"
  if [[ "${START_MODE}" != *"nokubelet"* ]]; then
    binaries_to_build="${binaries_to_build} cmd/kubelet"
  fi
  if [[ "${START_MODE}" != *"nokubeproxy"* ]]; then
    binaries_to_build="${binaries_to_build} cmd/kube-proxy"
  fi
  make -C "${KUBE_ROOT}" WHAT="${binaries_to_build}"
else
  echo "skipped the build because GO_OUT was set (${GO_OUT})"
fi
227
# Shut down anyway if there's an error.
set +e

API_PORT=${API_PORT:-0}
API_SECURE_PORT=${API_SECURE_PORT:-6443}

# WARNING: For DNS to work on most setups you should export API_HOST as the docker0 ip address,
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
ADVERTISE_ADDRESS=${ADVERTISE_ADDRESS:-""}
NODE_PORT_RANGE=${NODE_PORT_RANGE:-""}
API_BIND_ADDR=${API_BIND_ADDR:-"0.0.0.0"}
EXTERNAL_HOSTNAME=${EXTERNAL_HOSTNAME:-localhost}

KUBELET_HOST=${KUBELET_HOST:-"127.0.0.1"}
KUBELET_RESOLV_CONF=${KUBELET_RESOLV_CONF:-"/etc/resolv.conf"}
# By default only allow CORS for requests on localhost
API_CORS_ALLOWED_ORIGINS=${API_CORS_ALLOWED_ORIGINS:-/127.0.0.1(:[0-9]+)?$,/localhost(:[0-9]+)?$}
KUBELET_PORT=${KUBELET_PORT:-10250}
# By default we use 0(close it) for it's insecure
KUBELET_READ_ONLY_PORT=${KUBELET_READ_ONLY_PORT:-0}
LOG_LEVEL=${LOG_LEVEL:-3}
# Use to increase verbosity on particular files, e.g. LOG_SPEC=token_controller*=5,other_controller*=4
LOG_SPEC=${LOG_SPEC:-""}
LOG_DIR=${LOG_DIR:-"/tmp"}
# scratch directory for generated config (audit policy, egress config, SA key)
TMP_DIR=${TMP_DIR:-$(kube::realpath "$(mktemp -d -t "$(basename "$0").XXXXXX")")}
CONTAINER_RUNTIME_ENDPOINT=${CONTAINER_RUNTIME_ENDPOINT:-"unix:///run/containerd/containerd.sock"}
RUNTIME_REQUEST_TIMEOUT=${RUNTIME_REQUEST_TIMEOUT:-"2m"}
IMAGE_SERVICE_ENDPOINT=${IMAGE_SERVICE_ENDPOINT:-""}
CPU_CFS_QUOTA=${CPU_CFS_QUOTA:-true}
ENABLE_HOSTPATH_PROVISIONER=${ENABLE_HOSTPATH_PROVISIONER:-"false"}
CLAIM_BINDER_SYNC_PERIOD=${CLAIM_BINDER_SYNC_PERIOD:-"15s"} # current k8s default
ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # current default
LOCAL_STORAGE_CAPACITY_ISOLATION=${LOCAL_STORAGE_CAPACITY_ISOLATION:-"true"} # current default
# This is the default dir and filename where the apiserver will generate a self-signed cert
# which should be able to be used as the CA to verify itself
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=${CERT_DIR}/server-ca.crt
CLUSTER_SIGNING_CERT_FILE=${CLUSTER_SIGNING_CERT_FILE:-"${CERT_DIR}/client-ca.crt"}
CLUSTER_SIGNING_KEY_FILE=${CLUSTER_SIGNING_KEY_FILE:-"${CERT_DIR}/client-ca.key"}
# Reuse certs will skip generate new ca/cert files under CERT_DIR
# it's useful with PRESERVE_ETCD=true because new ca will make existed service account secrets invalided
REUSE_CERTS=${REUSE_CERTS:-false}


# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
# If CERT_DIR is not writable by the current user, prefix control-plane
# commands with sudo -E; otherwise this expands to nothing.
CONTROLPLANE_SUDO=$(test -w "${CERT_DIR}" || echo "sudo -E")
276
function test_apiserver_off {
  # For the common local scenario, fail fast if server is already running.
  # This can happen if you run local-up-cluster.sh twice and kill etcd in
  # between: anything answering on our ports means another apiserver is alive.
  if [[ "${API_PORT}" -gt "0" ]]; then
    if curl --silent -g "${API_HOST}:${API_PORT}" ; then
      echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_PORT}"
      exit 1
    fi
    echo "API SERVER insecure port is free, proceeding..."
  fi

  if curl --silent -k -g "${API_HOST}:${API_SECURE_PORT}" ; then
    echo "ERROR starting API SERVER, exiting. Some process on ${API_HOST} is serving already on ${API_SECURE_PORT}"
    exit 1
  fi
  echo "API SERVER secure port is free, proceeding..."
}
296
function detect_arch {
  # Normalize `uname -m` output to the arch names used by the build output
  # tree; exits with an error for anything we cannot build for.
  local host_arch

  case "$(uname -m)" in
    x86_64* | i?86_64* | amd64*)
      host_arch=amd64
      ;;
    aarch64* | arm64*)
      host_arch=arm64
      ;;
    arm*)
      host_arch=arm
      ;;
    i?86*)
      host_arch=x86
      ;;
    s390x*)
      host_arch=s390x
      ;;
    ppc64le*)
      host_arch=ppc64le
      ;;
    *)
      echo "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le." >&2
      exit 1
      ;;
  esac

  # Print without a trailing newline so callers can capture it verbatim.
  if [[ -n "${host_arch}" ]]; then
    printf '%s' "${host_arch}"
  fi
}
339
function detect_os {
  # Map `uname -s` to the lower-case OS name used by the build output tree.
  local host_os

  case "$(uname -s)" in
    Linux)
      host_os=linux
      ;;
    Darwin)
      host_os=darwin
      ;;
    *)
      echo "Unsupported host OS. Must be Linux or Mac OS X." >&2
      exit 1
      ;;
  esac

  # Print without a trailing newline so callers can capture it verbatim.
  if [[ -n "${host_os}" ]]; then
    printf '%s' "${host_os}"
  fi
}
361
function detect_binary {
  # Populate host_os/host_arch (intentionally global, matching the original)
  # and point GO_OUT at the locally built binaries for this platform.
  host_os=$(detect_os)
  host_arch=$(detect_arch)

  GO_OUT="${KUBE_ROOT}/_output/local/bin/${host_os}/${host_arch}"
}
368
cleanup()
{
  echo "Cleaning up..."
  # We deliberately do not try to delete the DNS service/rc via kubectl here:
  # etcd may already be gone, which used to make those calls error or hang.

  # For each control-plane component we may have started, kill its children
  # and the process itself (pgrep -P lists the children, ps -o pid= resolves
  # the parent). An empty PID variable means that component never started.
  local pid pids
  for pid in "${APISERVER_PID-}" "${CTLRMGR_PID-}" "${CLOUD_CTLRMGR_PID-}" \
             "${KUBELET_PID-}" "${PROXY_PID-}" "${SCHEDULER_PID-}"; do
    [[ -z "${pid}" ]] && continue
    pids=()
    kube::util::read-array pids < <(pgrep -P "${pid}" ; ps -o pid= -p "${pid}")
    [[ ${#pids[@]} -gt 0 ]] && sudo kill "${pids[@]}" 2>/dev/null
  done

  # etcd has dedicated stop/cleanup helpers; keep its data when asked to.
  [[ -n "${ETCD_PID-}" ]] && kube::etcd::stop
  if [[ "${PRESERVE_ETCD}" == "false" ]]; then
    [[ -n "${ETCD_DIR-}" ]] && kube::etcd::clean_etcd_dir
  fi

  exit 0
}
412
# Check if all processes are still running. Prints a warning once each time
# a process dies unexpectedly.
function healthcheck {
  # One entry per component: "<PID variable name>:<warning message>".
  # When a tracked PID no longer answers `kill -0`, warn once and clear the
  # variable so subsequent sweeps stay quiet.
  local entry pid_var pid
  for entry in \
    "APISERVER_PID:API server terminated unexpectedly, see ${APISERVER_LOG-}" \
    "CTLRMGR_PID:kube-controller-manager terminated unexpectedly, see ${CTLRMGR_LOG-}" \
    "KUBELET_PID:kubelet terminated unexpectedly, see ${KUBELET_LOG-}" \
    "PROXY_PID:kube-proxy terminated unexpectedly, see ${PROXY_LOG-}" \
    "SCHEDULER_PID:scheduler terminated unexpectedly, see ${SCHEDULER_LOG-}" \
    "ETCD_PID:etcd terminated unexpectedly"; do
    pid_var=${entry%%:*}
    pid=${!pid_var-}
    if [[ -n "${pid}" ]] && ! sudo kill -0 "${pid}" 2>/dev/null; then
      warning_log "${entry#*:}"
      # Clear the variable named by pid_var (equivalent to VAR=).
      printf -v "${pid_var}" '%s' ''
    fi
  done
}
446
function print_color {
  # Print "prefix: message" in bold color, then reset the terminal attributes.
  # $1 message, $2 optional prefix, $3 optional tput color (default 1 = red).
  message=$1
  prefix=${2:+$2: } # add colon only if defined
  color=${3:-1}     # default is red
  # Same byte stream as bold+setaf, message+newline, sgr0 in sequence.
  printf '%s%s%s\n%s' "$(tput bold)" "$(tput setaf "${color}")" "${prefix}${message}" "$(tput sgr0)"
}
455
function warning_log {
  # Emit a red warning line with a klog-style "Wmmdd hh:mm:ss]" prefix.
  local stamp
  stamp=$(date "+%m%d %H:%M:%S")
  print_color "$1" "W${stamp}]" 1
}
459
function start_etcd {
  echo "Starting etcd"
  # kube::etcd::start reads ETCD_LOGFILE for its log destination.
  ETCD_LOGFILE=${LOG_DIR}/etcd.log
  export ETCD_LOGFILE
  kube::etcd::start
}
465
function set_service_accounts {
  # Fill in defaults (":=" assigns when unset or empty, like the ${X:-} form).
  : "${SERVICE_ACCOUNT_LOOKUP:=true}"
  : "${SERVICE_ACCOUNT_KEY:=${TMP_DIR}/kube-serviceaccount.key}"
  # Nothing to do when a key already exists at that path.
  if [[ -f "${SERVICE_ACCOUNT_KEY}" ]]; then
    return 0
  fi
  # Generate a fresh 2048-bit RSA ServiceAccount signing key.
  mkdir -p "$(dirname "${SERVICE_ACCOUNT_KEY}")"
  openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
}
475
function generate_certs {
  # Create CA signers
  # With ENABLE_SINGLE_CA_SIGNER one CA (with both usages) doubles as server
  # and client CA: the same key/cert/config is copied to the client-ca names.
  # Otherwise two independent CAs are created.
  if [[ "${ENABLE_SINGLE_CA_SIGNER:-}" = true ]]; then
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"client auth","server auth"'
    sudo cp "${CERT_DIR}/server-ca.key" "${CERT_DIR}/client-ca.key"
    sudo cp "${CERT_DIR}/server-ca.crt" "${CERT_DIR}/client-ca.crt"
    sudo cp "${CERT_DIR}/server-ca-config.json" "${CERT_DIR}/client-ca-config.json"
  else
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" server '"server auth"'
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
  fi

  # Create auth proxy client ca
  kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'

  # serving cert for kube-apiserver
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-apiserver kubernetes.default kubernetes.default.svc "localhost" "${API_HOST_IP}" "${API_HOST}" "${FIRST_SERVICE_CLUSTER_IP}"

  # Create client certs signed with client-ca, given id, given CN and a number of groups
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-apiserver kube-apiserver

  # Create matching certificates for kube-aggregator
  kube::util::create_serving_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "server-ca" kube-aggregator api.kube-public.svc "localhost" "${API_HOST_IP}"
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy

  # TODO remove masters and add rolebinding
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-aggregator system:kube-aggregator system:masters
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-aggregator
}
508
function generate_kubeproxy_certs {
  # kube-proxy authenticates as system:kube-proxy in the system:nodes group;
  # write a matching kubeconfig next to the other generated credentials.
  local signer='client-ca'
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${signer}" kube-proxy system:kube-proxy system:nodes
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
}
513
function generate_kubelet_certs {
  # The kubelet's identity is system:node:<name> in the system:nodes group.
  local node_user="system:node:${HOSTNAME_OVERRIDE}"
  kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet "${node_user}" system:nodes
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
}
518
function start_apiserver {
  # Assemble the authorization flags: a structured authorization config file
  # takes precedence; otherwise use --authorization-mode (default Node,RBAC)
  # plus the optional authz/authn webhook config files.
  authorizer_args=()
  if [[ -n "${AUTHORIZATION_CONFIG:-}" ]]; then
    authorizer_args+=("--authorization-config=${AUTHORIZATION_CONFIG}")
  else
    if [[ -n "${AUTHORIZATION_MODE:-Node,RBAC}" ]]; then
      authorizer_args+=("--authorization-mode=${AUTHORIZATION_MODE:-Node,RBAC}")
    fi
    authorizer_args+=(
      "--authorization-webhook-config-file=${AUTHORIZATION_WEBHOOK_CONFIG_FILE}"
      "--authentication-token-webhook-config-file=${AUTHENTICATION_WEBHOOK_CONFIG_FILE}"
    )
  fi

  priv_arg=""
  if [[ -n "${ALLOW_PRIVILEGED}" ]]; then
    priv_arg="--allow-privileged=${ALLOW_PRIVILEGED}"
  fi

  runtime_config=""
  if [[ -n "${RUNTIME_CONFIG}" ]]; then
    runtime_config="--runtime-config=${RUNTIME_CONFIG}"
  fi

  # Let the API server pick a default address when API_HOST_IP
  # is set to 127.0.0.1
  advertise_address=""
  if [[ "${API_HOST_IP}" != "127.0.0.1" ]]; then
    advertise_address="--advertise-address=${API_HOST_IP}"
  fi
  # An explicit ADVERTISE_ADDRESS wins over the API_HOST_IP derivation above.
  if [[ "${ADVERTISE_ADDRESS}" != "" ]] ; then
    advertise_address="--advertise-address=${ADVERTISE_ADDRESS}"
  fi
  node_port_range=""
  if [[ "${NODE_PORT_RANGE}" != "" ]] ; then
    node_port_range="--service-node-port-range=${NODE_PORT_RANGE}"
  fi

  if [[ "${REUSE_CERTS}" != true ]]; then
    # Clean previous dynamic certs
    # This file is owned by root, so we can't always overwrite it (depends if
    # we run the script as root or not). Let's remove it, that is something we
    # can always do: either we have write permissions as a user in CERT_DIR or
    # we run the rm with sudo.
    ${CONTROLPLANE_SUDO} rm -f "${CERT_DIR}"/kubelet-rotated.kubeconfig

    # Create Certs
    generate_certs
  fi

  # In external cloud-provider mode the apiserver gets no cloud config at all.
  cloud_config_arg="--cloud-provider=${CLOUD_PROVIDER} --cloud-config=${CLOUD_CONFIG}"
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    cloud_config_arg="--cloud-provider=external"
  fi

  # Default egress selector config: all traffic classes dial direct (no
  # konnectivity proxy) unless the caller supplies their own file.
  if [[ -z "${EGRESS_SELECTOR_CONFIG_FILE:-}" ]]; then
    cat <<EOF > "${TMP_DIR}"/kube_egress_selector_configuration.yaml
apiVersion: apiserver.k8s.io/v1beta1
kind: EgressSelectorConfiguration
egressSelections:
- name: cluster
  connection:
    proxyProtocol: Direct
- name: controlplane
  connection:
    proxyProtocol: Direct
- name: etcd
  connection:
    proxyProtocol: Direct
EOF
    EGRESS_SELECTOR_CONFIG_FILE="${TMP_DIR}/kube_egress_selector_configuration.yaml"
  fi

  # Default audit policy: log every request at Metadata level.
  if [[ -z "${AUDIT_POLICY_FILE}" ]]; then
    cat <<EOF > "${TMP_DIR}"/kube-audit-policy-file
# Log all requests at the Metadata level.
apiVersion: audit.k8s.io/v1
kind: Policy
rules:
- level: Metadata
EOF
    AUDIT_POLICY_FILE="${TMP_DIR}/kube-audit-policy-file"
  fi

  APISERVER_LOG=${LOG_DIR}/kube-apiserver.log
  # Launch the apiserver in the background; stdout/stderr go to APISERVER_LOG
  # and the PID is recorded for healthcheck/cleanup. runtime_config and
  # cloud_config_arg are intentionally unquoted so empty values vanish.
  # shellcheck disable=SC2086
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-apiserver" "${authorizer_args[@]}" "${priv_arg}" ${runtime_config} \
    ${cloud_config_arg} \
    "${advertise_address}" \
    "${node_port_range}" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --audit-policy-file="${AUDIT_POLICY_FILE}" \
    --audit-log-path="${LOG_DIR}/kube-apiserver-audit.log" \
    --cert-dir="${CERT_DIR}" \
    --egress-selector-config-file="${EGRESS_SELECTOR_CONFIG_FILE:-}" \
    --client-ca-file="${CERT_DIR}/client-ca.crt" \
    --kubelet-client-certificate="${CERT_DIR}/client-kube-apiserver.crt" \
    --kubelet-client-key="${CERT_DIR}/client-kube-apiserver.key" \
    --service-account-key-file="${SERVICE_ACCOUNT_KEY}" \
    --service-account-lookup="${SERVICE_ACCOUNT_LOOKUP}" \
    --service-account-issuer="https://kubernetes.default.svc" \
    --service-account-jwks-uri="https://kubernetes.default.svc/openid/v1/jwks" \
    --service-account-signing-key-file="${SERVICE_ACCOUNT_KEY}" \
    --enable-admission-plugins="${ENABLE_ADMISSION_PLUGINS}" \
    --disable-admission-plugins="${DISABLE_ADMISSION_PLUGINS}" \
    --admission-control-config-file="${ADMISSION_CONTROL_CONFIG_FILE}" \
    --bind-address="${API_BIND_ADDR}" \
    --secure-port="${API_SECURE_PORT}" \
    --tls-cert-file="${CERT_DIR}/serving-kube-apiserver.crt" \
    --tls-private-key-file="${CERT_DIR}/serving-kube-apiserver.key" \
    --storage-backend="${STORAGE_BACKEND}" \
    --storage-media-type="${STORAGE_MEDIA_TYPE}" \
    --etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" \
    --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
    --feature-gates="${FEATURE_GATES}" \
    --external-hostname="${EXTERNAL_HOSTNAME}" \
    --requestheader-username-headers=X-Remote-User \
    --requestheader-group-headers=X-Remote-Group \
    --requestheader-extra-headers-prefix=X-Remote-Extra- \
    --requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
    --requestheader-allowed-names=system:auth-proxy \
    --proxy-client-cert-file="${CERT_DIR}/client-auth-proxy.crt" \
    --proxy-client-key-file="${CERT_DIR}/client-auth-proxy.key" \
    --cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
  APISERVER_PID=$!

  # Create kubeconfigs for all components, using client certs
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
  ${CONTROLPLANE_SUDO} chown "${USER}" "${CERT_DIR}/client-admin.key" # make readable for kubectl

  # Wait for kube-apiserver to come up before launching the rest of the components.
  echo "Waiting for apiserver to come up"
  kube::util::wait_for_url "https://${API_HOST_IP}:${API_SECURE_PORT}/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \
    || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; }

  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
  kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler

  # Default kubectl auth to the generated admin client cert/key pair.
  if [[ -z "${AUTH_ARGS}" ]]; then
    AUTH_ARGS="--client-key=${CERT_DIR}/client-admin.key --client-certificate=${CERT_DIR}/client-admin.crt"
  fi
  # Grant apiserver permission to speak to the kubelet
  ${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kube-apiserver-kubelet-admin --clusterrole=system:kubelet-api-admin --user=kube-apiserver

  # Grant kubelets permission to request client certificates
  ${KUBECTL} --kubeconfig "${CERT_DIR}/admin.kubeconfig" create clusterrolebinding kubelet-csr --clusterrole=system:certificates.k8s.io:certificatesigningrequests:selfnodeclient --group=system:nodes

  # Publish a second kubeconfig that points at the aggregator's port (31090).
  ${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-kube-aggregator.kubeconfig"
  ${CONTROLPLANE_SUDO} chown -R "$(whoami)" "${CERT_DIR}"
  ${KUBECTL} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-kube-aggregator.kubeconfig" --server="https://${API_HOST_IP}:31090"
  echo "use 'kubectl --kubeconfig=${CERT_DIR}/admin-kube-aggregator.kubeconfig' to use the aggregated API server"

}
673
function start_controller_manager {
  # Cloud-provider flags: when running an external cloud-controller-manager
  # the in-tree provider flags are replaced and volume plugin handling moves
  # to --external-cloud-volume-plugin.
  cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  cloud_config_arg+=("--configure-cloud-routes=${CONFIGURE_CLOUD_ROUTES}")
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    cloud_config_arg=("--cloud-provider=external")
    cloud_config_arg+=("--external-cloud-volume-plugin=${EXTERNAL_CLOUD_VOLUME_PLUGIN}")
    cloud_config_arg+=("--cloud-config=${CLOUD_CONFIG}")
  fi

  CTLRMGR_LOG=${LOG_DIR}/kube-controller-manager.log
  # Run in the background; logs go to CTLRMGR_LOG and the PID is recorded so
  # healthcheck/cleanup can track it.
  ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-controller-manager" \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --service-account-private-key-file="${SERVICE_ACCOUNT_KEY}" \
    --service-cluster-ip-range="${SERVICE_CLUSTER_IP_RANGE}" \
    --root-ca-file="${ROOT_CA_FILE}" \
    --cluster-signing-cert-file="${CLUSTER_SIGNING_CERT_FILE}" \
    --cluster-signing-key-file="${CLUSTER_SIGNING_KEY_FILE}" \
    --enable-hostpath-provisioner="${ENABLE_HOSTPATH_PROVISIONER}" \
    --pvclaimbinder-sync-period="${CLAIM_BINDER_SYNC_PERIOD}" \
    --feature-gates="${FEATURE_GATES}" \
    "${cloud_config_arg[@]}" \
    --authentication-kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --authorization-kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --use-service-account-credentials \
    --controllers="${KUBE_CONTROLLERS}" \
    --leader-elect=false \
    --cert-dir="${CERT_DIR}" \
    --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CTLRMGR_LOG}" 2>&1 &
  CTLRMGR_PID=$!
}
706
function start_cloud_controller_manager {
  # The external cloud-controller-manager requires an existing cloud config.
  if [ -z "${CLOUD_CONFIG}" ]; then
    echo "CLOUD_CONFIG cannot be empty!"
    exit 1
  fi
  if [ ! -f "${CLOUD_CONFIG}" ]; then
    echo "Cloud config ${CLOUD_CONFIG} doesn't exist"
    exit 1
  fi

  CLOUD_CTLRMGR_LOG=${LOG_DIR}/cloud-controller-manager.log
  # Prefer an externally supplied CCM binary; fall back to the one we built.
  # CLOUD_CTLRMGR_FLAGS is intentionally unquoted so one variable can carry
  # several extra flags.
  # shellcheck disable=SC2086
  ${CONTROLPLANE_SUDO} "${EXTERNAL_CLOUD_PROVIDER_BINARY:-"${GO_OUT}/cloud-controller-manager"}" \
    ${CLOUD_CTLRMGR_FLAGS} \
    --v="${LOG_LEVEL}" \
    --vmodule="${LOG_SPEC}" \
    --feature-gates="${FEATURE_GATES}" \
    --cloud-provider="${CLOUD_PROVIDER}" \
    --cloud-config="${CLOUD_CONFIG}" \
    --configure-cloud-routes="${CONFIGURE_CLOUD_ROUTES}" \
    --kubeconfig "${CERT_DIR}"/controller.kubeconfig \
    --use-service-account-credentials \
    --leader-elect=false \
    --master="https://${API_HOST}:${API_SECURE_PORT}" >"${CLOUD_CTLRMGR_LOG}" 2>&1 &
  export CLOUD_CTLRMGR_PID=$!
}
733
function wait_node_ready(){
  # Wait for the kubelet-registered node to appear in `get nodes`, then for
  # it to report condition Ready; exits the script on either timeout.
  # kube::util::wait_for_success re-runs the given command string until it
  # succeeds or the time budget is exhausted.
  local nodes_stats="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' get nodes"
  local node_name=$HOSTNAME_OVERRIDE
  local system_node_wait_time=60
  local interval_time=2
  # Check the command's status directly instead of inspecting $? afterwards:
  # the old `if [ $? == "1" ]` (SC2181, plus a `==` bashism inside `[ ]`)
  # silently ignored any non-zero status other than exactly 1.
  if ! kube::util::wait_for_success "$system_node_wait_time" "$interval_time" "$nodes_stats | grep $node_name"; then
    echo "time out on waiting $node_name exist"
    exit 1
  fi

  local system_node_ready_time=300
  local node_ready="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' wait --for=condition=Ready --timeout=60s nodes $node_name"
  if ! kube::util::wait_for_success "$system_node_ready_time" "$interval_time" "$node_ready"; then
    echo "time out on waiting $node_name info"
    exit 1
  fi
}
754
function refresh_docker_containerd_runc {
  # Upgrade docker/containerd/runc from the upstream Docker apt repository
  # (the kubekins image ships older versions) and enable cri-containerd in
  # the docker systemd unit. Must run as root.
  apt-get update
  # -y keeps the install non-interactive so unattended CI runs do not hang
  # on the apt confirmation prompt (it was missing here before).
  apt-get install -y ca-certificates curl gnupg ripgrep tree vim
  install -m 0755 -d /etc/apt/keyrings
  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
  chmod a+r /etc/apt/keyrings/docker.gpg

  # Register the Docker apt repository for this host's architecture/release.
  # shellcheck disable=SC2027 disable=SC2046
  echo \
    "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
    "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
    tee /etc/apt/sources.list.d/docker.list > /dev/null

  apt-get update
  apt-get -y install docker-ce docker-ce-cli containerd.io docker-buildx-plugin
  groupadd docker
  usermod -aG docker "$USER"

  # Make dockerd expose containerd's CRI endpoint for the kubelet.
  if ! grep -q "cri-containerd" "/lib/systemd/system/docker.service"; then
    sed -i "s/ExecStart=\(.*\)/ExecStart=\1 --cri-containerd/" /lib/systemd/system/docker.service
  fi

  # Debugging conveniences (ripgrep was listed twice before; deduplicated).
  apt-get install -y conntrack vim htop ripgrep dnsutils tree build-essential
}
779
function wait_coredns_available(){
  # Recreate the coredns pods, wait for them to become Ready and for the
  # deployment to report Available (exit 1 on timeout), then start
  # streaming dmesg into LOG_DIR for later debugging.
  local interval_time=2
  local coredns_wait_time=300

  # kick the coredns pods to be recreated
  ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" -n kube-system delete pods -l k8s-app=kube-dns
  sleep 30

  local coredns_pods_ready="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' wait --for=condition=Ready --timeout=60s pods -l k8s-app=kube-dns -n kube-system"
  # Test the command status directly: the previous `[ $? == "1" ]` check
  # only caught an exit code of exactly 1 and missed other failures.
  if ! kube::util::wait_for_success "$coredns_wait_time" "$interval_time" "$coredns_pods_ready"; then
    echo "time out on waiting for coredns pods"
    exit 1
  fi

  local coredns_available="${KUBECTL} --kubeconfig '${CERT_DIR}/admin.kubeconfig' wait --for=condition=Available --timeout=60s deployments coredns -n kube-system"
  if ! kube::util::wait_for_success "$coredns_wait_time" "$interval_time" "$coredns_available"; then
    echo "time out on waiting for coredns deployment"
    exit 1
  fi

  # bump log level
  echo "6" | sudo tee /proc/sys/kernel/printk

  # loop through and grab all things in dmesg
  dmesg > "${LOG_DIR}/dmesg.log"
  dmesg -w --human >> "${LOG_DIR}/dmesg.log" &
}
809
function start_kubelet {
  # Build the kubelet flag set, generate a KubeletConfiguration file in
  # TMP_DIR/kubelet.yaml from environment toggles, then launch the kubelet
  # in the background under sudo, logging to KUBELET_LOG. Sets KUBELET_PID.
  KUBELET_LOG=${LOG_DIR}/kubelet.log
  mkdir -p "${POD_MANIFEST_PATH}" &>/dev/null || sudo mkdir -p "${POD_MANIFEST_PATH}"

  # Default: in-tree cloud provider via flags.
  cloud_config_arg=("--cloud-provider=${CLOUD_PROVIDER}" "--cloud-config=${CLOUD_CONFIG}")
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    # External cloud provider: the kubelet only advertises "external"; the
    # provider ID comes from AWS instance metadata or KUBELET_PROVIDER_ID.
    cloud_config_arg=("--cloud-provider=external")
    if [[ "${CLOUD_PROVIDER:-}" == "aws" ]]; then
      cloud_config_arg+=("--provider-id=$(curl http://169.254.169.254/latest/meta-data/instance-id)")
    else
      cloud_config_arg+=("--provider-id=${KUBELET_PROVIDER_ID}")
    fi
  fi

  mkdir -p "/var/lib/kubelet" &>/dev/null || sudo mkdir -p "/var/lib/kubelet"

  image_service_endpoint_args=()
  if [[ -n "${IMAGE_SERVICE_ENDPOINT}" ]]; then
    image_service_endpoint_args=("--image-service-endpoint=${IMAGE_SERVICE_ENDPOINT}")
  fi

  # KUBELET_FLAGS is intentionally unquoted so it word-splits into flags.
  # shellcheck disable=SC2206
  all_kubelet_flags=(
    "--v=${LOG_LEVEL}"
    "--vmodule=${LOG_SPEC}"
    "--hostname-override=${HOSTNAME_OVERRIDE}"
    "${cloud_config_arg[@]}"
    "--bootstrap-kubeconfig=${CERT_DIR}/kubelet.kubeconfig"
    "--kubeconfig=${CERT_DIR}/kubelet-rotated.kubeconfig"
    ${image_service_endpoint_args[@]+"${image_service_endpoint_args[@]}"}
    ${KUBELET_FLAGS}
  )

  # warn if users are running with swap allowed
  if [ "${FAIL_SWAP_ON}" == "false" ]; then
    echo "WARNING : The kubelet is configured to not fail even if swap is enabled; production deployments should disable swap unless testing NodeSwap feature."
  fi

  if [[ "${REUSE_CERTS}" != true ]]; then
    # clear previous dynamic certs
    sudo rm -fr "/var/lib/kubelet/pki" "${CERT_DIR}/kubelet-rotated.kubeconfig"
    # create new certs
    generate_kubelet_certs
  fi

  # Base KubeletConfiguration; conditional sections are appended below.
  cat <<EOF > "${TMP_DIR}"/kubelet.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
address: "${KUBELET_HOST}"
cgroupDriver: "${CGROUP_DRIVER}"
cgroupRoot: "${CGROUP_ROOT}"
cgroupsPerQOS: ${CGROUPS_PER_QOS}
containerRuntimeEndpoint: ${CONTAINER_RUNTIME_ENDPOINT}
cpuCFSQuota: ${CPU_CFS_QUOTA}
enableControllerAttachDetach: ${ENABLE_CONTROLLER_ATTACH_DETACH}
localStorageCapacityIsolation: ${LOCAL_STORAGE_CAPACITY_ISOLATION}
evictionPressureTransitionPeriod: "${EVICTION_PRESSURE_TRANSITION_PERIOD}"
failSwapOn: ${FAIL_SWAP_ON}
port: ${KUBELET_PORT}
readOnlyPort: ${KUBELET_READ_ONLY_PORT}
rotateCertificates: true
runtimeRequestTimeout: "${RUNTIME_REQUEST_TIMEOUT}"
staticPodPath: "${POD_MANIFEST_PATH}"
resolvConf: "${KUBELET_RESOLV_CONF}"
EOF

  if [[ "$ENABLE_TRACING" = true ]]; then
    cat <<EOF >> "${TMP_DIR}"/kubelet.yaml
tracing:
 endpoint: localhost:4317 # the default value
 samplingRatePerMillion: 1000000 # sample always
EOF
  fi

  if [[ "$LIMITED_SWAP" == "true" ]]; then
    cat <<EOF >> "${TMP_DIR}"/kubelet.yaml
memorySwap:
 swapBehavior: LimitedSwap
EOF
  fi

  # Append the environment-dependent sections; every echo inside this group
  # lands in kubelet.yaml.
  {
    # authentication: webhook token review plus x509 client-CA fallback
    echo "authentication:"
    echo " webhook:"
    if [[ "${KUBELET_AUTHENTICATION_WEBHOOK:-}" != "false" ]]; then
      echo " enabled: true"
    else
      echo " enabled: false"
    fi
    echo " x509:"
    if [[ -n "${CLIENT_CA_FILE:-}" ]]; then
      echo " clientCAFile: \"${CLIENT_CA_FILE}\""
    else
      echo " clientCAFile: \"${CERT_DIR}/client-ca.crt\""
    fi

    # authorization: delegate to the API server unless explicitly disabled
    if [[ "${KUBELET_AUTHORIZATION_WEBHOOK:-}" != "false" ]]; then
      echo "authorization:"
      echo " mode: Webhook"
    fi

    # dns
    if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
      if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
        echo "clusterDNS: [ \"${LOCAL_DNS_IP}\" ]"
      else
        echo "clusterDNS: [ \"${DNS_SERVER_IP}\" ]"
      fi
      echo "clusterDomain: \"${DNS_DOMAIN}\""
    else
      # To start a private DNS server set ENABLE_CLUSTER_DNS and
      # DNS_SERVER_IP/DOMAIN. This will at least provide a working
      # DNS server for real world hostnames.
      echo "clusterDNS: [ \"8.8.8.8\" ]"
    fi

    # eviction thresholds, e.g. memory.available<100Mi (see parse_eviction)
    if [[ -n ${EVICTION_HARD} ]]; then
      echo "evictionHard:"
      parse_eviction "${EVICTION_HARD}"
    fi
    if [[ -n ${EVICTION_SOFT} ]]; then
      echo "evictionSoft:"
      parse_eviction "${EVICTION_SOFT}"
    fi

    # feature gate
    if [[ -n ${FEATURE_GATES} ]]; then
      parse_feature_gates "${FEATURE_GATES}"
    fi

    # cpumanager policy
    if [[ -n ${CPUMANAGER_POLICY} ]]; then
      echo "cpuManagerPolicy: \"${CPUMANAGER_POLICY}\""
    fi

    # cpumanager reconcile period
    if [[ -n ${CPUMANAGER_RECONCILE_PERIOD} ]]; then
      echo "cpuManagerReconcilePeriod: \"${CPUMANAGER_RECONCILE_PERIOD}\""
    fi

    # cpumanager policy options
    if [[ -n ${CPUMANAGER_POLICY_OPTIONS} ]]; then
      parse_cpumanager_policy_options "${CPUMANAGER_POLICY_OPTIONS}"
    fi

  } >>"${TMP_DIR}"/kubelet.yaml

  # Launch the kubelet as root (required to reach the container runtime),
  # preserving the environment (-E) for KUBE_* variables.
  # shellcheck disable=SC2024
  sudo -E "${GO_OUT}/kubelet" "${all_kubelet_flags[@]}" \
    --config="${TMP_DIR}"/kubelet.yaml >"${KUBELET_LOG}" 2>&1 &
  KUBELET_PID=$!

  # Quick check that kubelet is running.
  if [ -n "${KUBELET_PID}" ] && ps -p ${KUBELET_PID} > /dev/null; then
    echo "kubelet ( ${KUBELET_PID} ) is running."
  else
    cat "${KUBELET_LOG}" ; exit 1
  fi
}
972
function start_kubeproxy {
  # Generate a KubeProxyConfiguration in TMP_DIR/kube-proxy.yaml and start
  # kube-proxy in the background, logging to PROXY_LOG. Sets PROXY_PID.
  PROXY_LOG=${LOG_DIR}/kube-proxy.log

  if [[ "${START_MODE}" != *"nokubelet"* ]]; then
    # wait for kubelet collect node information
    echo "wait kubelet ready"
    wait_node_ready
  fi

  # Conntrack tuning is zeroed out so kube-proxy does not touch host sysctls.
  cat <<EOF > "${TMP_DIR}"/kube-proxy.yaml
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clientConnection:
 kubeconfig: ${CERT_DIR}/kube-proxy.kubeconfig
hostnameOverride: ${HOSTNAME_OVERRIDE}
mode: ${KUBE_PROXY_MODE}
conntrack:
# Skip setting sysctl value "net.netfilter.nf_conntrack_max"
 maxPerCore: 0
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_established"
 tcpEstablishedTimeout: 0s
# Skip setting "net.netfilter.nf_conntrack_tcp_timeout_close"
 tcpCloseWaitTimeout: 0s
EOF
  # Append feature gates to the generated config.
  if [[ -n ${FEATURE_GATES} ]]; then
    parse_feature_gates "${FEATURE_GATES}"
  fi >>"${TMP_DIR}"/kube-proxy.yaml

  if [[ "${REUSE_CERTS}" != true ]]; then
    generate_kubeproxy_certs
  fi

  # Run as root: kube-proxy programs iptables/ipvs.
  # shellcheck disable=SC2024
  sudo "${GO_OUT}/kube-proxy" \
    --v="${LOG_LEVEL}" \
    --config="${TMP_DIR}"/kube-proxy.yaml \
    --master="https://${API_HOST}:${API_SECURE_PORT}" >"${PROXY_LOG}" 2>&1 &
  PROXY_PID=$!
}
1012
1013function start_kubescheduler {
1014 SCHEDULER_LOG=${LOG_DIR}/kube-scheduler.log
1015
1016 cat <<EOF > "${TMP_DIR}"/kube-scheduler.yaml
1017apiVersion: kubescheduler.config.k8s.io/v1
1018kind: KubeSchedulerConfiguration
1019clientConnection:
1020 kubeconfig: ${CERT_DIR}/scheduler.kubeconfig
1021leaderElection:
1022 leaderElect: false
1023EOF
1024 ${CONTROLPLANE_SUDO} "${GO_OUT}/kube-scheduler" \
1025 --v="${LOG_LEVEL}" \
1026 --config="${TMP_DIR}"/kube-scheduler.yaml \
1027 --feature-gates="${FEATURE_GATES}" \
1028 --authentication-kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \
1029 --authorization-kubeconfig "${CERT_DIR}"/scheduler.kubeconfig \
1030 --master="https://${API_HOST}:${API_SECURE_PORT}" >"${SCHEDULER_LOG}" 2>&1 &
1031 SCHEDULER_PID=$!
1032}
1033
function start_dns_addon {
  # Render and deploy the configured DNS addon (coredns or kube-dns) into
  # kube-system when cluster DNS is enabled; exits 1 if creation fails.
  if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
    cp "${KUBE_ROOT}/cluster/addons/dns/${DNS_ADDON}/${DNS_ADDON}.yaml.in" dns.yaml
    # Fill in all template placeholders in a single sed pass.
    ${SED} -i -e "s/dns_domain/${DNS_DOMAIN}/g" \
      -e "s/dns_server/${DNS_SERVER_IP}/g" \
      -e "s/dns_memory_limit/${DNS_MEMORY_LIMIT}/g" dns.yaml
    # TODO update to dns role once we have one.
    # use kubectl to create dns addon
    if ! ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f dns.yaml ; then
      echo "Something is wrong with your DNS input"
      cat dns.yaml
      exit 1
    fi
    echo "${DNS_ADDON} addon successfully deployed."
    rm dns.yaml
  fi
}
1052
function start_nodelocaldns {
  # Render and deploy the node-local DNS cache addon into kube-system.
  cp "${KUBE_ROOT}/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml" nodelocaldns.yaml
  # eventually all the __PILLAR__ stuff will be gone, but theyre still in nodelocaldns for backward compat.
  # Substitute every placeholder in one sed invocation.
  ${SED} -i -e "s/__PILLAR__DNS__DOMAIN__/${DNS_DOMAIN}/g" \
    -e "s/__PILLAR__DNS__SERVER__/${DNS_SERVER_IP}/g" \
    -e "s/__PILLAR__LOCAL__DNS__/${LOCAL_DNS_IP}/g" nodelocaldns.yaml

  # use kubectl to create nodelocaldns addon
  ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f nodelocaldns.yaml
  echo "NodeLocalDNS addon successfully deployed."
  rm nodelocaldns.yaml
}
1065
function start_csi_snapshotter {
  # Deploy the CSI snapshot CRDs and the volume-snapshot-controller when
  # ENABLE_CSI_SNAPSHOTTER is set. Manifests are applied in order: CRDs
  # first, then RBAC, then the controller deployment.
  if [[ "${ENABLE_CSI_SNAPSHOTTER}" = true ]]; then
    echo "Creating Kubernetes-CSI snapshotter"
    local manifest
    for manifest in \
      "crd/snapshot.storage.k8s.io_volumesnapshots.yaml" \
      "crd/snapshot.storage.k8s.io_volumesnapshotclasses.yaml" \
      "crd/snapshot.storage.k8s.io_volumesnapshotcontents.yaml" \
      "volume-snapshot-controller/rbac-volume-snapshot-controller.yaml" \
      "volume-snapshot-controller/volume-snapshot-controller-deployment.yaml"; do
      ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" apply -f "${KUBE_ROOT}/cluster/addons/volumesnapshots/${manifest}"
    done

    echo "Kubernetes-CSI snapshotter successfully deployed."
  fi
}
1078
function create_storage_class {
  # Create the provider-specific default StorageClass; falls back to the
  # "local" manifest when no cloud provider is configured. Prints a notice
  # when no manifest exists for the provider.
  local subdir="local"
  if [[ -n "${CLOUD_PROVIDER}" ]]; then
    subdir="${CLOUD_PROVIDER}"
  fi
  CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/${subdir}/default.yaml

  if [[ -e "${CLASS_FILE}" ]]; then
    echo "Create default storage class for ${CLOUD_PROVIDER}"
    ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${CLASS_FILE}"
  else
    echo "No storage class available for ${CLOUD_PROVIDER}."
  fi
}
1093
function print_success {
# Print a post-start summary: generated config files, per-component log
# locations, and kubectl setup instructions. Output varies with START_MODE
# (all / nokubelet / kubeletonly) and ENABLE_DAEMON.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  if [[ "${ENABLE_DAEMON}" = false ]]; then
    echo "Local Kubernetes cluster is running. Press Ctrl-C to shut it down."
  else
    echo "Local Kubernetes cluster is running."
  fi

  echo
  echo "Configurations:"
  for f in "${TMP_DIR}"/*; do
    echo " ${f}"
  done

  # Control-plane log files; unset ones expand to empty lines.
  cat <<EOF

Logs:
 ${ETCD_LOGFILE:-}
 ${APISERVER_LOG:-}
 ${CTLRMGR_LOG:-}
 ${CLOUD_CTLRMGR_LOG:-}
 ${PROXY_LOG:-}
 ${SCHEDULER_LOG:-}
EOF
fi

if [[ "${START_MODE}" == "all" ]]; then
  echo " ${KUBELET_LOG}"
elif [[ "${START_MODE}" == *"nokubelet"* ]]; then
  echo
  echo "No kubelet was started because you set START_MODE=nokubelet"
  echo "Run this script again with START_MODE=kubeletonly to run a kubelet"
fi

if [[ "${START_MODE}" != "kubeletonly" ]]; then
  echo
  if [[ "${ENABLE_DAEMON}" = false ]]; then
    echo "To start using your cluster, you can open up another terminal/tab and run:"
  else
    echo "To start using your cluster, run:"
  fi
  # kubectl usage instructions for the freshly generated admin kubeconfig.
  cat <<EOF

 export KUBECONFIG=${CERT_DIR}/admin.kubeconfig
 cluster/kubectl.sh

Alternatively, you can write to the default kubeconfig:

 export KUBERNETES_PROVIDER=local

 cluster/kubectl.sh config set-cluster local --server=https://${API_HOST}:${API_SECURE_PORT} --certificate-authority=${ROOT_CA_FILE}
 cluster/kubectl.sh config set-credentials myself ${AUTH_ARGS}
 cluster/kubectl.sh config set-context local --cluster=local --user=myself
 cluster/kubectl.sh config use-context local
 cluster/kubectl.sh
EOF
else
  cat <<EOF
The kubelet was started.

Logs:
 ${KUBELET_LOG}
EOF
fi
}
1159
function parse_cpumanager_policy_options {
  # Emit a cpuManagerPolicyOptions YAML map from "foo=true,bar=false":
  # each option becomes an indented, quoted `key: "value"` line.
  echo "cpuManagerPolicyOptions:"
  local option
  # ${1//,/ } turns commas into spaces; the unquoted expansion then
  # word-splits exactly like the old tr pipeline did.
  for option in ${1//,/ }; do
    echo "${option}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: "\2"/'
  done
}
1169
function parse_feature_gates {
  # Emit a featureGates YAML map from "Foo=true,Bar=false": each gate
  # becomes an indented, unquoted `Name: bool` line.
  echo "featureGates:"
  local gate
  # ${1//,/ } turns commas into spaces; the unquoted expansion then
  # word-splits exactly like the old tr pipeline did.
  for gate in ${1//,/ }; do
    echo "${gate}" | ${SED} -e 's/\(.*\)=\(.*\)/ \1: \2/'
  done
}
1179
function parse_eviction {
  # Convert eviction specs like
  #   memory.available<100Mi,nodefs.available<10%
  # into indented YAML entries with quoted values:
  #   memory.available: "100Mi"
  #   nodefs.available: "10%"
  local threshold
  for threshold in ${1//,/ }; do
    # One sed pass, three scripts in order: '<' -> ': "', indent, close quote.
    echo "${threshold}" | ${SED} -e 's/</: \"/' -e 's/^/ /' -e 's/$/\"/'
  done
}
1189
1190function update_packages {
1191 apt-get update && apt-get install -y sudo
1192 apt-get remove -y systemd
1193
1194 # Do not update docker / containerd / runc
1195 sed -i 's/\(.*\)docker\(.*\)/#\1docker\2/' /etc/apt/sources.list
1196
1197 # jump through hoops to avoid removing docker/containerd
1198 # when installing nftables and kmod, as those docker/containerd
1199 # packages depend on iptables
1200 dpkg -r --force-depends iptables && \
1201 apt -y --fix-broken install && \
1202 apt -y install nftables kmod && \
1203 apt -y install iptables
1204}
1205
function tolerate_cgroups_v2 {
  # On cgroup v2 hosts, enable controller nesting so nested containers
  # (docker-in-docker) can manage their own sub-cgroups. No-op on cgroup v1.
  # https://github.com/moby/moby/blob/be220af9fb36e9baa9a75bbc41f784260aa6f96e/hack/dind#L28-L38
  # cgroup v2: enable nesting
  if [ -f /sys/fs/cgroup/cgroup.controllers ]; then
    # move the processes from the root group to the /init group,
    # otherwise writing subtree_control fails with EBUSY.
    # An error during moving non-existent process (i.e., "cat") is ignored.
    mkdir -p /sys/fs/cgroup/init
    xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || :
    # enable controllers
    sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \
      > /sys/fs/cgroup/cgroup.subtree_control
  fi
}
1220
function install_cni {
  # Download the CNI plugin tarball for this architecture, verify its
  # sha256, install the plugins into /opt/cni/bin (keeping only the four
  # plugins local-up-cluster needs), and write a bridge+portmap CNI config
  # for containerd into CNI_CONFIG_DIR.
  if [[ -n "${CNI_TARGETARCH}" ]]; then
    host_arch="${CNI_TARGETARCH}"
  else
    host_arch=$(detect_arch)
  fi

  cni_plugin_tarball="cni-plugins-linux-${host_arch}-${CNI_PLUGINS_VERSION}.tgz"
  cni_plugins_url="${CNI_PLUGINS_URL}/${CNI_PLUGINS_VERSION}/${cni_plugin_tarball}"
  cni_plugin_sha_url="${cni_plugins_url}.sha256"

  # The && chain aborts the installation at the first failing step.
  # The final find prunes every plugin binary except host-local, bridge,
  # portmap and loopback.
  echo "Installing CNI plugin binaries ..." &&
    cd "${TMP_DIR}" &&
    curl -sSL --retry 5 -o "${cni_plugin_tarball}" "${cni_plugins_url}" &&
    curl -sSL --retry 5 -o "${cni_plugin_tarball}.sha256" "${cni_plugin_sha_url}" &&
    sha256sum -c "${cni_plugin_tarball}.sha256" &&
    rm -f "${cni_plugin_tarball}.sha256" &&
    sudo mkdir -p /opt/cni/bin &&
    sudo tar -C /opt/cni/bin -xzvf "${cni_plugin_tarball}" &&
    rm -rf "${cni_plugin_tarball}" &&
    sudo find /opt/cni/bin -type f -not \( \
      -iname host-local \
      -o -iname bridge \
      -o -iname portmap \
      -o -iname loopback \
      \) \
      -delete

  # containerd 1.4.12 installed by docker in kubekins supports CNI version 0.4.0
  echo "Configuring cni"
  sudo mkdir -p "$CNI_CONFIG_DIR"
  cat << EOF | sudo tee "$CNI_CONFIG_DIR"/10-containerd-net.conflist
{
  "cniVersion": "1.0.0",
  "name": "containerd-net",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "promiscMode": true,
      "ipam": {
        "type": "host-local",
        "ranges": [
          [{
            "subnet": "10.88.0.0/16"
          }],
          [{
            "subnet": "2001:db8:4860::/64"
          }]
        ],
        "routes": [
          { "dst": "0.0.0.0/0" },
          { "dst": "::/0" }
        ]
      }
    },
    {
      "type": "portmap",
      "capabilities": {"portMappings": true},
      "externalSetMarkChain": "KUBE-MARK-MASQ"
    }
  ]
}
EOF
}
1288
function install_cni_if_needed {
  # Install the CNI plugins only when they are not already present.
  echo "Checking CNI Installation at /opt/cni/bin"
  if command -v /opt/cni/bin/loopback &> /dev/null ; then
    return 0
  fi
  echo "CNI Installation not found at /opt/cni/bin"
  install_cni
}
1296
# If we are running in the CI, we need a few more things before we can start
if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  echo "Preparing to test ..."
  # Build/install test prerequisites: etcd and the ginkgo test runner.
  "${KUBE_ROOT}"/hack/install-etcd.sh
  export PATH="${KUBE_ROOT}/third_party/etcd:${PATH}"
  KUBE_FASTBUILD=true make ginkgo cross

  # install things we need that are missing from the kubekins image
  update_packages

  # configure shared mounts to prevent failure in DIND scenarios
  mount --make-rshared /

  # kubekins has a special directory for docker root
  DOCKER_ROOT="/docker-graph"

  # to use docker installed containerd as kubelet container runtime
  # we need to enable cri and install cni
  # install cni for docker in docker
  install_cni

  # If we are running in a cgroups v2 environment
  # we need to enable nesting
  tolerate_cgroups_v2

  # enable cri for docker in docker
  echo "enable cri"
  # shellcheck disable=SC2129
  echo "DOCKER_OPTS=\"\${DOCKER_OPTS} --cri-containerd\"" >> /etc/default/docker

  # enable debug
  echo "DOCKER_OPTS=\"\${DOCKER_OPTS} --debug\"" >> /etc/default/docker

  # let's log it where we can grab it later
  echo "DOCKER_LOGFILE=${LOG_DIR}/docker.log" >> /etc/default/docker

  # Restart docker so the new options and the refreshed
  # docker/containerd/runc binaries take effect.
  echo "stopping docker"
  service docker stop

  # bump up things
  refresh_docker_containerd_runc

  # check if the new stuff is there
  docker version
  containerd --version
  runc --version

  echo "starting docker"
  service docker start
fi
1347
# validate that etcd is: not running, in path, and has minimum required version.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  kube::etcd::validate
fi

# Make sure no stale apiserver is already answering on the secure port.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  test_apiserver_off
fi

# openssl and cfssl are needed for certificate generation below.
kube::util::test_openssl_installed
kube::util::ensure-cfssl

### IF the user didn't supply an output/ for the build... Then we detect.
if [ "${GO_OUT}" == "" ]; then
  detect_binary
fi
echo "Detected host and ready to start services. Doing some housekeeping first..."
echo "Using GO_OUT ${GO_OUT}"
export KUBELET_CIDFILE=${TMP_DIR}/kubelet.cid
# When not daemonizing, tear everything down on exit or Ctrl-C.
if [[ "${ENABLE_DAEMON}" = false ]]; then
  trap cleanup EXIT
  trap cleanup INT
fi

KUBECTL=$(kube::util::find-binary "kubectl")
1373
echo "Starting services now!"
# Control plane first: etcd, apiserver, controller-manager, optional external
# cloud-controller-manager, scheduler, then the cluster addons.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  start_etcd
  set_service_accounts
  start_apiserver
  start_controller_manager
  if [[ "${EXTERNAL_CLOUD_PROVIDER:-}" == "true" ]]; then
    start_cloud_controller_manager
  fi
  start_kubescheduler
  start_dns_addon
  if [[ "${ENABLE_NODELOCAL_DNS:-}" == "true" ]]; then
    start_nodelocaldns
  fi
  start_csi_snapshotter
fi

# Node components: kubelet only runs on Linux.
if [[ "${START_MODE}" != *"nokubelet"* ]]; then
  ## TODO remove this check if/when kubelet is supported on darwin
  # Detect the OS name/arch and display appropriate error.
  case "$(uname -s)" in
  Darwin)
    print_color "kubelet is not currently supported in darwin, kubelet aborted."
    KUBELET_LOG=""
    ;;
  Linux)
    install_cni_if_needed
    start_kubelet
    ;;
  *)
    print_color "Unsupported host OS. Must be Linux or Mac OS X, kubelet aborted."
    ;;
  esac
fi

# kube-proxy (Linux only), then wait for DNS to come up behind it.
if [[ "${START_MODE}" != "kubeletonly" ]]; then
  if [[ "${START_MODE}" != *"nokubeproxy"* ]]; then
    ## TODO remove this check if/when kubelet is supported on darwin
    # Detect the OS name/arch and display appropriate error.
    case "$(uname -s)" in
    Darwin)
      print_color "kubelet is not currently supported in darwin, kube-proxy aborted."
      ;;
    Linux)
      start_kubeproxy
      wait_coredns_available
      ;;
    *)
      print_color "Unsupported host OS. Must be Linux or Mac OS X, kube-proxy aborted."
      ;;
    esac
  fi
fi

if [[ "${DEFAULT_STORAGE_CLASS}" = "true" ]]; then
  create_storage_class
fi

print_success

# When not daemonizing, block here and poll component health until Ctrl-C
# (the EXIT/INT traps installed above handle the teardown).
if [[ "${ENABLE_DAEMON}" = false ]]; then
  while true; do sleep 1; healthcheck; done
fi

# Point the default kubeconfig at the local cluster for CI test runs.
if [[ "${KUBETEST_IN_DOCKER:-}" == "true" ]]; then
  ${KUBECTL} config set-cluster local --server=https://localhost:6443 --certificate-authority=/var/run/kubernetes/server-ca.crt
  ${KUBECTL} config set-credentials myself --client-key=/var/run/kubernetes/client-admin.key --client-certificate=/var/run/kubernetes/client-admin.crt
  ${KUBECTL} config set-context local --cluster=local --user=myself
  ${KUBECTL} config use-context local
fi