#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/cluster/gce/config-common.sh"

# Specifying KUBE_GCE_API_ENDPOINT will override the default GCE Compute API endpoint (https://www.googleapis.com/compute/v1/).
# This endpoint has to point to the v1 API. For example, https://www.googleapis.com/compute/staging_v1/
export GCE_API_ENDPOINT=${KUBE_GCE_API_ENDPOINT:-}
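# Illustrative example (assumes the standard cluster/kube-up.sh entrypoint):
# point kube-up at the staging v1 Compute endpoint mentioned above:
#   KUBE_GCE_API_ENDPOINT="https://www.googleapis.com/compute/staging_v1/" ./cluster/kube-up.sh
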
export GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
export REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
NODE_SIZE=${NODE_SIZE:-e2-standard-2}
NUM_NODES=${NUM_NODES:-3}
NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
MASTER_SIZE=${MASTER_SIZE:-e2-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
export MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
NODE_DISK_SIZE=${NODE_DISK_SIZE:-100GB}
NODE_LOCAL_SSDS=${NODE_LOCAL_SSDS:-0}
NODE_LABELS="${KUBE_NODE_LABELS:-}"
WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS:-}"
NODE_LOCAL_SSDS_EPHEMERAL=${NODE_LOCAL_SSDS_EPHEMERAL:-}

# KUBE_CREATE_NODES can be used to avoid creating nodes, while the master will
# still be sized for NUM_NODES nodes. Firewalls and node templates are still created.
KUBE_CREATE_NODES="${KUBE_CREATE_NODES:-true}"

# An extension to local SSDs allowing users to specify block/fs and SCSI/NVMe devices.
# The format of this variable is "#,scsi/nvme,block/fs"; you can specify multiple
# configurations by separating them with a semicolon, e.g. "2,scsi,fs;1,nvme,block"
# is a request for 2 SCSI formatted and mounted SSDs and 1 NVMe block device SSD.
NODE_LOCAL_SSDS_EXT=${NODE_LOCAL_SSDS_EXT:-}
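# Illustrative example (taken from the format described above): two SCSI SSDs
# formatted and mounted as filesystems plus one NVMe SSD exposed as a raw block device:
#   NODE_LOCAL_SSDS_EXT="2,scsi,fs;1,nvme,block"
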
# Accelerators to be attached to each node. Format "type=<accelerator-type>,count=<accelerator-count>"
# More information on available GPUs here - https://cloud.google.com/compute/docs/gpus/
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
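# Illustrative example only; the accelerator type and count below are placeholders,
# and the type must be available in the chosen zone (see the GPU docs linked above):
#   NODE_ACCELERATORS="type=nvidia-tesla-t4,count=2"
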
export REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-true}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
PREEMPTIBLE_MASTER=${PREEMPTIBLE_MASTER:-false}
KUBE_DELETE_NODES=${KUBE_DELETE_NODES:-true}
KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-} # default value calculated below
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
MIG_WAIT_UNTIL_STABLE_TIMEOUT=${MIG_WAIT_UNTIL_STABLE_TIMEOUT:-1800}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-${KUBE_OS_DISTRIBUTION:-gci}}
WINDOWS_NODE_OS_DISTRIBUTION=${WINDOWS_NODE_OS_DISTRIBUTION:-win2019}

if [[ "${MASTER_OS_DISTRIBUTION}" == "cos" ]]; then
  MASTER_OS_DISTRIBUTION="gci"
fi

if [[ "${NODE_OS_DISTRIBUTION}" == "cos" ]]; then
  NODE_OS_DISTRIBUTION="gci"
fi

# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
  NODE_ACCELERATORS=""
fi

# By default a cluster will be started with the master and nodes
# on Container-Optimized OS (cos, previously known as gci). If
# you are updating the OS image versions, update this variable.
# Also please update the corresponding image for node e2e at:
# https://github.com/kubernetes/kubernetes/blob/master/test/e2e_node/jenkins/image-config.yaml
#
# By default, the latest image from the image family will be used unless an
# explicit image is set.
GCI_VERSION=${KUBE_GCI_VERSION:-}
IMAGE_FAMILY=${KUBE_IMAGE_FAMILY:-cos-109-lts}
export MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-}
export MASTER_IMAGE_FAMILY=${KUBE_GCE_MASTER_IMAGE_FAMILY:-${IMAGE_FAMILY}}
export MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
export NODE_IMAGE=${KUBE_GCE_NODE_IMAGE:-${GCI_VERSION}}
export NODE_IMAGE_FAMILY=${KUBE_GCE_NODE_IMAGE_FAMILY:-${IMAGE_FAMILY}}
export NODE_IMAGE_PROJECT=${KUBE_GCE_NODE_PROJECT:-cos-cloud}
export NODE_SERVICE_ACCOUNT=${KUBE_GCE_NODE_SERVICE_ACCOUNT:-default}

# KUBELET_TEST_ARGS are extra arguments passed to kubelet.
export KUBELET_TEST_ARGS=${KUBE_KUBELET_EXTRA_ARGS:-}

export CONTAINER_RUNTIME_ENDPOINT=${KUBE_CONTAINER_RUNTIME_ENDPOINT:-unix:///run/containerd/containerd.sock}
export CONTAINER_RUNTIME_NAME=${KUBE_CONTAINER_RUNTIME_NAME:-containerd}
export LOG_DUMP_SYSTEMD_SERVICES=${LOG_DUMP_SYSTEMD_SERVICES:-containerd}
export LOAD_IMAGE_COMMAND=${KUBE_LOAD_IMAGE_COMMAND:-ctr -n=k8s.io images import}

# Ability to inject custom versions (Ubuntu OS images ONLY).
# If KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION or KUBE_UBUNTU_INSTALL_RUNC_VERSION
# is empty, the version(s) are not overridden and whatever ships with the
# default installation of the containerd package is used.
export UBUNTU_INSTALL_CONTAINERD_VERSION=${KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION:-}
export UBUNTU_INSTALL_RUNC_VERSION=${KUBE_UBUNTU_INSTALL_RUNC_VERSION:-}
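# Illustrative example (the version numbers are placeholders, not a recommendation):
# pin containerd and runc on Ubuntu node images:
#   KUBE_UBUNTU_INSTALL_CONTAINERD_VERSION="1.7.13"
#   KUBE_UBUNTU_INSTALL_RUNC_VERSION="1.1.12"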

# Ability to inject custom versions (COS images ONLY).
# If KUBE_COS_INSTALL_CONTAINERD_VERSION or KUBE_COS_INSTALL_RUNC_VERSION
# is empty, the version(s) are not overridden and whatever ships with the
# default installation of the containerd package is used.
export COS_INSTALL_CONTAINERD_VERSION=${KUBE_COS_INSTALL_CONTAINERD_VERSION:-}
export COS_INSTALL_RUNC_VERSION=${KUBE_COS_INSTALL_RUNC_VERSION:-}

# MASTER_EXTRA_METADATA is the extra instance metadata on the master instance, separated by commas.
export MASTER_EXTRA_METADATA=${KUBE_MASTER_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}
# NODE_EXTRA_METADATA is the extra instance metadata on node instances, separated by commas.
export NODE_EXTRA_METADATA=${KUBE_NODE_EXTRA_METADATA:-${KUBE_EXTRA_METADATA:-}}

NETWORK=${KUBE_GCE_NETWORK:-default}
# Enable network deletion by default (for kube-down), unless we're using the 'default' network.
if [[ "${NETWORK}" == "default" ]]; then
  KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
else
  KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-true}
fi
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
  SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
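# Illustrative example (the network name is a placeholder; assumes the standard
# cluster/kube-up.sh entrypoint): bring the cluster up on a dedicated custom-mode
# network, letting the subnet name default to "<network>-custom-subnet" as above:
#   KUBE_GCE_NETWORK="my-k8s-net" CREATE_CUSTOM_NETWORK=true ./cluster/kube-up.sh
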
INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX:-kubernetes}"
CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
MASTER_NAME="${INSTANCE_PREFIX}-master"
export AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-aggregator"
export INITIAL_ETCD_CLUSTER="${MASTER_NAME}"
export MASTER_TAG="${INSTANCE_PREFIX}-master"
export NODE_TAG="${INSTANCE_PREFIX}-minion"

CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
# NODE_IP_RANGE is used when ENABLE_IP_ALIASES=true or CREATE_CUSTOM_NETWORK=true.
# It is the primary range in the subnet and is the range used for node instance IPs.
NODE_IP_RANGE="$(get-node-ip-range)"
export NODE_IP_RANGE

# NOTE: Avoid giving nodes empty scopes, because kubelet needs a service account
# in order to initialize properly.
NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
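# Illustrative example: extend the default scope set with read-write Compute and
# GCS access using gcloud scope aliases (comma separated, no spaces):
#   NODE_SCOPES="monitoring,logging-write,storage-rw,compute-rw"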

# Extra docker options for nodes.
EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"

VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/home/kubernetes/flexvolume}"

SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}" # formerly PORTAL_NET
export ALLOCATE_NODE_CIDRS=true

# When set to true, the Docker registry cache is enabled by default as part of cluster bring-up.
export ENABLE_DOCKER_REGISTRY_CACHE=true

# Optional: Deploy a L7 loadbalancer controller to fulfill Ingress requests:
#   glbc - GCE L7 Load Balancer Controller
export ENABLE_L7_LOADBALANCING="${KUBE_ENABLE_L7_LOADBALANCING:-glbc}"

# Optional: Enable Metrics Server. Metrics Server should be enabled everywhere,
# since it's a critical component, but in the first release we need a way to disable
# it in case of stability issues.
# TODO(piosz) remove this option once Metrics Server becomes stable.
export ENABLE_METRICS_SERVER="${KUBE_ENABLE_METRICS_SERVER:-true}"

# Optional: Metadata agent to set up as part of cluster bring-up:
#   none        - No metadata agent
#   stackdriver - Stackdriver metadata agent
# The metadata agent is a daemonset that provides metadata about the Kubernetes
# objects running on the same node, for exporting metrics and logs.
export ENABLE_METADATA_AGENT="${KUBE_ENABLE_METADATA_AGENT:-none}"

# If specified, one special node out of NUM_NODES will be created with this machine type.
# Useful for scheduling heapster in large clusters with nodes of small size.
HEAPSTER_MACHINE_TYPE="${HEAPSTER_MACHINE_TYPE:-}"

# Optional: Additional nodes will be created if their type and number are specified.
# NUM_NODES will be lowered accordingly.
# Useful for running cluster-level addons that need more resources than would fit
# on small nodes, like network plugins.
NUM_ADDITIONAL_NODES="${NUM_ADDITIONAL_NODES:-}"
ADDITIONAL_MACHINE_TYPE="${ADDITIONAL_MACHINE_TYPE:-}"

export MASTER_NODE_LABELS="${KUBE_MASTER_NODE_LABELS:-}"
# NON_MASTER_NODE_LABELS are labels that will only be applied to non-master nodes.
NON_MASTER_NODE_LABELS="${KUBE_NON_MASTER_NODE_LABELS:-}"
WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS:-}"

if [[ "${PREEMPTIBLE_MASTER}" == "true" ]]; then
  NODE_LABELS="${NODE_LABELS},cloud.google.com/gke-preemptible=true"
  WINDOWS_NODE_LABELS="${WINDOWS_NODE_LABELS},cloud.google.com/gke-preemptible=true"
elif [[ "${PREEMPTIBLE_NODE}" == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
  WINDOWS_NON_MASTER_NODE_LABELS="${WINDOWS_NON_MASTER_NODE_LABELS},cloud.google.com/gke-preemptible=true"
fi

# To avoid running Calico on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support Calico.
if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}projectcalico.org/ds-ready=true"
fi

# Optional: Enable netd.
ENABLE_NETD="${KUBE_ENABLE_NETD:-false}"
export CUSTOM_NETD_YAML="${KUBE_CUSTOM_NETD_YAML:-}"
export CUSTOM_CALICO_NODE_DAEMONSET_YAML="${KUBE_CUSTOM_CALICO_NODE_DAEMONSET_YAML:-}"
export CUSTOM_TYPHA_DEPLOYMENT_YAML="${KUBE_CUSTOM_TYPHA_DEPLOYMENT_YAML:-}"

# To avoid running netd on a node that is not configured appropriately,
# label each Node so that the DaemonSet can run the Pods only on ready Nodes.
# Windows nodes do not support netd.
if [[ ${ENABLE_NETD:-} == "true" ]]; then
  NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS:+${NON_MASTER_NODE_LABELS},}cloud.google.com/gke-netd-ready=true"
fi

export ENABLE_NODELOCAL_DNS="${KUBE_ENABLE_NODELOCAL_DNS:-false}"
export LOCAL_DNS_IP="${KUBE_LOCAL_DNS_IP:-169.254.20.10}"

# Enable metadata concealment by firewalling pod traffic to the metadata server
# and running a proxy daemonset on nodes.
#
# TODO(#8867) Enable by default.
ENABLE_METADATA_CONCEALMENT="${ENABLE_METADATA_CONCEALMENT:-false}" # true, false
METADATA_CONCEALMENT_NO_FIREWALL="${METADATA_CONCEALMENT_NO_FIREWALL:-false}" # true, false
if [[ ${ENABLE_METADATA_CONCEALMENT:-} == "true" ]]; then
  # Put the necessary label on the node so the daemonset gets scheduled.
  NODE_LABELS="${NODE_LABELS},cloud.google.com/metadata-proxy-ready=true"
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_METADATA_CONCEALMENT METADATA_CONCEALMENT_NO_FIREWALL"
fi

# Optional: Enable node logging.
export ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
export LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: gcp

# Optional: When set to true, Elasticsearch and Kibana will be set up as part of cluster bring-up.
export ENABLE_CLUSTER_LOGGING="${KUBE_ENABLE_CLUSTER_LOGGING:-true}"
export ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Don't require https for registries in our local RFC 1918 network
if [[ ${KUBE_ENABLE_INSECURE_REGISTRY:-false} == "true" ]]; then
  EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS} --insecure-registry 10.0.0.0/8"
fi

# Optional: customize runtime config
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"

if [[ "${KUBE_FEATURE_GATES:-}" == "AllAlpha=true" ]]; then
  export RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-api/all=true}"
fi
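# Illustrative example (the API group/version named here is only a placeholder;
# pick one that exists in your Kubernetes version): enable a specific alpha API
# group via the apiserver --runtime-config mechanism instead of api/all:
#   KUBE_RUNTIME_CONFIG="resource.k8s.io/v1alpha2=true"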

# By default, disable the gkenetworkparamset controller in the CCM.
RUN_CCM_CONTROLLERS="${RUN_CCM_CONTROLLERS:-*,-gkenetworkparamset}"

# List of the set of feature gates recognized by the GCP CCM
export CCM_FEATURE_GATES="APIPriorityAndFairness,APIResponseCompression,APIServerIdentity,APIServerTracing,AllAlpha,AllBeta,CustomResourceValidationExpressions,KMSv2,OpenAPIEnums,OpenAPIV3,ServerSideFieldValidation,StorageVersionAPI,StorageVersionHash"

# Optional: set feature gates
# shellcheck disable=SC2034 # Variables sourced in other scripts.
FEATURE_GATES="${KUBE_FEATURE_GATES:-}"

if [[ -n "${NODE_ACCELERATORS}" ]]; then
  if [[ "${NODE_ACCELERATORS}" =~ .*type=([a-zA-Z0-9-]+).* ]]; then
    NON_MASTER_NODE_LABELS="${NON_MASTER_NODE_LABELS},cloud.google.com/gke-accelerator=${BASH_REMATCH[1]}"
  fi
fi
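# Worked example of the regex above (using the same hypothetical accelerator
# value as earlier): with NODE_ACCELERATORS="type=nvidia-tesla-t4,count=2",
# BASH_REMATCH[1] is "nvidia-tesla-t4", so non-master nodes get the label
#   cloud.google.com/gke-accelerator=nvidia-tesla-t4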

# Optional: Install cluster DNS.
# Set CLUSTER_DNS_CORE_DNS to 'false' to install kube-dns instead of CoreDNS.
CLUSTER_DNS_CORE_DNS="${CLUSTER_DNS_CORE_DNS:-true}"
export ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
export DNS_SERVER_IP="${KUBE_DNS_SERVER_IP:-10.0.0.10}"
export DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"
export DNS_MEMORY_LIMIT="${KUBE_DNS_MEMORY_LIMIT:-170Mi}"

# Optional: Enable DNS horizontal autoscaler
export ENABLE_DNS_HORIZONTAL_AUTOSCALER="${KUBE_ENABLE_DNS_HORIZONTAL_AUTOSCALER:-true}"

# Optional: Install node problem detector.
#   none       - Do not run node problem detector.
#   daemonset  - Run node problem detector as a daemonset.
#   standalone - Run node problem detector as a standalone system daemon.
export ENABLE_NODE_PROBLEM_DETECTOR="${KUBE_ENABLE_NODE_PROBLEM_DETECTOR:-daemonset}"
NODE_PROBLEM_DETECTOR_VERSION="${NODE_PROBLEM_DETECTOR_VERSION:-}"
NODE_PROBLEM_DETECTOR_TAR_HASH="${NODE_PROBLEM_DETECTOR_TAR_HASH:-}"
NODE_PROBLEM_DETECTOR_RELEASE_PATH="${NODE_PROBLEM_DETECTOR_RELEASE_PATH:-}"
NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"

CNI_HASH="${CNI_HASH:-}"
CNI_TAR_PREFIX="${CNI_TAR_PREFIX:-cni-plugins-linux-amd64-}"
CNI_STORAGE_URL_BASE="${CNI_STORAGE_URL_BASE:-https://storage.googleapis.com/k8s-artifacts-cni/release}"

# Optional: Create autoscaler for cluster's nodes.
ENABLE_CLUSTER_AUTOSCALER="${KUBE_ENABLE_CLUSTER_AUTOSCALER:-false}"
if [[ "${ENABLE_CLUSTER_AUTOSCALER}" == "true" ]]; then
  export AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-}"
  export AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-}"
  export AUTOSCALER_ENABLE_SCALE_DOWN="${KUBE_AUTOSCALER_ENABLE_SCALE_DOWN:-true}"
  export AUTOSCALER_EXPANDER_CONFIG="${KUBE_AUTOSCALER_EXPANDER_CONFIG:---expander=price}"
fi

# Optional: Enable allocation of pod IPs using IP aliases.
#
# BETA FEATURE.
#
# IP_ALIAS_SIZE is the size of the podCIDR allocated to a node.
# IP_ALIAS_SUBNETWORK is the subnetwork to allocate from. If empty, a
# new subnetwork will be created for the cluster.
ENABLE_IP_ALIASES=${KUBE_GCE_ENABLE_IP_ALIASES:-true}
NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-RangeAllocator}
if [ "${ENABLE_IP_ALIASES}" = true ]; then
  # Number of Pods that can run on this node.
  MAX_PODS_PER_NODE=${MAX_PODS_PER_NODE:-110}
  # Size of ranges allocated to each node.
  IP_ALIAS_SIZE="/$(get-alias-range-size "${MAX_PODS_PER_NODE}")"
  export IP_ALIAS_SIZE
  IP_ALIAS_SUBNETWORK=${KUBE_GCE_IP_ALIAS_SUBNETWORK:-${INSTANCE_PREFIX}-subnet-default}
  # If we're using a custom network, use the subnet we already created for it as the one for ip-alias.
  # Note that this means SUBNETWORK will override KUBE_GCE_IP_ALIAS_SUBNETWORK in case of a custom network.
  if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
    export IP_ALIAS_SUBNETWORK="${SUBNETWORK}"
  fi
  export NODE_IPAM_MODE=${KUBE_GCE_NODE_IPAM_MODE:-CloudAllocator}
  SECONDARY_RANGE_NAME=${SECONDARY_RANGE_NAME:-}
  # Add to the provider custom variables.
  PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_IP_ALIASES"
  PROVIDER_VARS="${PROVIDER_VARS:-} NODE_IPAM_MODE"
  PROVIDER_VARS="${PROVIDER_VARS:-} SECONDARY_RANGE_NAME"
else
  if [[ -n "${MAX_PODS_PER_NODE:-}" ]]; then
    # Should not have MAX_PODS_PER_NODE set for route-based clusters.
    echo -e "${color_red:-}Cannot set MAX_PODS_PER_NODE for route-based projects for ${PROJECT}." >&2
    exit 1
  fi
  if [[ "$(get-num-nodes)" -gt 100 ]]; then
    echo -e "${color_red:-}Cannot create cluster with more than 100 nodes for route-based projects for ${PROJECT}." >&2
    exit 1
  fi
fi
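# Worked example of the sizing above (assuming get-alias-range-size picks the
# smallest CIDR with at least 2*MAX_PODS_PER_NODE addresses, as is typical for
# alias-IP clusters): MAX_PODS_PER_NODE=110 needs >=220 IPs, so each node gets a
# /24 podCIDR (256 addresses); MAX_PODS_PER_NODE=32 would yield a /26 (64 addresses).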

# Enable GCE Alpha features.
if [[ -n "${GCE_ALPHA_FEATURES:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_ALPHA_FEATURES"
fi

# Disable Docker live-restore.
if [[ -n "${DISABLE_DOCKER_LIVE_RESTORE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} DISABLE_DOCKER_LIVE_RESTORE"
fi

# Override default GLBC image
if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
fi
CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}"

# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,RuntimeClass

# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.
ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook"

# ResourceQuota must come last, or a creation could be recorded even though the pod was forbidden.
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"

# Optional: if set to true, kube-up will automatically check for existing resources and clean them up.
KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}

# Storage backend. 'etcd2' supported, 'etcd3' experimental.
STORAGE_BACKEND=${STORAGE_BACKEND:-}

# Networking plugin specific settings.
NETWORK_PROVIDER="${NETWORK_PROVIDER:-kubenet}" # none, kubenet

# Network Policy plugin specific settings.
NETWORK_POLICY_PROVIDER="${NETWORK_POLICY_PROVIDER:-none}" # calico

export NON_MASQUERADE_CIDR="0.0.0.0/0"

# How should the kubelet configure hairpin mode?
HAIRPIN_MODE="${HAIRPIN_MODE:-hairpin-veth}" # promiscuous-bridge, hairpin-veth, none
# Optional: if set to true, kube-up will configure the cluster to run e2e tests.
export E2E_STORAGE_TEST_ENVIRONMENT="${KUBE_E2E_STORAGE_TEST_ENVIRONMENT:-false}"

# Evict pods whenever compute resource availability on the nodes drops below a threshold.
EVICTION_HARD="${EVICTION_HARD:-memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%}"
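# Illustrative example (the thresholds below are placeholders, not tuned
# recommendations): the value uses kubelet's --eviction-hard syntax, i.e.
# comma-separated <signal><operator><quantity> entries:
#   EVICTION_HARD="memory.available<500Mi,nodefs.available<15%,imagefs.available<10%"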

# Optional: custom scheduling algorithm
SCHEDULING_ALGORITHM_PROVIDER="${SCHEDULING_ALGORITHM_PROVIDER:-}"

# Optional: install a default StorageClass
ENABLE_DEFAULT_STORAGE_CLASS="${ENABLE_DEFAULT_STORAGE_CLASS:-false}"

# Optional: install volume snapshot CRDs
ENABLE_VOLUME_SNAPSHOTS="${ENABLE_VOLUME_SNAPSHOTS:-true}"

# Optional: Enable legacy ABAC policy that makes all service accounts superusers.
ENABLE_LEGACY_ABAC="${ENABLE_LEGACY_ABAC:-false}" # true, false

# Indicates if the values (i.e. KUBE_USER and KUBE_PASSWORD for basic
# authentication) in metadata should be treated as canonical, and therefore disk
# copies ought to be recreated/clobbered.
METADATA_CLOBBERS_CONFIG="${METADATA_CLOBBERS_CONFIG:-false}"

ENABLE_BIG_CLUSTER_SUBNETS="${ENABLE_BIG_CLUSTER_SUBNETS:-false}"

# Optional: Enable log rotation for k8s services
ENABLE_LOGROTATE_FILES="${ENABLE_LOGROTATE_FILES:-true}"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_LOGROTATE_FILES"
if [[ -n "${LOGROTATE_FILES_MAX_COUNT:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_FILES_MAX_COUNT"
fi
if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
fi
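# Illustrative example (hypothetical values; LOGROTATE_MAX_SIZE is assumed to take
# a logrotate-style size string): keep up to 10 rotated files of at most 100M each
# for k8s service logs:
#   LOGROTATE_FILES_MAX_COUNT=10
#   LOGROTATE_MAX_SIZE="100M"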

# Optional: Enable log rotation for pod logs
ENABLE_POD_LOG="${ENABLE_POD_LOG:-false}"
PROVIDER_VARS="${PROVIDER_VARS:-} ENABLE_POD_LOG"

if [[ -n "${POD_LOG_MAX_FILE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_FILE"
fi

if [[ -n "${POD_LOG_MAX_SIZE:-}" ]]; then
  PROVIDER_VARS="${PROVIDER_VARS:-} POD_LOG_MAX_SIZE"
fi

# Fluentd requirements
# YAML exists to trigger a configuration refresh when changes are made.
export FLUENTD_GCP_YAML_VERSION="v3.2.0"
FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-1.6.17}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"

# Heapster requirements
HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
HEAPSTER_GCP_MEMORY_PER_NODE="${HEAPSTER_GCP_MEMORY_PER_NODE:-4}"
HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"

# Default Stackdriver resources version exported by Fluentd-gcp addon
LOGGING_STACKDRIVER_RESOURCE_TYPES="${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}"

# Adding to PROVIDER_VARS, since this is GCP-specific.
PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_YAML_VERSION FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE LOGGING_STACKDRIVER_RESOURCE_TYPES"

# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"

# prometheus-to-sd configuration
PROMETHEUS_TO_SD_ENDPOINT="${PROMETHEUS_TO_SD_ENDPOINT:-https://monitoring.googleapis.com/}"
PROMETHEUS_TO_SD_PREFIX="${PROMETHEUS_TO_SD_PREFIX:-custom.googleapis.com}"
ENABLE_PROMETHEUS_TO_SD="${ENABLE_PROMETHEUS_TO_SD:-false}"

# TODO(#51292): Make kube-proxy Daemonset default and remove the configuration here.
# Optional: [Experiment Only] Run kube-proxy as a DaemonSet if set to true; run it as static pods otherwise.
KUBE_PROXY_DAEMONSET="${KUBE_PROXY_DAEMONSET:-false}" # true, false

# Control whether the startup scripts manage the lifecycle of kube-proxy.
# When true, the startup scripts do not enable kube-proxy either as a daemonset addon or as a static pod,
# regardless of the value of KUBE_PROXY_DAEMONSET.
# When false, the value of KUBE_PROXY_DAEMONSET controls whether kube-proxy comes up as a static pod or
# as an addon daemonset.
KUBE_PROXY_DISABLE="${KUBE_PROXY_DISABLE:-false}" # true, false

# Will be passed into kube-proxy via `--detect-local-mode`
DETECT_LOCAL_MODE="${DETECT_LOCAL_MODE:-}"

# Optional: duration of cluster signed certificates.
CLUSTER_SIGNING_DURATION="${CLUSTER_SIGNING_DURATION:-}"

# Optional: enable certificate rotation of the kubelet certificates.
ROTATE_CERTIFICATES="${ROTATE_CERTIFICATES:-}"

# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-5}"

export SERVICEACCOUNT_ISSUER="https://kubernetes.io/${CLUSTER_NAME}"

# Taint Windows nodes by default to prevent Linux workloads from being
# scheduled onto them.
WINDOWS_NODE_TAINTS="${WINDOWS_NODE_TAINTS:-node.kubernetes.io/os=win1809:NoSchedule}"
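# The default above uses the standard Kubernetes taint syntax, key=value:effect.
# Illustrative alternative (the value is a placeholder):
#   WINDOWS_NODE_TAINTS="node.kubernetes.io/os=windows:NoSchedule"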

# Whether to set up a private GCE cluster, i.e. a cluster where nodes have only private IPs.
export GCE_PRIVATE_CLUSTER="${KUBE_GCE_PRIVATE_CLUSTER:-false}"
export GCE_PRIVATE_CLUSTER_PORTS_PER_VM="${KUBE_GCE_PRIVATE_CLUSTER_PORTS_PER_VM:-}"

# When KUBE_ENABLE_KONNECTIVITY_SERVICE is enabled, the three variables below will
# default to true to enable the konnectivity network proxy and start the required pods.
# Their values can be overridden for more granular control of the proxy.

# Optional: Whether to do the setup for the konnectivity service.
# Includes setting up kubeconfig, tokens, egress files, and firewall rules.
export PREPARE_KONNECTIVITY_SERVICE="${KUBE_ENABLE_KONNECTIVITY_SERVICE:-true}"
# Optional: Whether to use the konnectivity network proxy for all egress from the apiserver.
export EGRESS_VIA_KONNECTIVITY="${KUBE_ENABLE_KONNECTIVITY_SERVICE:-true}"
# Optional: Whether to start the konnectivity server and agent pods.
export RUN_KONNECTIVITY_PODS="${KUBE_ENABLE_KONNECTIVITY_SERVICE:-true}"
# Proxy Protocol Mode determines the protocol used to communicate between the apiserver and the network proxy.
# Valid options are grpc and http-connect. Default is grpc.
export KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE="${KUBE_KONNECTIVITY_SERVICE_PROXY_PROTOCOL_MODE:-grpc}"
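# Illustrative example: turn the konnectivity service off entirely, which makes
# all three toggles above default to false (assumes the standard kube-up flow):
#   KUBE_ENABLE_KONNECTIVITY_SERVICE=false ./cluster/kube-up.sh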

# Optional: Enable Windows CSI-Proxy
export ENABLE_CSI_PROXY="${ENABLE_CSI_PROXY:-true}"

# KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP decides whether
# kube-apiserver is healthchecked on the host IP instead of 127.0.0.1.
export KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP="${KUBE_APISERVER_HEALTHCHECK_ON_HOST_IP:-false}"

# ETCD_PROGRESS_NOTIFY_INTERVAL defines the interval for etcd watch progress notify events.
export ETCD_PROGRESS_NOTIFY_INTERVAL="${ETCD_PROGRESS_NOTIFY_INTERVAL:-5s}"

# Optional: Install Pigz on Windows.
# Pigz is a multi-core optimized implementation of gzip, used in place of unzip.exe.
# It improves container image pull performance since most of the time is spent
# decompressing the image layers to disk.
export WINDOWS_ENABLE_PIGZ="${WINDOWS_ENABLE_PIGZ:-true}"

# Enable Windows DSR (Direct Server Return)
export WINDOWS_ENABLE_DSR="${WINDOWS_ENABLE_DSR:-false}"

# Install Node Problem Detector (NPD) on Windows nodes.
# NPD analyzes the host for problems that can disrupt workloads.
export WINDOWS_ENABLE_NODE_PROBLEM_DETECTOR="${WINDOWS_ENABLE_NODE_PROBLEM_DETECTOR:-none}"
export WINDOWS_NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS="${WINDOWS_NODE_PROBLEM_DETECTOR_CUSTOM_FLAGS:-}"

# Enable Windows Hyper-V
# sig-storage uses it to create Virtual Hard Disks in tests
export WINDOWS_ENABLE_HYPERV="${WINDOWS_ENABLE_HYPERV:-false}"

# TLS_CIPHER_SUITES defines cipher suites allowed to be used by kube-apiserver.
# If this variable is unset or empty, kube-apiserver will allow its default set of cipher suites.
export TLS_CIPHER_SUITES=""
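# Illustrative example (cipher names follow the Go/kube-apiserver --tls-cipher-suites
# naming; restricting the list is a policy choice, not a recommendation made here):
#   TLS_CIPHER_SUITES="TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"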

# CLOUD_PROVIDER_FLAG defines the cloud-provider value presented to KCM, apiserver,
# and kubelet.
export CLOUD_PROVIDER_FLAG="${CLOUD_PROVIDER_FLAG:-external}"

# Don't run the node-ipam-controller on the KCM if the cloud provider is external.
if [[ "${CLOUD_PROVIDER_FLAG}" == "external" ]]; then
  RUN_CONTROLLERS="${RUN_CONTROLLERS:-*,-node-ipam-controller}"
fi

# When ENABLE_AUTH_PROVIDER_GCP is set, the following flags for the out-of-tree
# GCP credential provider are presented to kubelet:
#   --image-credential-provider-config=${path-to-config}
#   --image-credential-provider-bin-dir=${path-to-auth-provider-binary}
# Also, the DisableKubeletCloudCredentialProviders feature gate must be set to
# true for kubelet to use the external credential provider.
export ENABLE_AUTH_PROVIDER_GCP="${ENABLE_AUTH_PROVIDER_GCP:-true}"
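# Illustrative example (sketch only): the feature gate mentioned above can be
# supplied through the same mechanism as any other gate in this file:
#   KUBE_FEATURE_GATES="DisableKubeletCloudCredentialProviders=true"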