{{- /*
Copyright 2017 CNI authors
Modifications copyright (c) Linkerd authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

This file was inspired by
1) https://github.com/istio/cni/blob/c63a509539b5ed165a6617548c31b686f13c2133/deployments/kubernetes/install/manifests/istio-cni.yaml
*/ -}}
{{- if eq .Release.Service "CLI" -}}
kind: Namespace
apiVersion: v1
metadata:
  name: {{.Release.Namespace}}
  labels:
    linkerd.io/cni-resource: "true"
    config.linkerd.io/admission-webhooks: disabled
    pod-security.kubernetes.io/enforce: privileged
---
{{ end -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: linkerd-cni
  namespace: {{ .Release.Namespace }}
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 2 }}
{{- end }}
{{ if .Values.enablePSP -}}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: linkerd-{{.Release.Namespace}}-cni
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: "runtime/default"
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
spec:
  {{- if not .Values.privileged }}
  allowPrivilegeEscalation: false
  {{- end }}
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  runAsGroup:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - hostPath
  - secret
  - emptyDir
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: linkerd-cni
  namespace: {{ .Release.Namespace }}
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: ['extensions', 'policy']
  resources: ['podsecuritypolicies']
  resourceNames:
  - linkerd-{{.Release.Namespace}}-cni
  verbs: ['use']
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: linkerd-cni
  namespace: {{ .Release.Namespace }}
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: linkerd-cni
subjects:
- kind: ServiceAccount
  name: linkerd-cni
  namespace: {{.Release.Namespace}}
{{ end -}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
rules:
- apiGroups: [""]
  resources: ["pods", "nodes", "namespaces", "services"]
  verbs: ["list", "get", "watch"]
{{- if .Values.repairController.enabled }}
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["delete"]
- apiGroups: ["events.k8s.io"]
  resources: ["events"]
  verbs: ["create"]
{{- end }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: linkerd-cni
subjects:
- kind: ServiceAccount
  name: linkerd-cni
  namespace: {{.Release.Namespace}}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: linkerd-cni-config
  namespace: {{ .Release.Namespace }}
  labels:
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
data:
  dest_cni_net_dir: "{{.Values.destCNINetDir}}"
  dest_cni_bin_dir: "{{.Values.destCNIBinDir}}"
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  # iptables-mode and ipv6 flags are only considered as of linkerd-cni v1.4.0
  cni_network_config: |-
    {
      "name": "linkerd-cni",
      "type": "linkerd-cni",
      "log_level": "{{.Values.logLevel}}",
      "policy": {
        "type": "k8s",
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "kubeconfig": "__KUBECONFIG_FILEPATH__"
      },
      "linkerd": {
        "incoming-proxy-port": {{.Values.inboundProxyPort}},
        "outgoing-proxy-port": {{.Values.outboundProxyPort}},
        "proxy-uid": {{.Values.proxyUID}},
        {{- if ge (int .Values.proxyGID) 0 }}
        "proxy-gid": {{.Values.proxyGID}},
        {{- end }}
        "ports-to-redirect": [{{.Values.portsToRedirect}}],
        "inbound-ports-to-ignore": ["{{- .Values.proxyAdminPort }}","{{ .Values.proxyControlPort }}" {{- if .Values.ignoreInboundPorts }},{{- include "partials.splitStringList" .Values.ignoreInboundPorts -}}{{- end }}],
        {{- if .Values.ignoreOutboundPorts }}
        "outbound-ports-to-ignore": [ {{- include "partials.splitStringList" .Values.ignoreOutboundPorts -}} ],
        {{- end }}
        "simulate": false,
        "use-wait-flag": {{.Values.useWaitFlag}},
        "iptables-mode": {{.Values.iptablesMode | quote}},
        "ipv6": {{.Values.disableIPv6 | ternary "false" "true"}}
      }
    }
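  # Illustrative only: with the chart's default values, and once the install
  # script has substituted the runtime placeholders, the "linkerd" stanza
  # above is expected to render roughly as:
  #   "linkerd": {
  #     "incoming-proxy-port": 4143,
  #     "outgoing-proxy-port": 4140,
  #     "proxy-uid": 2102,
  #     "ports-to-redirect": [],
  #     "inbound-ports-to-ignore": ["4191","4190"],
  #     "simulate": false,
  #     "use-wait-flag": false,
  #     "iptables-mode": "legacy",
  #     "ipv6": false
  #   }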
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: linkerd-cni
  namespace: {{ .Release.Namespace }}
  labels:
    k8s-app: linkerd-cni
    linkerd.io/cni-resource: "true"
    {{- with .Values.commonLabels }}{{ toYaml . | trim | nindent 4 }}{{- end }}
  annotations:
    {{ include "partials.annotations.created-by" . }}
spec:
  revisionHistoryLimit: {{.Values.revisionHistoryLimit}}
  selector:
    matchLabels:
      k8s-app: linkerd-cni
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: linkerd-cni
        linkerd.io/cni-resource: "true"
        {{- with .Values.podLabels }}{{ toYaml . | trim | nindent 8 }}{{- end }}
      annotations:
        {{ include "partials.annotations.created-by" . }}
        linkerd.io/inject: disabled
    spec:
      {{- if .Values.tolerations }}
      {{- include "linkerd.tolerations" . | nindent 6 }}
      {{- end }}
      nodeSelector:
        kubernetes.io/os: linux
      {{- if .Values.nodeAffinity }}
      affinity:
      {{- include "linkerd.node-affinity" . | nindent 8 }}
      {{- end }}
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: linkerd-cni
      {{- if .Values.priorityClassName }}
      priorityClassName: {{ .Values.priorityClassName }}
      {{- end }}
      {{- if .Values.extraInitContainers }}
      initContainers:
      {{- toYaml .Values.extraInitContainers | nindent 6 }}
      {{- end }}
      containers:
      # This container installs the linkerd CNI binaries
      # and CNI network config file on each node. The install
      # script copies the files into place and then sleeps so
      # that Kubernetes doesn't keep trying to restart it.
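      # The plugin is designed to be chained onto the cluster's existing CNI
      # configuration: the install script appends a linkerd-cni entry to the
      # conflist it finds in DEST_CNI_NET_DIR (or writes a standalone config
      # when no other CNI config is present), leaving IP allocation and
      # routing to the node's primary CNI plugin.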
      - name: install-cni
        image: {{ .Values.image.name -}}:{{- .Values.image.version }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        env:
        - name: DEST_CNI_NET_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_net_dir
        - name: DEST_CNI_BIN_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_bin_dir
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: cni_network_config
        - name: SLEEP
          value: "true"
        lifecycle:
          # In some edge-cases this helps ensure that cleanup() is called in the container's script
          # https://github.com/linkerd/linkerd2/issues/2355
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - kill -15 1; sleep 15s
        volumeMounts:
        {{- if ne .Values.destCNIBinDir .Values.destCNINetDir }}
        - mountPath: /host{{.Values.destCNIBinDir}}
          name: cni-bin-dir
        - mountPath: /host{{.Values.destCNINetDir}}
          name: cni-net-dir
        {{- else }}
        - mountPath: /host{{.Values.destCNINetDir}}
          name: cni-net-dir
        {{- end }}
        - mountPath: /tmp
          name: linkerd-tmp-dir
        securityContext:
          readOnlyRootFilesystem: true
          privileged: {{.Values.privileged}}
        {{- if .Values.resources }}
        {{- include "partials.resources" .Values.resources | nindent 8 }}
        {{- end }}
      {{- if .Values.repairController.enabled }}
      # This container watches over pods whose linkerd-network-validator
      # container failed, probably because of a race condition while setting up
      # the CNI plugin chain, and deletes those pods so they can try acquiring a
      # proper network config again
      - name: repair-controller
        image: {{ .Values.image.name -}}:{{- .Values.image.version }}
        imagePullPolicy: {{ .Values.image.pullPolicy }}
        {{- if .Values.repairController.enableSecurityContext }}
        env:
        - name: LINKERD_CNI_REPAIR_CONTROLLER_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: LINKERD_CNI_REPAIR_CONTROLLER_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        command:
        - /usr/lib/linkerd/linkerd-cni-repair-controller
        args:
        - --admin-addr=0.0.0.0:9990
        - --log-format
        - {{ .Values.repairController.logFormat }}
        - --log-level
        - {{ .Values.repairController.logLevel }}
        livenessProbe:
          httpGet:
            path: /live
            port: admin-http
        readinessProbe:
          failureThreshold: 7
          httpGet:
            path: /ready
            port: admin-http
          initialDelaySeconds: 10
        ports:
        - containerPort: 9990
          name: admin-http
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
          privileged: false
          readOnlyRootFilesystem: true
          seccompProfile:
            type: RuntimeDefault
        {{- end }}
        {{- if .Values.repairController.resources }}
        {{- include "partials.resources" .Values.repairController.resources | nindent 8 }}
        {{- end }}
      {{- end }}
      volumes:
      {{- if ne .Values.destCNIBinDir .Values.destCNINetDir }}
      - name: cni-bin-dir
        hostPath:
          path: {{.Values.destCNIBinDir}}
      - name: cni-net-dir
        hostPath:
          path: {{.Values.destCNINetDir}}
      {{- else }}
      - name: cni-net-dir
        hostPath:
          path: {{.Values.destCNINetDir}}
      {{- end }}
      - name: linkerd-tmp-dir
        emptyDir: {}
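{{- /*
Usage sketch (assumes the public Linkerd Helm repo; adjust names as needed):

  helm repo add linkerd https://helm.linkerd.io/stable
  helm install linkerd2-cni -n linkerd-cni --create-namespace linkerd/linkerd2-cni

The control plane should then be installed with cniEnabled=true so injected
pods rely on this plugin instead of the proxy-init initContainer.
*/ -}}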