kind: Namespace
apiVersion: v1
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
    config.linkerd.io/admission-webhooks: disabled
    pod-security.kubernetes.io/enforce: privileged
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: linkerd-cni
  namespace: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
rules:
- apiGroups: [""]
  resources: ["pods", "nodes", "namespaces", "services"]
  verbs: ["list", "get", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: linkerd-cni
subjects:
- kind: ServiceAccount
  name: linkerd-cni
  namespace: linkerd-cni
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: linkerd-cni-config
  namespace: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
data:
  dest_cni_net_dir: "/etc/kubernetes/cni/net.d"
  dest_cni_bin_dir: "/opt/my-cni/bin"
  # The CNI network configuration to install on each node. The special
  # values in this config will be automatically populated.
  # iptables-mode and ipv6 flags are only considered as of linkerd-cni v1.4.0
  cni_network_config: |-
    {
      "name": "linkerd-cni",
      "type": "linkerd-cni",
      "log_level": "debug",
      "policy": {
        "type": "k8s",
        "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
        "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
      },
      "kubernetes": {
        "kubeconfig": "__KUBECONFIG_FILEPATH__"
      },
      "linkerd": {
        "incoming-proxy-port": 5143,
        "outgoing-proxy-port": 5140,
        "proxy-uid": 12102,
        "proxy-gid": 12102,
        "ports-to-redirect": [],
        "inbound-ports-to-ignore": ["4191","4190"],
        "simulate": false,
        "use-wait-flag": false,
        "iptables-mode": "legacy",
        "ipv6": false
      }
    }
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: linkerd-cni
  namespace: linkerd-cni
  labels:
    k8s-app: linkerd-cni
    linkerd.io/cni-resource: "true"
  annotations:
    linkerd.io/created-by: linkerd/cli dev-undefined
spec:
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: linkerd-cni
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: linkerd-cni
        linkerd.io/cni-resource: "true"
      annotations:
        linkerd.io/created-by: linkerd/cli dev-undefined
        linkerd.io/inject: disabled
    spec:
      tolerations:
      - operator: Exists
      nodeSelector:
        kubernetes.io/os: linux
      securityContext:
        seccompProfile:
          type: RuntimeDefault
      serviceAccountName: linkerd-cni
      priorityClassName: system-node-critical
      containers:
      # This container installs the linkerd CNI binaries
      # and CNI network config file on each node. The install
      # script copies the files into place and then sleeps so
      # that Kubernetes doesn't keep trying to restart it.
      - name: install-cni
        image: my-docker-registry.io/awesome/cni-plugin-test-image:v1.4.0
        imagePullPolicy:
        env:
        - name: DEST_CNI_NET_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_net_dir
        - name: DEST_CNI_BIN_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_bin_dir
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: cni_network_config
        - name: SLEEP
          value: "true"
        lifecycle:
          # In some edge-cases this helps ensure that cleanup() is called in the container's script
          # https://github.com/linkerd/linkerd2/issues/2355
          preStop:
            exec:
              command:
              - /bin/sh
              - -c
              - kill -15 1; sleep 15s
        volumeMounts:
        - mountPath: /host/opt/my-cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/kubernetes/cni/net.d
          name: cni-net-dir
        - mountPath: /tmp
          name: linkerd-tmp-dir
        securityContext:
          readOnlyRootFilesystem: true
          privileged: false
        resources:
      volumes:
      - name: cni-bin-dir
        hostPath:
          path: /opt/my-cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/kubernetes/cni/net.d
      - name: linkerd-tmp-dir
        emptyDir: {}
---
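# Usage sketch (comments only, not part of the rendered manifest above). It
# assumes the standard kubectl and linkerd CLIs and the resource names used in
# this file. Once the DaemonSet has rolled out on every node, the control plane
# can be installed with CNI support so that injected pods rely on this plugin
# for iptables setup instead of the proxy-init initContainer:
#
#   kubectl -n linkerd-cni rollout status daemonset/linkerd-cni
#   linkerd install --linkerd-cni-enabled | kubectl apply -f -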