...

Source file src/github.com/linkerd/linkerd2/pkg/healthcheck/healthcheck_test.go

Documentation: github.com/linkerd/linkerd2/pkg/healthcheck

     1  package healthcheck
     2  
     3  import (
     4  	"context"
     5  	"encoding/base64"
     6  	"encoding/json"
     7  	"errors"
     8  	"fmt"
     9  	"strings"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/go-test/deep"
    14  	"github.com/linkerd/linkerd2/pkg/charts/linkerd2"
    15  	"github.com/linkerd/linkerd2/pkg/identity"
    16  	"github.com/linkerd/linkerd2/pkg/issuercerts"
    17  	"github.com/linkerd/linkerd2/pkg/k8s"
    18  	"github.com/linkerd/linkerd2/pkg/tls"
    19  	corev1 "k8s.io/api/core/v1"
    20  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    21  )
    22  
// observer records formatted check results in order, so tests can
// compare the exact sequence of results emitted by RunChecks.
type observer struct {
	// results holds one "<category> <description>[: <err>]" string per
	// observed CheckResult, in notification order.
	results []string
}
    26  
    27  func newObserver() *observer {
    28  	return &observer{
    29  		results: []string{},
    30  	}
    31  }
    32  func (o *observer) resultFn(result *CheckResult) {
    33  	res := fmt.Sprintf("%s %s", result.Category, result.Description)
    34  	if result.Err != nil {
    35  		res += fmt.Sprintf(": %s", result.Err)
    36  	}
    37  	o.results = append(o.results, res)
    38  }
    39  
    40  func (o *observer) resultWithHintFn(result *CheckResult) {
    41  	res := fmt.Sprintf("%s %s", result.Category, result.Description)
    42  	if result.Err != nil {
    43  		res += fmt.Sprintf(": %s", result.Err)
    44  	}
    45  
    46  	if result.HintURL != "" {
    47  		res += fmt.Sprintf(": %s", result.HintURL)
    48  	}
    49  	o.results = append(o.results, res)
    50  }
    51  
    52  func (hc *HealthChecker) addCheckAsCategory(
    53  	testCategoryID CategoryID,
    54  	categoryID CategoryID,
    55  	desc string,
    56  ) {
    57  	testCategory := NewCategory(
    58  		testCategoryID,
    59  		[]Checker{},
    60  		false,
    61  	)
    62  
    63  	for _, cat := range hc.categories {
    64  		if cat.ID == categoryID {
    65  			for _, ch := range cat.checkers {
    66  				if ch.description == desc {
    67  					testCategory.checkers = append(testCategory.checkers, ch)
    68  					testCategory.enabled = true
    69  					break
    70  				}
    71  			}
    72  			break
    73  		}
    74  	}
    75  	hc.AppendCategories(testCategory)
    76  }
    77  
// TestHealthChecker exercises HealthChecker.RunChecks end to end:
// observer notification order, the overall success flag, hint URLs,
// fatal short-circuiting, retry behavior, and skipped checks.
func TestHealthChecker(t *testing.T) {
	// nullObserver discards results; used when only the returned
	// success flag matters.
	nullObserver := func(*CheckResult) {}

	passingCheck1 := NewCategory(
		"cat1",
		[]Checker{
			{
				description: "desc1",
				check: func(context.Context) error {
					return nil
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	passingCheck2 := NewCategory(
		"cat2",
		[]Checker{
			{
				description: "desc2",
				check: func(context.Context) error {
					return nil
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	failingCheck := NewCategory(
		"cat3",
		[]Checker{
			{
				description: "desc3",
				check: func(context.Context) error {
					return fmt.Errorf("error")
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	// fatal: true should abort all remaining checks when this one fails.
	fatalCheck := NewCategory(
		"cat6",
		[]Checker{
			{
				description: "desc6",
				fatal:       true,
				check: func(context.Context) error {
					return fmt.Errorf("fatal")
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	// Checks returning SkipError must not be reported to the observer.
	skippingCheck := NewCategory(
		"cat7",
		[]Checker{
			{
				description: "skip",
				check: func(context.Context) error {
					return SkipError{Reason: "needs skipping"}
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	skippingRPCCheck := NewCategory(
		"cat8",
		[]Checker{
			{
				description: "skipRpc",
				check: func(context.Context) error {
					return SkipError{Reason: "needs skipping"}
				},
				retryDeadline: time.Time{},
			},
		},
		true,
	)

	// hintAnchor should be appended to the category's hint base URL.
	troubleshootingCheck := NewCategory(
		"cat9",
		[]Checker{
			{
				description: "failCheck",
				hintAnchor:  "cat9",
				check: func(context.Context) error {
					return fmt.Errorf("fatal")
				},
			},
		},
		true,
	)

	t.Run("Notifies observer of all results", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)

		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(passingCheck2)
		hc.AppendCategories(failingCheck)

		expectedResults := []string{
			"cat1 desc1",
			"cat2 desc2",
			"cat3 desc3: error",
		}

		obs := newObserver()
		hc.RunChecks(obs.resultFn)

		if diff := deep.Equal(obs.results, expectedResults); diff != nil {
			t.Fatalf("%+v", diff)
		}
	})

	t.Run("Is successful if all checks were successful", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(passingCheck2)

		success, _ := hc.RunChecks(nullObserver)

		if !success {
			t.Fatalf("Expecting checks to be successful, but got [%t]", success)
		}
	})

	t.Run("Is not successful if one check fails", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(failingCheck)
		hc.AppendCategories(passingCheck2)

		success, _ := hc.RunChecks(nullObserver)

		if success {
			t.Fatalf("Expecting checks to not be successful, but got [%t]", success)
		}
	})

	t.Run("Check for troubleshooting URL", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		// The hint URL is the base URL with the checker's hintAnchor appended.
		troubleshootingCheck.WithHintBaseURL("www.extension.com/troubleshooting/#")
		hc.AppendCategories(troubleshootingCheck)
		expectedResults := []string{
			"cat9 failCheck: fatal: www.extension.com/troubleshooting/#cat9",
		}

		obs := newObserver()
		hc.RunChecks(obs.resultWithHintFn)

		if diff := deep.Equal(obs.results, expectedResults); diff != nil {
			t.Fatalf("%+v", diff)
		}
	})

	t.Run("Does not run remaining check if fatal check fails", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(fatalCheck)
		hc.AppendCategories(passingCheck2)

		// cat2 must never be reported: the fatal failure in cat6 stops the run.
		expectedResults := []string{
			"cat1 desc1",
			"cat6 desc6: fatal",
		}

		obs := newObserver()
		hc.RunChecks(obs.resultFn)

		if diff := deep.Equal(obs.results, expectedResults); diff != nil {
			t.Fatalf("%+v", diff)
		}
	})

	t.Run("Retries checks if retry is specified", func(t *testing.T) {
		// retryWindow is package-level state, presumably the delay between
		// retry attempts — zeroed here so the test does not sleep
		// (NOTE(review): confirm against RunChecks' retry loop).
		retryWindow = 0
		returnError := true

		// Fails exactly once, then succeeds, so one retry is observed.
		retryCheck := NewCategory(
			"cat7",
			[]Checker{
				{
					description:   "desc7",
					retryDeadline: time.Now().Add(100 * time.Second),
					check: func(context.Context) error {
						if returnError {
							returnError = false
							return fmt.Errorf("retry")
						}
						return nil
					},
				},
			},
			true,
		)

		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(retryCheck)

		observedResults := make([]string, 0)
		observer := func(result *CheckResult) {
			res := fmt.Sprintf("%s %s retry=%t", result.Category, result.Description, result.Retry)
			if result.Err != nil {
				res += fmt.Sprintf(": %s", result.Err)
			}
			observedResults = append(observedResults, res)
		}

		// The retried attempt replaces the original error message with a
		// generic "waiting" message and sets Retry=true.
		expectedResults := []string{
			"cat1 desc1 retry=false",
			"cat7 desc7 retry=true: waiting for check to complete",
			"cat7 desc7 retry=false",
		}

		hc.RunChecks(observer)

		if diff := deep.Equal(observedResults, expectedResults); diff != nil {
			t.Fatalf("%+v", diff)
		}
	})

	t.Run("Does not notify observer of skipped checks", func(t *testing.T) {
		hc := NewHealthChecker(
			[]CategoryID{},
			&Options{},
		)
		hc.AppendCategories(passingCheck1)
		hc.AppendCategories(skippingCheck)
		hc.AppendCategories(skippingRPCCheck)

		// Only the passing check appears; both SkipError checks are silent.
		expectedResults := []string{
			"cat1 desc1",
		}

		obs := newObserver()
		hc.RunChecks(obs.resultFn)

		if diff := deep.Equal(obs.results, expectedResults); diff != nil {
			t.Fatalf("%+v", diff)
		}
	})
}
   348  
   349  func TestCheckCanCreate(t *testing.T) {
   350  	exp := fmt.Errorf("not authorized to access deployments.apps")
   351  
   352  	hc := NewHealthChecker(
   353  		[]CategoryID{},
   354  		&Options{},
   355  	)
   356  	var err error
   357  	hc.kubeAPI, err = k8s.NewFakeAPI()
   358  	if err != nil {
   359  		t.Fatalf("Unexpected error: %s", err)
   360  	}
   361  	err = hc.checkCanCreate(context.Background(), "", "apps", "v1", "deployments")
   362  	if err == nil ||
   363  		err.Error() != exp.Error() {
   364  		t.Fatalf("Unexpected error (Expected: %s, Got: %s)", exp, err)
   365  	}
   366  }
   367  
   368  func TestCheckExtensionAPIServerAuthentication(t *testing.T) {
   369  	tests := []struct {
   370  		k8sConfigs []string
   371  		err        error
   372  	}{
   373  		{
   374  			[]string{},
   375  			fmt.Errorf("configmaps %q not found", k8s.ExtensionAPIServerAuthenticationConfigMapName),
   376  		},
   377  		{
   378  			[]string{`
   379  apiVersion: v1
   380  kind: ConfigMap
   381  metadata:
   382   name: extension-apiserver-authentication
   383   namespace: kube-system
   384  data:
   385   foo : 'bar'
   386   `,
   387  			},
   388  			fmt.Errorf("--%s is not configured", k8s.ExtensionAPIServerAuthenticationRequestHeaderClientCAFileKey),
   389  		},
   390  		{
   391  
   392  			[]string{fmt.Sprintf(`
   393  apiVersion: v1
   394  kind: ConfigMap
   395  metadata:
   396   name: extension-apiserver-authentication
   397   namespace: kube-system
   398  data:
   399    %s : 'bar'
   400    `, k8s.ExtensionAPIServerAuthenticationRequestHeaderClientCAFileKey)},
   401  			nil,
   402  		},
   403  	}
   404  	for i, test := range tests {
   405  		test := test
   406  		t.Run(fmt.Sprintf("%d: returns expected extension apiserver authentication check result", i), func(t *testing.T) {
   407  			hc := NewHealthChecker([]CategoryID{}, &Options{})
   408  			var err error
   409  			hc.kubeAPI, err = k8s.NewFakeAPI(test.k8sConfigs...)
   410  			if err != nil {
   411  				t.Fatal(err)
   412  			}
   413  			err = hc.checkExtensionAPIServerAuthentication(context.Background())
   414  			if err != nil || test.err != nil {
   415  				if (err == nil && test.err != nil) ||
   416  					(err != nil && test.err == nil) ||
   417  					(err.Error() != test.err.Error()) {
   418  					t.Fatalf("Unexpected error (Expected: %s, Got: %s)", test.err, err)
   419  				}
   420  			}
   421  		})
   422  	}
   423  }
   424  
// TestCheckClockSkew verifies checkClockSkew: a cluster with no nodes
// reports no skew, while a node whose Ready heartbeat timestamp is far
// in the past is flagged by name.
func TestCheckClockSkew(t *testing.T) {
	tests := []struct {
		k8sConfigs []string
		err        error
	}{
		// No nodes: nothing to compare, so no error.
		{
			[]string{},
			nil,
		},
		// A heartbeat from the year 2000 is far outside any plausible
		// clock-skew window relative to "now".
		{
			[]string{`apiVersion: v1
kind: Node
metadata:
  name: test-node
status:
  conditions:
  - lastHeartbeatTime: "2000-01-01T01:00:00Z"
    status: "True"
    type: Ready`,
			},
			fmt.Errorf("clock skew detected for node(s): test-node"),
		},
	}

	for i, test := range tests {
		test := test // pin
		t.Run(fmt.Sprintf("%d: returns expected clock skew check result", i), func(t *testing.T) {
			hc := NewHealthChecker(
				[]CategoryID{},
				&Options{},
			)

			var err error
			hc.kubeAPI, err = k8s.NewFakeAPI(test.k8sConfigs...)
			if err != nil {
				t.Fatalf("Unexpected error: %s", err)
			}

			err = hc.checkClockSkew(context.Background())
			// Fatal when exactly one of (err, test.err) is nil, or when
			// both are set but their messages differ; short-circuit
			// evaluation keeps err.Error() safe from nil dereference.
			if err != nil || test.err != nil {
				if (err == nil && test.err != nil) ||
					(err != nil && test.err == nil) ||
					(err.Error() != test.err.Error()) {
					t.Fatalf("Unexpected error (Expected: %s, Got: %s)", test.err, err)
				}
			}
		})
	}

}
   475  
// TestNamespaceExtCfg verifies the linkerd-extension-checks category:
// a given linkerd.io/extension label value may appear on at most one
// namespace in the cluster.
func TestNamespaceExtCfg(t *testing.T) {
	// Namespace manifests keyed by a short handle. mc-1 and mc-2 both
	// carry the "multicluster" extension label, which is the invalid
	// configuration exercised below.
	namespaces := map[string]string{
		"vizOne": `
apiVersion: v1
kind: Namespace
metadata:
  name: viz-1
  labels:
    linkerd.io/extension: viz
`,
		"mcOne": `
apiVersion: v1
kind: Namespace
metadata:
  name: mc-1
  labels:
    linkerd.io/extension: multicluster
`,
		"mcTwo": `
apiVersion: v1
kind: Namespace
metadata:
  name: mc-2
  labels:
    linkerd.io/extension: multicluster
`}

	testCases := []struct {
		description string
		k8sConfigs  []string
		results     []string
	}{
		// One namespace per extension label: valid.
		{
			description: "successfully passes checks",
			k8sConfigs:  []string{namespaces["vizOne"], namespaces["mcOne"]},
			results: []string{
				"linkerd-extension-checks namespace configuration for extensions",
			},
		},
		// Two namespaces share the "multicluster" label: invalid, and the
		// error lists both offending namespaces.
		{
			description: "fails invalid configuration",
			k8sConfigs:  []string{namespaces["vizOne"], namespaces["mcOne"], namespaces["mcTwo"]},
			results: []string{
				"linkerd-extension-checks namespace configuration for extensions: some extensions have invalid configuration\n\t* label \"linkerd.io/extension=multicluster\" is present on more than one namespace:\n\t\t* mc-1\n\t\t* mc-2",
			},
		},
	}

	for _, tc := range testCases {
		// pin tc
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			hc := NewHealthChecker(
				[]CategoryID{LinkerdExtensionChecks},
				&Options{
					ControlPlaneNamespace: "test-ns",
				},
			)

			var err error
			hc.kubeAPI, err = k8s.NewFakeAPI(tc.k8sConfigs...)
			if err != nil {
				t.Fatalf("Unexpected error: %s", err)
			}

			obs := newObserver()
			hc.RunChecks(obs.resultFn)
			if diff := deep.Equal(obs.results, tc.results); diff != nil {
				t.Fatalf("%+v", diff)
			}
		})
	}
}
   549  
// TestConfigExists walks the linkerd-config check category through a
// series of progressively more complete clusters: each test case adds
// one more resource kind, and expects exactly one more check line to
// pass before the next missing-resource failure is reported.
func TestConfigExists(t *testing.T) {

	// Control-plane namespace fixture.
	namespace := []string{`
apiVersion: v1
kind: Namespace
metadata:
  name: test-ns
`}
	// The ClusterRoles the checks expect, labeled with the control-plane ns.
	clusterRoles := []string{`
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-test-ns-identity
  labels:
    linkerd.io/control-plane-ns: test-ns
`,
		`
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-test-ns-proxy-injector
  labels:
    linkerd.io/control-plane-ns: test-ns
`}
	clusterRoleBindings := []string{`
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-test-ns-identity
  labels:
    linkerd.io/control-plane-ns: test-ns
`,
		`
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-test-ns-proxy-injector
  labels:
    linkerd.io/control-plane-ns: test-ns
`}
	serviceAccounts := []string{`
kind: ServiceAccount
apiVersion: v1
metadata:
  name: linkerd-destination
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
`,
		`
kind: ServiceAccount
apiVersion: v1
metadata:
  name: linkerd-identity
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
`,
		`
kind: ServiceAccount
apiVersion: v1
metadata:
  name: linkerd-proxy-injector
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
`,
		`
kind: ServiceAccount
apiVersion: v1
metadata:
  name: linkerd-heartbeat
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
`}
	// Render a CRD manifest per policy/profile CRD the control plane
	// installs; also reused below as Options.CRDManifest.
	crds := []string{}
	for _, crd := range []struct{ name, version string }{
		{name: "authorizationpolicies.policy.linkerd.io", version: "v1alpha1"},
		{name: "meshtlsauthentications.policy.linkerd.io", version: "v1alpha1"},
		{name: "networkauthentications.policy.linkerd.io", version: "v1alpha1"},
		{name: "serverauthorizations.policy.linkerd.io", version: "v1beta1"},
		{name: "servers.policy.linkerd.io", version: "v1beta1"},
		{name: "serviceprofiles.linkerd.io", version: "v1alpha2"},
	} {
		crds = append(crds, fmt.Sprintf(`
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: %s
  labels:
    linkerd.io/control-plane-ns: test-ns
spec:
  versions:
  - name: %s`, crd.name, crd.version))
	}
	mutatingWebhooks := []string{`
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: linkerd-proxy-injector-webhook-config
  labels:
    linkerd.io/control-plane-ns: test-ns
`}
	validatingWebhooks := []string{`
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: linkerd-sp-validator-webhook-config
  labels:
    linkerd.io/control-plane-ns: test-ns
`}

	// Each case's k8sConfigs is a superset of the previous case's; the
	// expected results grow by one passing line each time, ending with a
	// failure naming whatever is still missing.
	testCases := []struct {
		k8sConfigs []string
		results    []string
	}{
		{
			[]string{},
			[]string{"linkerd-config control plane Namespace exists: The \"test-ns\" namespace does not exist"},
		},
		{
			namespace,
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist: missing ClusterRoles: linkerd-test-ns-identity, linkerd-test-ns-proxy-injector",
			},
		},
		{
			multiappend(
				namespace,
				clusterRoles,
			),
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist",
				"linkerd-config control plane ClusterRoleBindings exist: missing ClusterRoleBindings: linkerd-test-ns-identity, linkerd-test-ns-proxy-injector",
			},
		},
		{
			multiappend(
				namespace,
				clusterRoles,
				clusterRoleBindings,
				serviceAccounts,
			),
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist",
				"linkerd-config control plane ClusterRoleBindings exist",
				"linkerd-config control plane ServiceAccounts exist",
				"linkerd-config control plane CustomResourceDefinitions exist: missing authorizationpolicies.policy.linkerd.io, missing meshtlsauthentications.policy.linkerd.io, missing networkauthentications.policy.linkerd.io, missing serverauthorizations.policy.linkerd.io, missing servers.policy.linkerd.io, missing serviceprofiles.linkerd.io",
			},
		},
		{
			multiappend(
				namespace,
				clusterRoles,
				clusterRoleBindings,
				serviceAccounts,
				crds,
			),
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist",
				"linkerd-config control plane ClusterRoleBindings exist",
				"linkerd-config control plane ServiceAccounts exist",
				"linkerd-config control plane CustomResourceDefinitions exist",
				"linkerd-config control plane MutatingWebhookConfigurations exist: missing MutatingWebhookConfigurations: linkerd-proxy-injector-webhook-config",
			},
		},
		{
			multiappend(
				namespace,
				clusterRoles,
				clusterRoleBindings,
				serviceAccounts,
				crds,
				mutatingWebhooks,
			),
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist",
				"linkerd-config control plane ClusterRoleBindings exist",
				"linkerd-config control plane ServiceAccounts exist",
				"linkerd-config control plane CustomResourceDefinitions exist",
				"linkerd-config control plane MutatingWebhookConfigurations exist",
				"linkerd-config control plane ValidatingWebhookConfigurations exist: missing ValidatingWebhookConfigurations: linkerd-sp-validator-webhook-config",
			},
		},
		// Fully provisioned cluster: every check passes.
		{
			multiappend(
				namespace,
				clusterRoles,
				clusterRoleBindings,
				serviceAccounts,
				crds,
				mutatingWebhooks,
				validatingWebhooks,
			),
			[]string{
				"linkerd-config control plane Namespace exists",
				"linkerd-config control plane ClusterRoles exist",
				"linkerd-config control plane ClusterRoleBindings exist",
				"linkerd-config control plane ServiceAccounts exist",
				"linkerd-config control plane CustomResourceDefinitions exist",
				"linkerd-config control plane MutatingWebhookConfigurations exist",
				"linkerd-config control plane ValidatingWebhookConfigurations exist",
			},
		},
	}

	for i, tc := range testCases {
		tc := tc // pin
		t.Run(fmt.Sprintf("%d: returns expected config result", i), func(t *testing.T) {
			hc := NewHealthChecker(
				[]CategoryID{LinkerdConfigChecks},
				&Options{
					ControlPlaneNamespace: "test-ns",
					CRDManifest:           strings.Join(crds, "\n---\n"),
				},
			)

			var err error
			hc.kubeAPI, err = k8s.NewFakeAPI(tc.k8sConfigs...)
			if err != nil {
				t.Fatalf("Unexpected error: %s", err)
			}

			obs := newObserver()
			hc.RunChecks(obs.resultFn)
			if diff := deep.Equal(obs.results, tc.results); diff != nil {
				t.Fatalf("%+v", diff)
			}
		})
	}
}
   787  
   788  func TestCheckControlPlanePodExistence(t *testing.T) {
   789  	var testCases = []struct {
   790  		checkDescription string
   791  		resources        []string
   792  		expected         []string
   793  	}{
   794  		{
   795  			checkDescription: "'linkerd-config' config map exists",
   796  			resources: []string{`
   797  apiVersion: v1
   798  kind: ConfigMap
   799  metadata:
   800    name: linkerd-config
   801    namespace: test-ns
   802  data:
   803    values: "{}"
   804  `,
   805  			},
   806  			expected: []string{
   807  				"cat1 'linkerd-config' config map exists",
   808  			},
   809  		},
   810  	}
   811  
   812  	for id, testCase := range testCases {
   813  		testCase := testCase
   814  		t.Run(fmt.Sprintf("%d", id), func(t *testing.T) {
   815  			hc := NewHealthChecker(
   816  				[]CategoryID{},
   817  				&Options{
   818  					ControlPlaneNamespace: "test-ns",
   819  				},
   820  			)
   821  
   822  			var err error
   823  			hc.kubeAPI, err = k8s.NewFakeAPI(testCase.resources...)
   824  			if err != nil {
   825  				t.Fatalf("Unexpected error: %s", err)
   826  			}
   827  
   828  			// validate that this check relies on the k8s api, not on hc.controlPlanePods
   829  			hc.addCheckAsCategory("cat1", LinkerdControlPlaneExistenceChecks,
   830  				testCase.checkDescription)
   831  
   832  			obs := newObserver()
   833  			hc.RunChecks(obs.resultFn)
   834  			if diff := deep.Equal(obs.results, testCase.expected); diff != nil {
   835  				t.Fatalf("%+v", diff)
   836  			}
   837  		})
   838  	}
   839  }
   840  
// TestCheckClusterNetworks verifies the "cluster networks contains all
// node podCIDRs" check: node podCIDRs outside the configured
// clusterNetworks fail with a remediation hint, contained ones pass.
func TestCheckClusterNetworks(t *testing.T) {
	var testCases = []struct {
		checkDescription string
		k8sConfigs       []string
		expected         []string
	}{
		// Two nodes whose podCIDRs fall outside the RFC1918-style
		// clusterNetworks in linkerd-config: the check fails and suggests
		// the --set clusterNetworks flag value to fix it.
		{
			checkDescription: "cluster networks contains all node podCIDRs",
			k8sConfigs: []string{`
apiVersion: v1
kind: Namespace
metadata:
  name: test-ns
`,
				`
apiVersion: v1
kind: Node
metadata:
  name: linkerd-test-ns-identity
spec:
  podCIDR: 90.10.90.24/24
`,
				`
apiVersion: v1
kind: Node
metadata:
  name: linkerd-test-ns-identity2
spec:
  podCIDR: 242.3.64.0/25
`,
				`
kind: ConfigMap
apiVersion: v1
metadata:
  name: linkerd-config
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
data:
  values: |
    clusterNetworks: "10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16"
`,
			},
			expected: []string{
				"linkerd-existence cluster networks contains all node podCIDRs: node has podCIDR(s) [242.3.64.0/25 90.10.90.24/24] which are not contained in the Linkerd clusterNetworks.\n\tTry installing linkerd via --set clusterNetworks=\"242.3.64.0/25\\,90.10.90.24/24\"",
			},
		},
		// A single node whose podCIDR is contained in 10.0.0.0/8: passes.
		{
			checkDescription: "cluster networks contains all node podCIDRs",
			k8sConfigs: []string{`
apiVersion: v1
kind: Namespace
metadata:
  name: test-ns
`,
				`
apiVersion: v1
kind: Node
metadata:
  name: linkerd-test-ns-identity
spec:
  podCIDR: 10.0.0.24/24
`,
				`
kind: ConfigMap
apiVersion: v1
metadata:
  name: linkerd-config
  namespace: test-ns
  labels:
    linkerd.io/control-plane-ns: test-ns
data:
  values: |
    clusterNetworks: "10.0.0.0/8,100.64.0.0/10,172.16.0.0/12,192.168.0.0/16"
`,
			},
			expected: []string{
				"linkerd-existence cluster networks contains all node podCIDRs",
			},
		},
	}

	for i, tc := range testCases {
		tc := tc // pin
		t.Run(fmt.Sprintf("%d: returns expected config result", i), func(t *testing.T) {
			hc := NewHealthChecker(
				[]CategoryID{},
				&Options{
					ControlPlaneNamespace: "test-ns",
				},
			)

			var err error
			hc.kubeAPI, err = k8s.NewFakeAPI(tc.k8sConfigs...)
			if err != nil {
				t.Fatalf("Unexpected error: %s", err)
			}

			obs := newObserver()
			// Run only the single named check, isolated into its own category.
			hc.addCheckAsCategory("linkerd-existence", LinkerdControlPlaneExistenceChecks,
				tc.checkDescription)
			hc.RunChecks(obs.resultFn)
			if diff := deep.Equal(obs.results, tc.expected); diff != nil {
				t.Fatalf("%+v", diff)
			}
		})
	}
}
   949  
   950  func proxiesWithCertificates(certificates ...string) []string {
   951  	result := []string{}
   952  	for i, certificate := range certificates {
   953  		result = append(result, fmt.Sprintf(`
   954  apiVersion: v1
   955  kind: Pod
   956  metadata:
   957    name: pod-%d
   958    namespace: namespace-%d
   959    labels:
   960      %s: linkerd
   961  spec:
   962    containers:
   963    - name: %s
   964      env:
   965      - name: %s
   966        value: %s
   967  `, i, i, k8s.ControllerNSLabel, k8s.ProxyContainerName, identity.EnvTrustAnchors, certificate))
   968  	}
   969  	return result
   970  }
   971  
// TestCheckDataPlaneProxiesCertificate verifies
// checkDataPlaneProxiesCertificate: proxies whose trust anchors differ
// from the CA bundle in the linkerd-identity-trust-roots config map are
// reported as needing a restart, scoped to hc.DataPlaneNamespace when
// one is set (pods are then listed without the namespace prefix).
func TestCheckDataPlaneProxiesCertificate(t *testing.T) {
	const currentCertificate = "current-certificate"
	const oldCertificate = "old-certificate"

	// The cluster's current trust bundle, against which each proxy's
	// trust anchors are compared.
	linkerdIdentityTrustRoots := fmt.Sprintf(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: %s
data:
  ca-bundle.crt: %s

`, "linkerd-identity-trust-roots", currentCertificate)

	var testCases = []struct {
		checkDescription string
		resources        []string
		namespace        string // DataPlaneNamespace; empty means all namespaces
		expectedErr      error
	}{
		{
			checkDescription: "all proxies match CA certificate (all namespaces)",
			resources:        proxiesWithCertificates(currentCertificate, currentCertificate),
			namespace:        "",
			expectedErr:      nil,
		},
		{
			checkDescription: "some proxies match CA certificate (all namespaces)",
			resources:        proxiesWithCertificates(currentCertificate, oldCertificate),
			namespace:        "",
			expectedErr:      errors.New("Some pods do not have the current trust bundle and must be restarted:\n\t* namespace-1/pod-1"),
		},
		{
			checkDescription: "no proxies match CA certificate (all namespaces)",
			resources:        proxiesWithCertificates(oldCertificate, oldCertificate),
			namespace:        "",
			expectedErr:      errors.New("Some pods do not have the current trust bundle and must be restarted:\n\t* namespace-0/pod-0\n\t* namespace-1/pod-1"),
		},
		// Scoped to namespace-0, whose only pod matches: no error even
		// though namespace-1's pod is stale.
		{
			checkDescription: "some proxies match CA certificate (match in target namespace)",
			resources:        proxiesWithCertificates(currentCertificate, oldCertificate),
			namespace:        "namespace-0",
			expectedErr:      nil,
		},
		{
			checkDescription: "some proxies match CA certificate (unmatch in target namespace)",
			resources:        proxiesWithCertificates(currentCertificate, oldCertificate),
			namespace:        "namespace-1",
			expectedErr:      errors.New("Some pods do not have the current trust bundle and must be restarted:\n\t* pod-1"),
		},
		{
			checkDescription: "no proxies match CA certificate (specific namespace)",
			resources:        proxiesWithCertificates(oldCertificate, oldCertificate),
			namespace:        "namespace-0",
			expectedErr:      errors.New("Some pods do not have the current trust bundle and must be restarted:\n\t* pod-0"),
		},
	}

	for id, testCase := range testCases {
		testCase := testCase // pin
		t.Run(fmt.Sprintf("%d", id), func(t *testing.T) {
			hc := NewHealthChecker([]CategoryID{}, &Options{})
			hc.DataPlaneNamespace = testCase.namespace

			var err error
			hc.kubeAPI, err = k8s.NewFakeAPI(append(testCase.resources, linkerdIdentityTrustRoots)...)
			if err != nil {
				t.Fatalf("Unexpected error: %q", err)
			}

			err = hc.checkDataPlaneProxiesCertificate(context.Background())
			// deep.Equal compares the full error values, including messages.
			if diff := deep.Equal(err, testCase.expectedErr); diff != nil {
				t.Fatalf("%+v", diff)
			}
		})
	}
}
  1049  
  1050  func TestValidateControlPlanePods(t *testing.T) {
  1051  	pod := func(name string, phase corev1.PodPhase, ready bool) corev1.Pod {
  1052  		return corev1.Pod{
  1053  			ObjectMeta: metav1.ObjectMeta{Name: name},
  1054  			Status: corev1.PodStatus{
  1055  				Phase: phase,
  1056  				ContainerStatuses: []corev1.ContainerStatus{
  1057  					{
  1058  						Name:  strings.Split(name, "-")[1],
  1059  						Ready: ready,
  1060  					},
  1061  				},
  1062  			},
  1063  		}
  1064  	}
  1065  
  1066  	t.Run("Returns an error if not all pods are running", func(t *testing.T) {
  1067  		pods := []corev1.Pod{
  1068  			pod("linkerd-destination-9849948665-37082", corev1.PodFailed, true),
  1069  			pod("linkerd-identity-6849948664-27982", corev1.PodFailed, true),
  1070  		}
  1071  
  1072  		err := validateControlPlanePods(pods)
  1073  		if err == nil {
  1074  			t.Fatal("Expected error, got nothing")
  1075  		}
  1076  		if err.Error() != "No running pods for \"linkerd-destination\"" {
  1077  			t.Fatalf("Unexpected error message: %s", err.Error())
  1078  		}
  1079  	})
  1080  
  1081  	t.Run("Returns an error if not all containers are ready", func(t *testing.T) {
  1082  		pods := []corev1.Pod{
  1083  			pod("linkerd-identity-6849948664-27982", corev1.PodRunning, true),
  1084  			pod("linkerd-tap-6c878df6c8-2hmtd", corev1.PodRunning, true),
  1085  		}
  1086  
  1087  		err := validateControlPlanePods(pods)
  1088  		if err == nil {
  1089  			t.Fatal("Expected error, got nothing")
  1090  		}
  1091  	})
  1092  
  1093  	t.Run("Returns nil if all pods are running and all containers are ready", func(t *testing.T) {
  1094  		pods := []corev1.Pod{
  1095  			pod("linkerd-destination-9849948665-37082", corev1.PodRunning, true),
  1096  			pod("linkerd-identity-6849948664-27982", corev1.PodRunning, true),
  1097  			pod("linkerd-proxy-injector-5f79ff4844-", corev1.PodRunning, true),
  1098  		}
  1099  
  1100  		err := validateControlPlanePods(pods)
  1101  		if err != nil {
  1102  			t.Fatalf("Unexpected error: %s", err)
  1103  		}
  1104  	})
  1105  
  1106  	// This test is just for ensuring full coverage of the validateControlPlanePods function
  1107  	t.Run("Returns an error if all the controller pods are not ready", func(t *testing.T) {
  1108  		pods := []corev1.Pod{
  1109  			pod("linkerd-destination-9849948665-37082", corev1.PodRunning, false),
  1110  			pod("linkerd-identity-6849948664-27982", corev1.PodRunning, false),
  1111  			pod("linkerd-proxy-injector-5f79ff4844-", corev1.PodRunning, false),
  1112  		}
  1113  
  1114  		err := validateControlPlanePods(pods)
  1115  		if err == nil {
  1116  			t.Fatal("Expected error, got nothing")
  1117  		}
  1118  	})
  1119  
  1120  	t.Run("Returns nil if, HA mode, at least one pod of each control plane component is ready", func(t *testing.T) {
  1121  		pods := []corev1.Pod{
  1122  			pod("linkerd-destination-9843948665-48082", corev1.PodRunning, true),
  1123  			pod("linkerd-destination-9843948665-48083", corev1.PodRunning, false),
  1124  			pod("linkerd-destination-9843948665-48084", corev1.PodFailed, false),
  1125  			pod("linkerd-identity-6849948664-27982", corev1.PodRunning, true),
  1126  			pod("linkerd-identity-6849948664-27983", corev1.PodRunning, false),
  1127  			pod("linkerd-identity-6849948664-27984", corev1.PodFailed, false),
  1128  			pod("linkerd-proxy-injector-5f79ff4844-", corev1.PodRunning, true),
  1129  		}
  1130  
  1131  		err := validateControlPlanePods(pods)
  1132  		if err != nil {
  1133  			t.Fatalf("Unexpected error: %s", err)
  1134  		}
  1135  	})
  1136  
  1137  	t.Run("Returns nil if all linkerd pods are running and pod list includes non-linkerd pod", func(t *testing.T) {
  1138  		pods := []corev1.Pod{
  1139  			pod("linkerd-destination-9843948665-48082", corev1.PodRunning, true),
  1140  			pod("linkerd-identity-6849948664-27982", corev1.PodRunning, true),
  1141  			pod("linkerd-proxy-injector-5f79ff4844-", corev1.PodRunning, true),
  1142  			pod("hello-43c25d", corev1.PodRunning, true),
  1143  		}
  1144  
  1145  		err := validateControlPlanePods(pods)
  1146  		if err != nil {
  1147  			t.Fatalf("Unexpected error message: %s", err.Error())
  1148  		}
  1149  	})
  1150  }
  1151  
  1152  func TestValidateDataPlaneNamespace(t *testing.T) {
  1153  	testCases := []struct {
  1154  		ns     string
  1155  		result string
  1156  	}{
  1157  		{
  1158  			"",
  1159  			"data-plane-ns-test-cat data plane namespace exists",
  1160  		},
  1161  		{
  1162  			"bad-ns",
  1163  			"data-plane-ns-test-cat data plane namespace exists: The \"bad-ns\" namespace does not exist",
  1164  		},
  1165  	}
  1166  
  1167  	for i, tc := range testCases {
  1168  		tc := tc // pin
  1169  		t.Run(fmt.Sprintf("%d/%s", i, tc.ns), func(t *testing.T) {
  1170  			hc := NewHealthChecker(
  1171  				[]CategoryID{},
  1172  				&Options{
  1173  					DataPlaneNamespace: tc.ns,
  1174  				},
  1175  			)
  1176  			var err error
  1177  			hc.kubeAPI, err = k8s.NewFakeAPI()
  1178  			if err != nil {
  1179  				t.Fatalf("Unexpected error: %s", err)
  1180  			}
  1181  
  1182  			// create a synthetic category that only includes the "data plane namespace exists" check
  1183  			hc.addCheckAsCategory("data-plane-ns-test-cat", LinkerdDataPlaneChecks, "data plane namespace exists")
  1184  
  1185  			expectedResults := []string{
  1186  				tc.result,
  1187  			}
  1188  			obs := newObserver()
  1189  			hc.RunChecks(obs.resultFn)
  1190  			if diff := deep.Equal(obs.results, expectedResults); diff != nil {
  1191  				t.Fatalf("%+v", diff)
  1192  			}
  1193  		})
  1194  	}
  1195  }
  1196  
  1197  func TestCheckDataPlanePods(t *testing.T) {
  1198  
  1199  	t.Run("Returns an error if no inject pods were found", func(t *testing.T) {
  1200  		err := CheckPodsRunning([]corev1.Pod{}, "emojivoto")
  1201  		if err == nil {
  1202  			t.Fatal("Expected error, got nothing")
  1203  		}
  1204  		if err.Error() != "no \"linkerd-proxy\" containers found in the \"emojivoto\" namespace" {
  1205  			t.Fatalf("Unexpected error message: %s", err.Error())
  1206  		}
  1207  	})
  1208  
  1209  	t.Run("Returns an error if not all pods are running", func(t *testing.T) {
  1210  		pods := []corev1.Pod{
  1211  			{
  1212  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1213  				Status: corev1.PodStatus{
  1214  					Phase: "Running",
  1215  					ContainerStatuses: []corev1.ContainerStatus{
  1216  						{
  1217  							Name:  k8s.ProxyContainerName,
  1218  							Ready: true,
  1219  						},
  1220  					},
  1221  				},
  1222  			},
  1223  			{
  1224  				ObjectMeta: metav1.ObjectMeta{Name: "vote-bot-644b8cb6b4-g8nlr"},
  1225  				Status: corev1.PodStatus{
  1226  					Phase: "Running",
  1227  					ContainerStatuses: []corev1.ContainerStatus{
  1228  						{
  1229  							Name:  k8s.ProxyContainerName,
  1230  							Ready: true,
  1231  						},
  1232  					},
  1233  				},
  1234  			},
  1235  			{
  1236  				ObjectMeta: metav1.ObjectMeta{Name: "voting-65b9fffd77-rlwsd"},
  1237  				Status: corev1.PodStatus{
  1238  					Phase: "Failed",
  1239  					ContainerStatuses: []corev1.ContainerStatus{
  1240  						{
  1241  							Name:  k8s.ProxyContainerName,
  1242  							Ready: false,
  1243  						},
  1244  					},
  1245  				},
  1246  			},
  1247  			{
  1248  				ObjectMeta: metav1.ObjectMeta{Name: "web-6cfbccc48-5g8px"},
  1249  				Status: corev1.PodStatus{
  1250  					Phase: "Running",
  1251  					ContainerStatuses: []corev1.ContainerStatus{
  1252  						{
  1253  							Name:  k8s.ProxyContainerName,
  1254  							Ready: true,
  1255  						},
  1256  					},
  1257  				},
  1258  			},
  1259  		}
  1260  
  1261  		err := CheckPodsRunning(pods, "emojivoto")
  1262  		if err == nil {
  1263  			t.Fatal("Expected error, got nothing")
  1264  		}
  1265  		if err.Error() != "pod \"voting-65b9fffd77-rlwsd\" status is Failed" {
  1266  			t.Fatalf("Unexpected error message: %s", err.Error())
  1267  		}
  1268  	})
  1269  
  1270  	t.Run("Does not return an error if the pod is Evicted", func(t *testing.T) {
  1271  		pods := []corev1.Pod{
  1272  			{
  1273  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1274  				Status: corev1.PodStatus{
  1275  					Phase: "Evicted",
  1276  					ContainerStatuses: []corev1.ContainerStatus{
  1277  						{
  1278  							Name:  k8s.ProxyContainerName,
  1279  							Ready: true,
  1280  						},
  1281  					},
  1282  				},
  1283  			},
  1284  		}
  1285  
  1286  		err := CheckPodsRunning(pods, "emojivoto")
  1287  		if err != nil {
  1288  			t.Fatalf("Expected no error, got %s", err)
  1289  		}
  1290  	})
  1291  
  1292  	t.Run("Does not return an error if the pod is in Shutdown state", func(t *testing.T) {
  1293  		pods := []corev1.Pod{
  1294  			{
  1295  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1296  				Status: corev1.PodStatus{
  1297  					Phase:  "Failed",
  1298  					Reason: "Shutdown",
  1299  				},
  1300  			},
  1301  		}
  1302  
  1303  		err := CheckPodsRunning(pods, "emojivoto")
  1304  		if err != nil {
  1305  			t.Fatalf("Expected no error, got %s", err)
  1306  		}
  1307  	})
  1308  
  1309  	t.Run("Does not return an error if the pod is in NodeShutdown state", func(t *testing.T) {
  1310  		pods := []corev1.Pod{
  1311  			{
  1312  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1313  				Status: corev1.PodStatus{
  1314  					Phase:  "Failed",
  1315  					Reason: "NodeShutdown",
  1316  				},
  1317  			},
  1318  		}
  1319  
  1320  		err := CheckPodsRunning(pods, "emojivoto")
  1321  		if err != nil {
  1322  			t.Fatalf("Expected no error, got %s", err)
  1323  		}
  1324  	})
  1325  
  1326  	t.Run("Does not return an error if the pod is in Terminated state", func(t *testing.T) {
  1327  		pods := []corev1.Pod{
  1328  			{
  1329  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1330  				Status: corev1.PodStatus{
  1331  					Phase:  "Failed",
  1332  					Reason: "Terminated",
  1333  				},
  1334  			},
  1335  		}
  1336  
  1337  		err := CheckPodsRunning(pods, "emojivoto")
  1338  		if err != nil {
  1339  			t.Fatalf("Expected no error, got %s", err)
  1340  		}
  1341  	})
  1342  
  1343  	t.Run("Returns an error if the proxy container is not ready", func(t *testing.T) {
  1344  		pods := []corev1.Pod{
  1345  			{
  1346  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1347  				Status: corev1.PodStatus{
  1348  					Phase: "Running",
  1349  					ContainerStatuses: []corev1.ContainerStatus{
  1350  						{
  1351  							Name:  k8s.ProxyContainerName,
  1352  							Ready: true,
  1353  						},
  1354  					},
  1355  				},
  1356  			},
  1357  			{
  1358  				ObjectMeta: metav1.ObjectMeta{Name: "vote-bot-644b8cb6b4-g8nlr"},
  1359  				Status: corev1.PodStatus{
  1360  					Phase: "Running",
  1361  					ContainerStatuses: []corev1.ContainerStatus{
  1362  						{
  1363  							Name:  k8s.ProxyContainerName,
  1364  							Ready: false,
  1365  						},
  1366  					},
  1367  				},
  1368  			},
  1369  			{
  1370  				ObjectMeta: metav1.ObjectMeta{Name: "voting-65b9fffd77-rlwsd"},
  1371  				Status: corev1.PodStatus{
  1372  					Phase: "Running",
  1373  					ContainerStatuses: []corev1.ContainerStatus{
  1374  						{
  1375  							Name:  k8s.ProxyContainerName,
  1376  							Ready: false,
  1377  						},
  1378  					},
  1379  				},
  1380  			},
  1381  			{
  1382  				ObjectMeta: metav1.ObjectMeta{Name: "web-6cfbccc48-5g8px"},
  1383  				Status: corev1.PodStatus{
  1384  					Phase: "Running",
  1385  					ContainerStatuses: []corev1.ContainerStatus{
  1386  						{
  1387  							Name:  k8s.ProxyContainerName,
  1388  							Ready: true,
  1389  						},
  1390  					},
  1391  				},
  1392  			},
  1393  		}
  1394  
  1395  		err := CheckPodsRunning(pods, "emojivoto")
  1396  		if err == nil {
  1397  			t.Fatal("Expected error, got nothing")
  1398  		}
  1399  		if err.Error() != "container \"linkerd-proxy\" in pod \"vote-bot-644b8cb6b4-g8nlr\" is not ready" {
  1400  			t.Fatalf("Unexpected error message: %s", err.Error())
  1401  		}
  1402  	})
  1403  
  1404  	t.Run("Returns nil if all pods are running and all proxy containers are ready", func(t *testing.T) {
  1405  		pods := []corev1.Pod{
  1406  			{
  1407  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1408  				Status: corev1.PodStatus{
  1409  					Phase: "Running",
  1410  					ContainerStatuses: []corev1.ContainerStatus{
  1411  						{
  1412  							Name:  k8s.ProxyContainerName,
  1413  							Ready: true,
  1414  						},
  1415  					},
  1416  				},
  1417  			},
  1418  			{
  1419  				ObjectMeta: metav1.ObjectMeta{Name: "vote-bot-644b8cb6b4-g8nlr"},
  1420  				Status: corev1.PodStatus{
  1421  					Phase: "Running",
  1422  					ContainerStatuses: []corev1.ContainerStatus{
  1423  						{
  1424  							Name:  k8s.ProxyContainerName,
  1425  							Ready: true,
  1426  						},
  1427  					},
  1428  				},
  1429  			},
  1430  			{
  1431  				ObjectMeta: metav1.ObjectMeta{Name: "voting-65b9fffd77-rlwsd"},
  1432  				Status: corev1.PodStatus{
  1433  					Phase: "Running",
  1434  					ContainerStatuses: []corev1.ContainerStatus{
  1435  						{
  1436  							Name:  k8s.ProxyContainerName,
  1437  							Ready: true,
  1438  						},
  1439  					},
  1440  				},
  1441  			},
  1442  			{
  1443  				ObjectMeta: metav1.ObjectMeta{Name: "web-6cfbccc48-5g8px"},
  1444  				Status: corev1.PodStatus{
  1445  					Phase: "Running",
  1446  					ContainerStatuses: []corev1.ContainerStatus{
  1447  						{
  1448  							Name:  k8s.ProxyContainerName,
  1449  							Ready: true,
  1450  						},
  1451  					},
  1452  				},
  1453  			},
  1454  		}
  1455  
  1456  		err := CheckPodsRunning(pods, "emojivoto")
  1457  		if err != nil {
  1458  			t.Fatalf("Unexpected error: %s", err)
  1459  		}
  1460  	})
  1461  
  1462  	// This test relates to https://github.com/linkerd/linkerd2/issues/6128
  1463  	t.Run("Returns nil if some pods are in the Succeeded phase and their proxies are no longer ready", func(t *testing.T) {
  1464  		pods := []corev1.Pod{
  1465  			{
  1466  				ObjectMeta: metav1.ObjectMeta{Name: "emoji-d9c7866bb-7v74n"},
  1467  				Status: corev1.PodStatus{
  1468  					Phase:  "Succeeded",
  1469  					Reason: "Completed",
  1470  					ContainerStatuses: []corev1.ContainerStatus{
  1471  						{
  1472  							Name:  k8s.ProxyContainerName,
  1473  							Ready: false,
  1474  						},
  1475  					},
  1476  				},
  1477  			},
  1478  			{
  1479  				ObjectMeta: metav1.ObjectMeta{Name: "vote-bot-644b8cb6b4-g8nlr"},
  1480  				Status: corev1.PodStatus{
  1481  					Phase: "Running",
  1482  					ContainerStatuses: []corev1.ContainerStatus{
  1483  						{
  1484  							Name:  k8s.ProxyContainerName,
  1485  							Ready: true,
  1486  						},
  1487  					},
  1488  				},
  1489  			},
  1490  			{
  1491  				ObjectMeta: metav1.ObjectMeta{Name: "voting-65b9fffd77-rlwsd"},
  1492  				Status: corev1.PodStatus{
  1493  					Phase: "Running",
  1494  					ContainerStatuses: []corev1.ContainerStatus{
  1495  						{
  1496  							Name:  k8s.ProxyContainerName,
  1497  							Ready: true,
  1498  						},
  1499  					},
  1500  				},
  1501  			},
  1502  			{
  1503  				ObjectMeta: metav1.ObjectMeta{Name: "web-6cfbccc48-5g8px"},
  1504  				Status: corev1.PodStatus{
  1505  					Phase:  "Succeeded",
  1506  					Reason: "Completed",
  1507  					ContainerStatuses: []corev1.ContainerStatus{
  1508  						{
  1509  							Name:  k8s.ProxyContainerName,
  1510  							Ready: false,
  1511  						},
  1512  					},
  1513  				},
  1514  			},
  1515  		}
  1516  
  1517  		err := CheckPodsRunning(pods, "emojivoto")
  1518  		if err != nil {
  1519  			t.Fatalf("Unexpected error: %s", err)
  1520  		}
  1521  	})
  1522  }
  1523  
  1524  func TestDataPlanePodLabels(t *testing.T) {
  1525  
  1526  	t.Run("Returns nil if pod labels are ok", func(t *testing.T) {
  1527  		pods := []corev1.Pod{
  1528  			{
  1529  				ObjectMeta: metav1.ObjectMeta{
  1530  					Name:        "emoji-d9c7866bb-7v74n",
  1531  					Annotations: map[string]string{k8s.ProxyControlPortAnnotation: "3000"},
  1532  					Labels:      map[string]string{"app": "test"},
  1533  				},
  1534  			},
  1535  		}
  1536  
  1537  		err := checkMisconfiguredPodsLabels(pods)
  1538  		if err != nil {
  1539  			t.Fatalf("Unexpected error: %s", err)
  1540  		}
  1541  	})
  1542  
  1543  	t.Run("Returns error if any labels are misconfigured", func(t *testing.T) {
  1544  		for _, tc := range []struct {
  1545  			description      string
  1546  			pods             []corev1.Pod
  1547  			expectedErrorMsg string
  1548  		}{
  1549  			{
  1550  				description: "config as label",
  1551  				pods: []corev1.Pod{
  1552  					{
  1553  						ObjectMeta: metav1.ObjectMeta{
  1554  							Name:   "emoji-d9c7866bb-7v74n",
  1555  							Labels: map[string]string{k8s.ProxyControlPortAnnotation: "3000"},
  1556  						},
  1557  					},
  1558  				},
  1559  				expectedErrorMsg: "Some labels on data plane pods should be annotations:\n\t* /emoji-d9c7866bb-7v74n\n\t\tconfig.linkerd.io/control-port",
  1560  			},
  1561  			{
  1562  				description: "alpha config as label",
  1563  				pods: []corev1.Pod{
  1564  					{
  1565  						ObjectMeta: metav1.ObjectMeta{
  1566  							Name:   "emoji-d9c7866bb-7v74n",
  1567  							Labels: map[string]string{k8s.ProxyConfigAnnotationsPrefixAlpha + "/alpha-setting": "3000"},
  1568  						},
  1569  					},
  1570  				},
  1571  				expectedErrorMsg: "Some labels on data plane pods should be annotations:\n\t* /emoji-d9c7866bb-7v74n\n\t\tconfig.alpha.linkerd.io/alpha-setting",
  1572  			},
  1573  			{
  1574  				description: "inject annotation as label",
  1575  				pods: []corev1.Pod{
  1576  					{
  1577  						ObjectMeta: metav1.ObjectMeta{
  1578  							Name:   "emoji-d9c7866bb-7v74n",
  1579  							Labels: map[string]string{k8s.ProxyInjectAnnotation: "enable"},
  1580  						},
  1581  					},
  1582  				},
  1583  				expectedErrorMsg: "Some labels on data plane pods should be annotations:\n\t* /emoji-d9c7866bb-7v74n\n\t\tlinkerd.io/inject",
  1584  			},
  1585  		} {
  1586  			tc := tc // pin
  1587  			t.Run(tc.description, func(t *testing.T) {
  1588  				err := checkMisconfiguredPodsLabels(tc.pods)
  1589  
  1590  				if err == nil {
  1591  					t.Fatal("Expected error, got nothing")
  1592  				}
  1593  
  1594  				if err.Error() != tc.expectedErrorMsg {
  1595  					t.Fatalf("Unexpected error message: %s", err.Error())
  1596  				}
  1597  			})
  1598  		}
  1599  	})
  1600  }
  1601  
  1602  func TestServicesLabels(t *testing.T) {
  1603  
  1604  	t.Run("Returns nil if service labels are ok", func(t *testing.T) {
  1605  		services := []corev1.Service{
  1606  			{
  1607  				ObjectMeta: metav1.ObjectMeta{
  1608  					Name:        "emoji-d9c7866bb-7v74n",
  1609  					Annotations: map[string]string{k8s.ProxyControlPortAnnotation: "3000"},
  1610  					Labels:      map[string]string{"app": "test", k8s.DefaultExportedServiceSelector: "true"},
  1611  				},
  1612  			},
  1613  		}
  1614  
  1615  		err := checkMisconfiguredServiceLabels(services)
  1616  		if err != nil {
  1617  			t.Fatalf("Unexpected error: %s", err)
  1618  		}
  1619  	})
  1620  
  1621  	t.Run("Returns error if service labels or annotation misconfigured", func(t *testing.T) {
  1622  		for _, tc := range []struct {
  1623  			description      string
  1624  			services         []corev1.Service
  1625  			expectedErrorMsg string
  1626  		}{
  1627  			{
  1628  				description: "config as label",
  1629  				services: []corev1.Service{
  1630  					{
  1631  						ObjectMeta: metav1.ObjectMeta{
  1632  							Name:   "emoji-d9c7866bb-7v74n",
  1633  							Labels: map[string]string{k8s.ProxyControlPortAnnotation: "3000"},
  1634  						},
  1635  					},
  1636  				},
  1637  				expectedErrorMsg: "Some labels on data plane services should be annotations:\n\t* /emoji-d9c7866bb-7v74n\n\t\tconfig.linkerd.io/control-port",
  1638  			},
  1639  			{
  1640  				description: "alpha config as label",
  1641  				services: []corev1.Service{
  1642  					{
  1643  						ObjectMeta: metav1.ObjectMeta{
  1644  							Name:   "emoji-d9c7866bb-7v74n",
  1645  							Labels: map[string]string{k8s.ProxyConfigAnnotationsPrefixAlpha + "/alpha-setting": "3000"},
  1646  						},
  1647  					},
  1648  				},
  1649  				expectedErrorMsg: "Some labels on data plane services should be annotations:\n\t* /emoji-d9c7866bb-7v74n\n\t\tconfig.alpha.linkerd.io/alpha-setting",
  1650  			},
  1651  		} {
  1652  			tc := tc // pin
  1653  			t.Run(tc.description, func(t *testing.T) {
  1654  				err := checkMisconfiguredServiceLabels(tc.services)
  1655  				if err == nil {
  1656  					t.Fatal("Expected error, got nothing")
  1657  				}
  1658  				if err.Error() != tc.expectedErrorMsg {
  1659  					t.Fatalf("Unexpected error message: %s", err.Error())
  1660  				}
  1661  			})
  1662  		}
  1663  	})
  1664  }
  1665  
  1666  func TestServicesAnnotations(t *testing.T) {
  1667  
  1668  	t.Run("Returns nil if service annotations are ok", func(t *testing.T) {
  1669  		services := []corev1.Service{
  1670  			{
  1671  				ObjectMeta: metav1.ObjectMeta{
  1672  					Name:        "emoji-d9c7866bb-7v74n",
  1673  					Annotations: map[string]string{k8s.ProxyControlPortAnnotation: "3000"},
  1674  					Labels:      map[string]string{"app": "test", k8s.DefaultExportedServiceSelector: "true"},
  1675  				},
  1676  			},
  1677  		}
  1678  
  1679  		err := checkMisconfiguredServiceAnnotations(services)
  1680  		if err != nil {
  1681  			t.Fatalf("Unexpected error: %s", err)
  1682  		}
  1683  	})
  1684  
  1685  	t.Run("Returns error if service annotations are misconfigured", func(t *testing.T) {
  1686  		for _, tc := range []struct {
  1687  			description      string
  1688  			services         []corev1.Service
  1689  			expectedErrorMsg string
  1690  		}{
  1691  			{
  1692  				description: "mirror as annotations",
  1693  				services: []corev1.Service{
  1694  					{
  1695  						ObjectMeta: metav1.ObjectMeta{
  1696  							Name:        "emoji-d9c7866bb-7v74n",
  1697  							Annotations: map[string]string{k8s.DefaultExportedServiceSelector: "true"},
  1698  						},
  1699  					},
  1700  				},
  1701  				expectedErrorMsg: "Some annotations on data plane services should be labels:\n\t* /emoji-d9c7866bb-7v74n\n\t\tmirror.linkerd.io/exported",
  1702  			},
  1703  		} {
  1704  			tc := tc // pin
  1705  			t.Run(tc.description, func(t *testing.T) {
  1706  				err := checkMisconfiguredServiceAnnotations(tc.services)
  1707  				if err == nil {
  1708  					t.Fatal("Expected error, got nothing")
  1709  				}
  1710  				if err.Error() != tc.expectedErrorMsg {
  1711  					t.Fatalf("Unexpected error message: %s", err.Error())
  1712  				}
  1713  			})
  1714  		}
  1715  	})
  1716  }
  1717  
  1718  func TestFetchCurrentConfiguration(t *testing.T) {
  1719  	defaultValues, err := linkerd2.NewValues()
  1720  
  1721  	if err != nil {
  1722  		t.Fatalf("Unexpected error validating options: %v", err)
  1723  	}
  1724  
  1725  	testCases := []struct {
  1726  		k8sConfigs []string
  1727  		expected   *linkerd2.Values
  1728  		err        error
  1729  	}{
  1730  		{
  1731  			[]string{`
  1732  kind: ConfigMap
  1733  apiVersion: v1
  1734  metadata:
  1735    name: linkerd-config
  1736    namespace: linkerd
  1737  data:
  1738    global: |
  1739      {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"fake-trust-anchors-pem","issuanceLifetime":"86400s","clockSkewAllowance":"20s"}}
  1740    proxy: |
  1741      {"proxyImage":{"imageName":"cr.l5d.io/linkerd/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"cr.l5d.io/linkerd/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","proxyGid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxy_init_image_version":"v2.3.0","debugImage":{"imageName":"cr.l5d.io/linkerd/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"}
  1742    install: |
  1743      {"cliVersion":"dev-undefined","flags":[]}
  1744    values: |
  1745      controllerImage: ControllerImage
  1746      controllerReplicas: 1
  1747      controllerUID: 2103
  1748      controllerGID: 2103
  1749      debugContainer: null
  1750      destinationProxyResources: null
  1751      destinationResources: null
  1752      disableHeartBeat: false
  1753      enableH2Upgrade: true
  1754      enablePodAntiAffinity: false
  1755      nodeAffinity: null
  1756      cliVersion: CliVersion
  1757      clusterDomain: cluster.local
  1758      clusterNetworks: ClusterNetworks
  1759      cniEnabled: false
  1760      controlPlaneTracing: false
  1761      controllerLogLevel: ControllerLogLevel
  1762      enableEndpointSlices: false
  1763      highAvailability: false
  1764      imagePullPolicy: ImagePullPolicy
  1765      imagePullSecrets: null
  1766      linkerdVersion: ""
  1767      prometheusUrl: ""
  1768      proxy:
  1769        capabilities: null
  1770        component: linkerd-controller
  1771        disableTap: false
  1772        enableExternalProfiles: false
  1773        image:
  1774          name: ProxyImageName
  1775          pullPolicy: ImagePullPolicy
  1776          version: ProxyVersion
  1777        inboundConnectTimeout: ""
  1778        isGateway: false
  1779        logFormat: plain
  1780        logLevel: warn,linkerd=info
  1781        opaquePorts: ""
  1782        outboundConnectTimeout: ""
  1783        ports:
  1784          admin: 4191
  1785          control: 4190
  1786          inbound: 4143
  1787          outbound: 4140
  1788        requireIdentityOnInboundPorts: ""
  1789        resources: null
  1790        saMountPath: null
  1791        uid: 2102
  1792        gid: 2102
  1793        waitBeforeExitSeconds: 0
  1794        workloadKind: deployment
  1795      proxyContainerName: ProxyContainerName
  1796      proxyInit:
  1797        capabilities: null
  1798        closeWaitTimeoutSecs: 0
  1799        ignoreInboundPorts: ""
  1800        ignoreOutboundPorts: ""
  1801        image:
  1802          name: ProxyInitImageName
  1803          pullPolicy: ImagePullPolicy
  1804          version: ProxyInitVersion
  1805        resources:
  1806          cpu:
  1807            limit: 100m
  1808            request: 10m
  1809          memory:
  1810            limit: 50Mi
  1811            request: 10Mi
  1812        saMountPath: null
  1813        xtMountPath:
  1814          mountPath: /run
  1815          name: linkerd-proxy-init-xtables-lock
  1816          readOnly: false
  1817      heartbeatResources: null
  1818      heartbeatSchedule: ""
  1819      identityProxyResources: null
  1820      identityResources: null
  1821      nodeSelector:
  1822        kubernetes.io/os: linux
  1823      proxyInjectorProxyResources: null
  1824      proxyInjectorResources: null
  1825      stage: ""
  1826      tolerations: null
  1827      webhookFailurePolicy: WebhookFailurePolicy
  1828  `,
  1829  			},
  1830  			&linkerd2.Values{
  1831  				ControllerImage:      "ControllerImage",
  1832  				ControllerUID:        2103,
  1833  				ControllerGID:        2103,
  1834  				EnableH2Upgrade:      true,
  1835  				WebhookFailurePolicy: "WebhookFailurePolicy",
  1836  				NodeSelector:         defaultValues.NodeSelector,
  1837  				Tolerations:          defaultValues.Tolerations,
  1838  				ClusterDomain:        "cluster.local",
  1839  				ClusterNetworks:      "ClusterNetworks",
  1840  				ImagePullPolicy:      "ImagePullPolicy",
  1841  				CliVersion:           "CliVersion",
  1842  				ControllerLogLevel:   "ControllerLogLevel",
  1843  				ProxyContainerName:   "ProxyContainerName",
  1844  				CNIEnabled:           false,
  1845  				Proxy: &linkerd2.Proxy{
  1846  					Image: &linkerd2.Image{
  1847  						Name:       "ProxyImageName",
  1848  						PullPolicy: "ImagePullPolicy",
  1849  						Version:    "ProxyVersion",
  1850  					},
  1851  					LogLevel:  "warn,linkerd=info",
  1852  					LogFormat: "plain",
  1853  					Ports: &linkerd2.Ports{
  1854  						Admin:    4191,
  1855  						Control:  4190,
  1856  						Inbound:  4143,
  1857  						Outbound: 4140,
  1858  					},
  1859  					UID: 2102,
  1860  					GID: 2102,
  1861  				},
  1862  				ProxyInit: &linkerd2.ProxyInit{
  1863  					Image: &linkerd2.Image{
  1864  						Name:       "ProxyInitImageName",
  1865  						PullPolicy: "ImagePullPolicy",
  1866  						Version:    "ProxyInitVersion",
  1867  					},
  1868  					Resources: &linkerd2.Resources{
  1869  						CPU: linkerd2.Constraints{
  1870  							Limit:   "100m",
  1871  							Request: "10m",
  1872  						},
  1873  						Memory: linkerd2.Constraints{
  1874  							Limit:   "50Mi",
  1875  							Request: "10Mi",
  1876  						},
  1877  					},
  1878  					XTMountPath: &linkerd2.VolumeMountPath{
  1879  						MountPath: "/run",
  1880  						Name:      "linkerd-proxy-init-xtables-lock",
  1881  					},
  1882  				},
  1883  				ControllerReplicas: 1,
  1884  			},
  1885  			nil,
  1886  		},
  1887  		{
  1888  			[]string{`
  1889  kind: ConfigMap
  1890  apiVersion: v1
  1891  metadata:
  1892    name: linkerd-config
  1893    namespace: linkerd
  1894  data:
  1895    global: |
  1896      {"linkerdNamespace":"linkerd","cniEnabled":false,"version":"install-control-plane-version","identityContext":{"trustDomain":"cluster.local","trustAnchorsPem":"fake-trust-anchors-pem","issuanceLifetime":"86400s","clockSkewAllowance":"20s"}}
  1897    proxy: |
  1898      {"proxyImage":{"imageName":"cr.l5d.io/linkerd/proxy","pullPolicy":"IfNotPresent"},"proxyInitImage":{"imageName":"cr.l5d.io/linkerd/proxy-init","pullPolicy":"IfNotPresent"},"controlPort":{"port":4190},"ignoreInboundPorts":[],"ignoreOutboundPorts":[],"inboundPort":{"port":4143},"adminPort":{"port":4191},"outboundPort":{"port":4140},"resource":{"requestCpu":"","requestMemory":"","limitCpu":"","limitMemory":""},"proxyUid":"2102","proxyGid":"2102","logLevel":{"level":"warn,linkerd=info"},"disableExternalProfiles":true,"proxyVersion":"install-proxy-version","proxy_init_image_version":"v2.3.0","debugImage":{"imageName":"cr.l5d.io/linkerd/debug","pullPolicy":"IfNotPresent"},"debugImageVersion":"install-debug-version"}
  1899    install: |
  1900      {"cliVersion":"dev-undefined","flags":[]}
  1901    values: |
  1902      controllerImage: ControllerImage
  1903      controllerReplicas: 1
  1904      controllerUID: 2103
  1905      controllerGID: 2103
  1906      debugContainer: null
  1907      destinationProxyResources: null
  1908      destinationResources: null
  1909      disableHeartBeat: false
  1910      enableH2Upgrade: true
  1911      enablePodAntiAffinity: false
  1912      global:
  1913        cliVersion: CliVersion
  1914        clusterDomain: cluster.local
  1915        clusterNetworks: ClusterNetworks
  1916        cniEnabled: false
  1917        controlPlaneTracing: false
  1918        controllerLogLevel: ControllerLogLevel
  1919        enableEndpointSlices: false
  1920        highAvailability: false
  1921        imagePullPolicy: ImagePullPolicy
  1922        imagePullSecrets: null
  1923        linkerdVersion: ""
  1924        prometheusUrl: ""
  1925        proxy:
  1926          capabilities: null
  1927          component: linkerd-controller
  1928          disableTap: false
  1929          enableExternalProfiles: false
  1930          image:
  1931            name: ProxyImageName
  1932            pullPolicy: ImagePullPolicy
  1933            version: ProxyVersion
  1934          inboundConnectTimeout: ""
  1935          isGateway: false
  1936          logFormat: plain
  1937          logLevel: warn,linkerd=info
  1938          opaquePorts: ""
  1939          outboundConnectTimeout: ""
  1940          ports:
  1941            admin: 4191
  1942            control: 4190
  1943            inbound: 4143
  1944            outbound: 4140
  1945          requireIdentityOnInboundPorts: ""
  1946          resources: null
  1947          saMountPath: null
  1948          uid: 2102
  1949          gid: 2102
  1950          waitBeforeExitSeconds: 0
  1951          workloadKind: deployment
  1952        proxyContainerName: ProxyContainerName
  1953        proxyInit:
  1954          capabilities: null
  1955          closeWaitTimeoutSecs: 0
  1956          ignoreInboundPorts: ""
  1957          ignoreOutboundPorts: ""
  1958          image:
  1959            name: ProxyInitImageName
  1960            pullPolicy: ImagePullPolicy
  1961            version: ProxyInitVersion
  1962          resources:
  1963            cpu:
  1964              limit: 100m
  1965              request: 10m
  1966            memory:
  1967              limit: 50Mi
  1968              request: 10Mi
  1969          saMountPath: null
  1970          xtMountPath:
  1971            mountPath: /run
  1972            name: linkerd-proxy-init-xtables-lock
  1973            readOnly: false
  1974      heartbeatResources: null
  1975      heartbeatSchedule: ""
  1976      identityProxyResources: null
  1977      identityResources: null
  1978      nodeSelector:
  1979        kubernetes.io/os: linux
  1980      proxyInjectorProxyResources: null
  1981      proxyInjectorResources: null
  1982      stage: ""
  1983      tolerations: null
  1984      webhookFailurePolicy: WebhookFailurePolicy
  1985  `,
  1986  			},
  1987  			&linkerd2.Values{
  1988  				ControllerImage:      "ControllerImage",
  1989  				ControllerUID:        2103,
  1990  				ControllerGID:        2103,
  1991  				EnableH2Upgrade:      true,
  1992  				WebhookFailurePolicy: "WebhookFailurePolicy",
  1993  				NodeSelector:         defaultValues.NodeSelector,
  1994  				Tolerations:          defaultValues.Tolerations,
  1995  				ClusterDomain:        "cluster.local",
  1996  				ClusterNetworks:      "ClusterNetworks",
  1997  				ImagePullPolicy:      "ImagePullPolicy",
  1998  				CliVersion:           "CliVersion",
  1999  				ControllerLogLevel:   "ControllerLogLevel",
  2000  				ProxyContainerName:   "ProxyContainerName",
  2001  				CNIEnabled:           false,
  2002  				Proxy: &linkerd2.Proxy{
  2003  					Image: &linkerd2.Image{
  2004  						Name:       "ProxyImageName",
  2005  						PullPolicy: "ImagePullPolicy",
  2006  						Version:    "ProxyVersion",
  2007  					},
  2008  					LogLevel:  "warn,linkerd=info",
  2009  					LogFormat: "plain",
  2010  					Ports: &linkerd2.Ports{
  2011  						Admin:    4191,
  2012  						Control:  4190,
  2013  						Inbound:  4143,
  2014  						Outbound: 4140,
  2015  					},
  2016  					UID: 2102,
  2017  					GID: 2102,
  2018  				},
  2019  				ProxyInit: &linkerd2.ProxyInit{
  2020  					Image: &linkerd2.Image{
  2021  						Name:       "ProxyInitImageName",
  2022  						PullPolicy: "ImagePullPolicy",
  2023  						Version:    "ProxyInitVersion",
  2024  					},
  2025  					Resources: &linkerd2.Resources{
  2026  						CPU: linkerd2.Constraints{
  2027  							Limit:   "100m",
  2028  							Request: "10m",
  2029  						},
  2030  						Memory: linkerd2.Constraints{
  2031  							Limit:   "50Mi",
  2032  							Request: "10Mi",
  2033  						},
  2034  					},
  2035  					XTMountPath: &linkerd2.VolumeMountPath{
  2036  						MountPath: "/run",
  2037  						Name:      "linkerd-proxy-init-xtables-lock",
  2038  					},
  2039  				},
  2040  				ControllerReplicas: 1,
  2041  			},
  2042  			nil,
  2043  		},
  2044  	}
  2045  
  2046  	for i, tc := range testCases {
  2047  		tc := tc // pin
  2048  		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
  2049  			clientset, err := k8s.NewFakeAPI(tc.k8sConfigs...)
  2050  			if err != nil {
  2051  				t.Fatalf("Unexpected error: %s", err)
  2052  			}
  2053  
  2054  			_, values, err := FetchCurrentConfiguration(context.Background(), clientset, "linkerd")
  2055  			if diff := deep.Equal(err, tc.err); diff != nil {
  2056  				t.Fatalf("%+v", diff)
  2057  			}
  2058  			if diff := deep.Equal(values, tc.expected); diff != nil {
  2059  				t.Fatalf("%+v", diff)
  2060  			}
  2061  		})
  2062  	}
  2063  }
  2064  
  2065  func getFakeConfigMap(scheme string, issuerCerts *issuercerts.IssuerCertData) string {
  2066  	anchors, _ := json.Marshal(issuerCerts.TrustAnchors)
  2067  	return fmt.Sprintf(`
  2068  kind: ConfigMap
  2069  apiVersion: v1
  2070  metadata:
  2071    name: linkerd-config
  2072    namespace: linkerd
  2073  data:
  2074    values: |
  2075      namespace: linkerd
  2076      identityTrustAnchorsPEM: %s
  2077      identityTrustDomain: cluster.local
  2078      identity:
  2079        issuer:
  2080          scheme: %s
  2081  ---
  2082  `, anchors, scheme)
  2083  }
  2084  
  2085  func getFakeSecret(scheme string, issuerCerts *issuercerts.IssuerCertData) string {
  2086  	if scheme == k8s.IdentityIssuerSchemeLinkerd {
  2087  		return fmt.Sprintf(`
  2088  kind: Secret
  2089  apiVersion: v1
  2090  metadata:
  2091    name: linkerd-identity-issuer
  2092    namespace: linkerd
  2093  data:
  2094    crt.pem: %s
  2095    key.pem: %s
  2096  ---
  2097  `, base64.StdEncoding.EncodeToString([]byte(issuerCerts.IssuerCrt)), base64.StdEncoding.EncodeToString([]byte(issuerCerts.IssuerKey)))
  2098  	}
  2099  	return fmt.Sprintf(
  2100  		`
  2101  kind: Secret
  2102  apiVersion: v1
  2103  metadata:
  2104    name: linkerd-identity-issuer
  2105    namespace: linkerd
  2106  data:
  2107    ca.crt: %s
  2108    tls.crt: %s
  2109    tls.key: %s
  2110  ---
  2111  `, base64.StdEncoding.EncodeToString([]byte(issuerCerts.TrustAnchors)), base64.StdEncoding.EncodeToString([]byte(issuerCerts.IssuerCrt)), base64.StdEncoding.EncodeToString([]byte(issuerCerts.IssuerKey)))
  2112  }
  2113  
  2114  func createIssuerData(dnsName string, notBefore, notAfter time.Time) *issuercerts.IssuerCertData {
  2115  	// Generate a new root key.
  2116  	key, _ := tls.GenerateKey()
  2117  
  2118  	rootCa, _ := tls.CreateRootCA(dnsName, key, tls.Validity{
  2119  		Lifetime:  notAfter.Sub(notBefore),
  2120  		ValidFrom: &notBefore,
  2121  	})
  2122  
  2123  	return &issuercerts.IssuerCertData{
  2124  		TrustAnchors: rootCa.Cred.Crt.EncodeCertificatePEM(),
  2125  		IssuerCrt:    rootCa.Cred.Crt.EncodeCertificatePEM(),
  2126  		IssuerKey:    rootCa.Cred.EncodePrivateKeyPEM(),
  2127  	}
  2128  }
  2129  
// lifeSpan describes the validity window used to generate a test issuer
// certificate via createIssuerData.
type lifeSpan struct {
	starts time.Time // notBefore of the generated certificate
	ends   time.Time // notAfter of the generated certificate
}
  2134  
  2135  func runIdentityCheckTestCase(ctx context.Context, t *testing.T, testID int, testDescription string, checkerToTest string, fakeConfigMap string, fakeSecret string, expectedOutput []string) {
  2136  	t.Run(fmt.Sprintf("%d/%s", testID, testDescription), func(t *testing.T) {
  2137  		hc := NewHealthChecker(
  2138  			[]CategoryID{},
  2139  			&Options{
  2140  				DataPlaneNamespace: "linkerd",
  2141  			},
  2142  		)
  2143  		hc.addCheckAsCategory("linkerd-identity-test-cat", LinkerdIdentity, checkerToTest)
  2144  		var err error
  2145  		hc.ControlPlaneNamespace = "linkerd"
  2146  		hc.kubeAPI, err = k8s.NewFakeAPI(fakeConfigMap, fakeSecret)
  2147  		_, hc.linkerdConfig, _ = hc.checkLinkerdConfigConfigMap(ctx)
  2148  
  2149  		if testDescription != "certificate config is valid" {
  2150  			hc.issuerCert, hc.trustAnchors, _ = hc.checkCertificatesConfig(ctx)
  2151  		}
  2152  
  2153  		if err != nil {
  2154  			t.Fatalf("Unexpected error: %s", err)
  2155  		}
  2156  
  2157  		obs := newObserver()
  2158  		hc.RunChecks(obs.resultFn)
  2159  		if diff := deep.Equal(obs.results, expectedOutput); diff != nil {
  2160  			t.Fatalf("%+v", diff)
  2161  		}
  2162  	})
  2163  }
  2164  
// TestLinkerdIdentityCheckCertConfig exercises the "certificate config is
// valid" identity check against combinations of the issuer Secret scheme
// (linkerd.io/tls vs kubernetes.io/tls) and the scheme recorded in the
// linkerd-config ConfigMap, including legacy (empty) config schemes,
// scheme mismatches, and malformed PEM data.
func TestLinkerdIdentityCheckCertConfig(t *testing.T) {
	var testCases = []struct {
		checkDescription            string   // subtest name; also describes the expected outcome
		tlsSecretScheme             string   // scheme used to render the fake issuer Secret
		schemeInConfig              string   // scheme recorded in the fake linkerd-config
		expectedOutput              []string // exact check results the observer must record
		// Optional hooks to corrupt the issuer data before rendering the
		// fake ConfigMap / Secret for negative cases.
		configMapIssuerDataModifier func(issuercerts.IssuerCertData) issuercerts.IssuerCertData
		tlsSecretIssuerDataModifier func(issuercerts.IssuerCertData) issuercerts.IssuerCertData
	}{
		{
			checkDescription: "works with valid cert and linkerd.io/tls secret",
			tlsSecretScheme:  k8s.IdentityIssuerSchemeLinkerd,
			schemeInConfig:   k8s.IdentityIssuerSchemeLinkerd,
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid"},
		},
		{
			checkDescription: "works with valid cert and kubernetes.io/tls secret",
			tlsSecretScheme:  string(corev1.SecretTypeTLS),
			schemeInConfig:   string(corev1.SecretTypeTLS),
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid"},
		},
		{
			checkDescription: "works if config scheme is empty and secret scheme is linkerd.io/tls (pre 2.7)",
			tlsSecretScheme:  k8s.IdentityIssuerSchemeLinkerd,
			schemeInConfig:   "",
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid"},
		},
		{
			checkDescription: "fails if config scheme is empty and secret scheme is kubernetes.io/tls (pre 2.7)",
			tlsSecretScheme:  string(corev1.SecretTypeTLS),
			schemeInConfig:   "",
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid: key crt.pem containing the issuer certificate needs to exist in secret linkerd-identity-issuer if --identity-external-issuer=false"},
		},
		{
			checkDescription: "fails when config scheme is linkerd.io/tls but secret scheme is kubernetes.io/tls in config is different than the one in the issuer secret",
			tlsSecretScheme:  string(corev1.SecretTypeTLS),
			schemeInConfig:   k8s.IdentityIssuerSchemeLinkerd,
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid: key crt.pem containing the issuer certificate needs to exist in secret linkerd-identity-issuer if --identity-external-issuer=false"},
		},
		{
			checkDescription: "fails when config scheme is kubernetes.io/tls but secret scheme is linkerd.io/tls in config is different than the one in the issuer secret",
			tlsSecretScheme:  k8s.IdentityIssuerSchemeLinkerd,
			schemeInConfig:   string(corev1.SecretTypeTLS),
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid: key ca.crt containing the trust anchors needs to exist in secret linkerd-identity-issuer if --identity-external-issuer=true"},
		},
		{
			checkDescription: "fails when trying to parse trust anchors from secret (extra newline in secret)",
			tlsSecretScheme:  string(corev1.SecretTypeTLS),
			schemeInConfig:   string(corev1.SecretTypeTLS),
			expectedOutput:   []string{"linkerd-identity-test-cat certificate config is valid: not a PEM certificate"},
			tlsSecretIssuerDataModifier: func(issuerData issuercerts.IssuerCertData) issuercerts.IssuerCertData {
				// An extra trailing newline makes the anchors invalid PEM.
				issuerData.TrustAnchors += "\n"
				return issuerData
			},
		},
	}

	for id, testCase := range testCases {
		testCase := testCase
		// Certificate valid from one year ago until one year from now, so
		// only the deliberately corrupted cases fail.
		issuerData := createIssuerData("identity.linkerd.cluster.local", time.Now().AddDate(-1, 0, 0), time.Now().AddDate(1, 0, 0))
		var fakeConfigMap string
		if testCase.configMapIssuerDataModifier != nil {
			modifiedIssuerData := testCase.configMapIssuerDataModifier(*issuerData)
			fakeConfigMap = getFakeConfigMap(testCase.schemeInConfig, &modifiedIssuerData)
		} else {
			fakeConfigMap = getFakeConfigMap(testCase.schemeInConfig, issuerData)
		}

		var fakeSecret string
		if testCase.tlsSecretIssuerDataModifier != nil {
			modifiedIssuerData := testCase.tlsSecretIssuerDataModifier(*issuerData)
			fakeSecret = getFakeSecret(testCase.tlsSecretScheme, &modifiedIssuerData)
		} else {
			fakeSecret = getFakeSecret(testCase.tlsSecretScheme, issuerData)
		}
		runIdentityCheckTestCase(context.Background(), t, id, testCase.checkDescription, "certificate config is valid", fakeConfigMap, fakeSecret, testCase.expectedOutput)
	}
}
  2243  
// TestLinkerdIdentityCheckCertValidity exercises the identity checks that
// verify trust anchors and the issuer certificate are within their
// validity period, using certificates that are either not yet valid or
// already expired.
func TestLinkerdIdentityCheckCertValidity(t *testing.T) {
	var testCases = []struct {
		checkDescription string    // subtest name
		checkerToTest    string    // which identity checker to run
		lifespan         *lifeSpan // validity window of the generated cert
		expectedOutput   []string  // exact check results expected
	}{
		// NOTE(review): the timestamps in the expected messages differ
		// slightly from the lifespan inputs — presumably clock-skew
		// adjustment applied during certificate creation; confirm against
		// tls.CreateRootCA.
		{
			checkerToTest:    "trust anchors are within their validity period",
			checkDescription: "fails when the only anchor is not valid yet",
			lifespan: &lifeSpan{
				starts: time.Date(2100, 1, 1, 1, 1, 1, 1, time.UTC),
				ends:   time.Date(2101, 1, 1, 1, 1, 1, 1, time.UTC),
			},
			expectedOutput: []string{"linkerd-identity-test-cat trust anchors are within their validity period: Invalid anchors:\n\t* 1 identity.linkerd.cluster.local not valid before: 2100-01-01T01:00:51Z"},
		},
		{
			checkerToTest:    "trust anchors are within their validity period",
			checkDescription: "fails when the only trust anchor is expired",
			lifespan: &lifeSpan{
				starts: time.Date(1989, 1, 1, 1, 1, 1, 1, time.UTC),
				ends:   time.Date(1990, 1, 1, 1, 1, 1, 1, time.UTC),
			},
			expectedOutput: []string{"linkerd-identity-test-cat trust anchors are within their validity period: Invalid anchors:\n\t* 1 identity.linkerd.cluster.local not valid anymore. Expired on 1990-01-01T01:01:11Z"},
		},
		{
			checkerToTest:    "issuer cert is within its validity period",
			checkDescription: "fails when the issuer cert is not valid yet",
			lifespan: &lifeSpan{
				starts: time.Date(2100, 1, 1, 1, 1, 1, 1, time.UTC),
				ends:   time.Date(2101, 1, 1, 1, 1, 1, 1, time.UTC),
			},
			expectedOutput: []string{"linkerd-identity-test-cat issuer cert is within its validity period: issuer certificate is not valid before: 2100-01-01T01:00:51Z"},
		},
		{
			checkerToTest:    "issuer cert is within its validity period",
			checkDescription: "fails when the issuer cert is expired",
			lifespan: &lifeSpan{
				starts: time.Date(1989, 1, 1, 1, 1, 1, 1, time.UTC),
				ends:   time.Date(1990, 1, 1, 1, 1, 1, 1, time.UTC),
			},
			expectedOutput: []string{"linkerd-identity-test-cat issuer cert is within its validity period: issuer certificate is not valid anymore. Expired on 1990-01-01T01:01:11Z"},
		},
	}

	for id, testCase := range testCases {
		testCase := testCase
		issuerData := createIssuerData("identity.linkerd.cluster.local", testCase.lifespan.starts, testCase.lifespan.ends)
		fakeConfigMap := getFakeConfigMap(k8s.IdentityIssuerSchemeLinkerd, issuerData)
		fakeSecret := getFakeSecret(k8s.IdentityIssuerSchemeLinkerd, issuerData)
		runIdentityCheckTestCase(context.Background(), t, id, testCase.checkDescription, testCase.checkerToTest, fakeConfigMap, fakeSecret, testCase.expectedOutput)
	}
}
  2297  
// fakeCniResourcesOpts selects which linkerd-cni resources
// getFakeCniResources renders, letting tests simulate partially
// installed CNI plugins.
type fakeCniResourcesOpts struct {
	hasConfigMap          bool
	hasClusterRole        bool
	hasClusterRoleBinding bool
	hasServiceAccount     bool
	hasDaemonSet          bool
	scheduled             int // DaemonSet status.desiredNumberScheduled
	ready                 int // DaemonSet status.numberReady
}
  2307  
// getFakeCniResources renders the Kubernetes manifests for the linkerd
// CNI plugin selected by opts, in the same order the CNI health checks
// verify them. The DaemonSet's status counters are filled from
// opts.scheduled and opts.ready.
func getFakeCniResources(opts fakeCniResourcesOpts) []string {
	var resources []string

	if opts.hasConfigMap {
		resources = append(resources, `
kind: ConfigMap
apiVersion: v1
metadata:
  name: linkerd-cni-config
  namespace: test-ns
  labels:
    linkerd.io/cni-resource: "true"
data:
  dest_cni_net_dir: "/etc/cni/net.d"
---
`)
	}

	if opts.hasClusterRole {
		resources = append(resources, `
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
rules:
- apiGroups: [""]
  resources: ["pods", "nodes", "namespaces"]
  verbs: ["list", "get", "watch"]
---
`)
	}

	if opts.hasClusterRoleBinding {
		resources = append(resources, `
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: linkerd-cni
  labels:
    linkerd.io/cni-resource: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: linkerd-cni
subjects:
- kind: ServiceAccount
  name: linkerd-cni
  namespace: test-ns
---
`)
	}

	if opts.hasServiceAccount {
		resources = append(resources, `
apiVersion: v1
kind: ServiceAccount
metadata:
  name: linkerd-cni
  namespace: test-ns
  labels:
    linkerd.io/cni-resource: "true"
---
`)
	}

	if opts.hasDaemonSet {
		// The DaemonSet is the only templated manifest: scheduled/ready
		// feed the "pod is running on all nodes" check.
		resources = append(resources, fmt.Sprintf(`
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: linkerd-cni
  namespace: test-ns
  labels:
    k8s-app: linkerd-cni
    linkerd.io/cni-resource: "true"
  annotations:
    linkerd.io/created-by: linkerd/cli git-b4266c93
spec:
  selector:
    matchLabels:
      k8s-app: linkerd-cni
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  template:
    metadata:
      labels:
        k8s-app: linkerd-cni
      annotations:
        linkerd.io/created-by: linkerd/cli git-b4266c93
    spec:
      nodeSelector:
        kubernetes.io/os: linux
      serviceAccountName: linkerd-cni
      containers:
      - name: install-cni
        image: cr.l5d.io/linkerd/cni-plugin:v1.5.0
        env:
        - name: DEST_CNI_NET_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_net_dir
        - name: DEST_CNI_BIN_DIR
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: dest_cni_bin_dir
        - name: CNI_NETWORK_CONFIG
          valueFrom:
            configMapKeyRef:
              name: linkerd-cni-config
              key: cni_network_config
        - name: SLEEP
          value: "true"
        lifecycle:
          preStop:
            exec:
              command: ["kill","-15","1"]
        volumeMounts:
        - mountPath: /host/opt/cni/bin
          name: cni-bin-dir
        - mountPath: /host/etc/cni/net.d
          name: cni-net-dir
      volumes:
      - name: cni-bin-dir
        hostPath:
          path: /opt/cni/bin
      - name: cni-net-dir
        hostPath:
          path: /etc/cni/net.d
status:
  desiredNumberScheduled: %d
  numberReady: %d
---
`, opts.scheduled, opts.ready))
	}

	return resources

}
  2452  
  2453  func TestCniChecks(t *testing.T) {
  2454  	testCases := []struct {
  2455  		description  string
  2456  		testCaseOpts fakeCniResourcesOpts
  2457  		results      []string
  2458  	}{
  2459  		{
  2460  			"fails when there is no config map",
  2461  			fakeCniResourcesOpts{},
  2462  			[]string{"linkerd-cni-plugin cni plugin ConfigMap exists: configmaps \"linkerd-cni-config\" not found"},
  2463  		},
  2464  		{
  2465  			"fails then there is no ClusterRole",
  2466  			fakeCniResourcesOpts{hasConfigMap: true},
  2467  			[]string{
  2468  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2469  				"linkerd-cni-plugin cni plugin ClusterRole exists: missing ClusterRole: linkerd-cni"},
  2470  		},
  2471  		{
  2472  			"fails then there is no ClusterRoleBinding",
  2473  			fakeCniResourcesOpts{hasConfigMap: true, hasClusterRole: true},
  2474  			[]string{
  2475  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2476  				"linkerd-cni-plugin cni plugin ClusterRole exists",
  2477  				"linkerd-cni-plugin cni plugin ClusterRoleBinding exists: missing ClusterRoleBinding: linkerd-cni"},
  2478  		},
  2479  		{
  2480  			"fails then there is no ServiceAccount",
  2481  			fakeCniResourcesOpts{hasConfigMap: true, hasClusterRole: true, hasClusterRoleBinding: true},
  2482  			[]string{
  2483  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2484  				"linkerd-cni-plugin cni plugin ClusterRole exists",
  2485  				"linkerd-cni-plugin cni plugin ClusterRoleBinding exists",
  2486  				"linkerd-cni-plugin cni plugin ServiceAccount exists: missing ServiceAccount: linkerd-cni",
  2487  			},
  2488  		},
  2489  		{
  2490  			"fails then there is no DaemonSet",
  2491  			fakeCniResourcesOpts{hasConfigMap: true, hasClusterRole: true, hasClusterRoleBinding: true, hasServiceAccount: true},
  2492  			[]string{
  2493  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2494  				"linkerd-cni-plugin cni plugin ClusterRole exists",
  2495  				"linkerd-cni-plugin cni plugin ClusterRoleBinding exists",
  2496  				"linkerd-cni-plugin cni plugin ServiceAccount exists",
  2497  				"linkerd-cni-plugin cni plugin DaemonSet exists: missing DaemonSet: linkerd-cni",
  2498  			},
  2499  		},
  2500  		{
  2501  			"fails then there is nodes are not ready",
  2502  			fakeCniResourcesOpts{hasConfigMap: true, hasClusterRole: true, hasClusterRoleBinding: true, hasServiceAccount: true, hasDaemonSet: true, scheduled: 5, ready: 4},
  2503  			[]string{
  2504  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2505  				"linkerd-cni-plugin cni plugin ClusterRole exists",
  2506  				"linkerd-cni-plugin cni plugin ClusterRoleBinding exists",
  2507  				"linkerd-cni-plugin cni plugin ServiceAccount exists",
  2508  				"linkerd-cni-plugin cni plugin DaemonSet exists",
  2509  				"linkerd-cni-plugin cni plugin pod is running on all nodes: number ready: 4, number scheduled: 5",
  2510  			},
  2511  		},
  2512  		{
  2513  			"fails then there is nodes are not ready",
  2514  			fakeCniResourcesOpts{hasConfigMap: true, hasClusterRole: true, hasClusterRoleBinding: true, hasServiceAccount: true, hasDaemonSet: true, scheduled: 5, ready: 5},
  2515  			[]string{
  2516  				"linkerd-cni-plugin cni plugin ConfigMap exists",
  2517  				"linkerd-cni-plugin cni plugin ClusterRole exists",
  2518  				"linkerd-cni-plugin cni plugin ClusterRoleBinding exists",
  2519  				"linkerd-cni-plugin cni plugin ServiceAccount exists",
  2520  				"linkerd-cni-plugin cni plugin DaemonSet exists",
  2521  				"linkerd-cni-plugin cni plugin pod is running on all nodes",
  2522  			},
  2523  		},
  2524  	}
  2525  
  2526  	for _, tc := range testCases {
  2527  		tc := tc // pin
  2528  		t.Run(tc.description, func(t *testing.T) {
  2529  			hc := NewHealthChecker(
  2530  				[]CategoryID{LinkerdCNIPluginChecks},
  2531  				&Options{
  2532  					CNINamespace: "test-ns",
  2533  				},
  2534  			)
  2535  
  2536  			k8sConfigs := getFakeCniResources(tc.testCaseOpts)
  2537  			var err error
  2538  			hc.kubeAPI, err = k8s.NewFakeAPI(k8sConfigs...)
  2539  			hc.CNIEnabled = true
  2540  			if err != nil {
  2541  				t.Fatalf("Unexpected error: %s", err)
  2542  			}
  2543  
  2544  			obs := newObserver()
  2545  			hc.RunChecks(obs.resultFn)
  2546  			if diff := deep.Equal(obs.results, tc.results); diff != nil {
  2547  				t.Fatalf("%+v", diff)
  2548  			}
  2549  		})
  2550  	}
  2551  
  2552  }
  2553  
  2554  func TestMinReplicaCheck(t *testing.T) {
  2555  	hc := NewHealthChecker(
  2556  		[]CategoryID{LinkerdHAChecks},
  2557  		&Options{
  2558  			ControlPlaneNamespace: "linkerd",
  2559  		},
  2560  	)
  2561  
  2562  	var err error
  2563  
  2564  	testCases := []struct {
  2565  		controlPlaneResourceDefs []string
  2566  		expected                 error
  2567  	}{
  2568  		{
  2569  			controlPlaneResourceDefs: generateAllControlPlaneDef(&controlPlaneReplicaOptions{
  2570  				destination:   1,
  2571  				identity:      3,
  2572  				proxyInjector: 3,
  2573  				tap:           3,
  2574  			}, t),
  2575  			expected: fmt.Errorf("not enough replicas available for [linkerd-destination]"),
  2576  		},
  2577  		{
  2578  			controlPlaneResourceDefs: generateAllControlPlaneDef(&controlPlaneReplicaOptions{
  2579  				destination:   2,
  2580  				identity:      1,
  2581  				proxyInjector: 1,
  2582  				tap:           3,
  2583  			}, t),
  2584  			expected: fmt.Errorf("not enough replicas available for [linkerd-identity linkerd-proxy-injector]"),
  2585  		},
  2586  		{
  2587  			controlPlaneResourceDefs: generateAllControlPlaneDef(&controlPlaneReplicaOptions{
  2588  				destination:   2,
  2589  				identity:      2,
  2590  				proxyInjector: 3,
  2591  				tap:           3,
  2592  			}, t),
  2593  			expected: nil,
  2594  		},
  2595  	}
  2596  
  2597  	for i, tc := range testCases {
  2598  		tc := tc // pin
  2599  		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
  2600  			hc.kubeAPI, err = k8s.NewFakeAPI(tc.controlPlaneResourceDefs...)
  2601  			if err != nil {
  2602  				t.Fatal(err)
  2603  			}
  2604  			err = hc.checkMinReplicasAvailable(context.Background())
  2605  			if err == nil && tc.expected != nil {
  2606  				t.Log("Expected error: nil")
  2607  				t.Logf("Received error: %s\n", err)
  2608  				t.Fatal("test case failed")
  2609  			}
  2610  			if err != nil {
  2611  				if err.Error() != tc.expected.Error() {
  2612  					t.Logf("Expected error: %s\n", tc.expected)
  2613  					t.Logf("Received error: %s\n", err)
  2614  					t.Fatal("test case failed")
  2615  				}
  2616  			}
  2617  		})
  2618  	}
  2619  }
  2620  
  2621  func TestCheckOpaquePortAnnotations(t *testing.T) {
  2622  	hc := NewHealthChecker(
  2623  		[]CategoryID{LinkerdOpaquePortsDefinitionChecks},
  2624  		&Options{
  2625  			DataPlaneNamespace: "test-ns",
  2626  		},
  2627  	)
  2628  
  2629  	var err error
  2630  
  2631  	var testCases = []struct {
  2632  		resources []string
  2633  		expected  error
  2634  	}{
  2635  		{
  2636  			resources: []string{`
  2637  apiVersion: v1
  2638  kind: Service
  2639  metadata:
  2640    name: svc
  2641    namespace: test-ns
  2642    annotations:
  2643      config.linkerd.io/opaque-ports: "9200"
  2644  spec:
  2645    selector:
  2646      app: test
  2647    ports:
  2648    - name: test
  2649      port: 9200
  2650      targetPort: 9200
  2651  `,
  2652  				`
  2653  apiVersion: v1
  2654  kind: Pod
  2655  metadata:
  2656    name: pod
  2657    namespace: test-ns
  2658    labels:
  2659      app: test
  2660    annotations:
  2661      config.linkerd.io/opaque-ports: "9200"
  2662  spec:
  2663    containers:
  2664    - name: test
  2665      image: test
  2666      ports:
  2667      - name: test
  2668        containerPort: 9200
  2669  `,
  2670  				`
  2671  apiVersion: v1
  2672  kind: Endpoints
  2673  metadata:
  2674    name: svc
  2675    namespace: test-ns
  2676  subsets:
  2677  - addresses:
  2678    - ip: 10.244.3.12
  2679      nodeName: nod
  2680      targetRef:
  2681        kind: Pod
  2682        name: pod
  2683        namespace: test-ns
  2684    ports:
  2685    - name: test
  2686      port: 9200
  2687      protocol: TCP
  2688  `,
  2689  			},
  2690  		},
  2691  		{
  2692  			resources: []string{`
  2693  apiVersion: v1
  2694  kind: Service
  2695  metadata:
  2696    name: svc
  2697    namespace: test-ns
  2698  spec:
  2699    selector:
  2700      app: test
  2701    ports:
  2702    - name: http
  2703      port: 9200
  2704      targetPort: 9200
  2705  `,
  2706  				`
  2707  apiVersion: v1
  2708  kind: Pod
  2709  metadata:
  2710    name: pod
  2711    namespace: test-ns
  2712    labels:
  2713      app: test
  2714    annotations:
  2715      config.linkerd.io/opaque-ports: "9200"
  2716  spec:
  2717    containers:
  2718    - name: test
  2719      image: test
  2720      ports:
  2721      - name: test
  2722        containerPort: 9200
  2723  `,
  2724  				`
  2725  apiVersion: v1
  2726  kind: Endpoints
  2727  metadata:
  2728    name: svc
  2729    namespace: test-ns
  2730  subsets:
  2731  - addresses:
  2732    - ip: 10.244.3.12
  2733      nodeName: nod
  2734      targetRef:
  2735        kind: Pod
  2736        name: pod
  2737        namespace: test-ns
  2738    ports:
  2739    - name: test
  2740      port: 9200
  2741      protocol: TCP
  2742  `,
  2743  			},
  2744  			expected: fmt.Errorf("\t* service svc targets the opaque port 9200 through 9200; add 9200 to its config.linkerd.io/opaque-ports annotation"),
  2745  		},
  2746  		{
  2747  			resources: []string{`
  2748  apiVersion: v1
  2749  kind: Service
  2750  metadata:
  2751    name: svc
  2752    namespace: test-ns
  2753    annotations:
  2754      config.linkerd.io/opaque-ports: "9200"
  2755  spec:
  2756    selector:
  2757      app: test
  2758    ports:
  2759    - name: test
  2760      port: 9200
  2761      targetPort: 9200
  2762  `,
  2763  				`
  2764  apiVersion: v1
  2765  kind: Pod
  2766  metadata:
  2767    name: pod
  2768    namespace: test-ns
  2769    labels:
  2770      app: test
  2771  spec:
  2772    containers:
  2773    - name: test
  2774      image: test
  2775      ports:
  2776      - name: test
  2777        containerPort: 9200
  2778  `,
  2779  				`
  2780  apiVersion: v1
  2781  kind: Endpoints
  2782  metadata:
  2783    name: svc
  2784    namespace: test-ns
  2785  subsets:
  2786  - addresses:
  2787    - ip: 10.244.3.12
  2788      nodeName: nod
  2789      targetRef:
  2790        kind: Pod
  2791        name: pod
  2792        namespace: test-ns
  2793    ports:
  2794    - name: test
  2795      port: 9200
  2796      protocol: TCP
  2797  `,
  2798  			},
  2799  			expected: fmt.Errorf("\t* service svc expects target port 9200 to be opaque; add it to pod pod config.linkerd.io/opaque-ports annotation"),
  2800  		},
  2801  		{
  2802  			resources: []string{`
  2803  apiVersion: v1
  2804  kind: Service
  2805  metadata:
  2806    name: svc
  2807    namespace: test-ns
  2808    annotations:
  2809      config.linkerd.io/opaque-ports: "9200"
  2810  spec:
  2811    selector:
  2812      app: test
  2813    ports:
  2814      - name: test
  2815        port: 9200
  2816        targetPort: 9200
  2817  `,
  2818  				`
  2819  apiVersion: v1
  2820  kind: Pod
  2821  metadata:
  2822    name: pod
  2823    namespace: test-ns
  2824    labels:
  2825      app: test
  2826    annotations:
  2827      config.linkerd.io/opaque-ports: "9300"
  2828  spec:
  2829    containers:
  2830    - name: test
  2831      image: test
  2832      ports:
  2833      - name: test
  2834        containerPort: 9300
  2835  `,
  2836  				`
  2837  apiVersion: v1
  2838  kind: Endpoints
  2839  metadata:
  2840    name: svc
  2841    namespace: test-ns
  2842  subsets:
  2843  - addresses:
  2844    - ip: 10.244.3.12
  2845      nodeName: node
  2846      targetRef:
  2847        kind: Pod
  2848        name: pod
  2849        namespace: test-ns
  2850    ports:
  2851    - name: test
  2852      port: 9200
  2853      protocol: TCP
  2854  `,
  2855  			},
  2856  		},
  2857  		{
  2858  			resources: []string{`
  2859  apiVersion: v1
  2860  kind: Service
  2861  metadata:
  2862    name: svc
  2863    namespace: test-ns
  2864  spec:
  2865    selector:
  2866      app: test
  2867    ports:
  2868    - name: test
  2869      port: 1002
  2870      targetPort: 2002
  2871  `,
  2872  				`
  2873  apiVersion: v1
  2874  kind: Pod
  2875  metadata:
  2876    name: pod
  2877    namespace: test-ns
  2878    annotations:
  2879      config.linkerd.io/opaque-ports: "2002"
  2880    labels:
  2881      app: test
  2882  spec:
  2883    containers:
  2884    - name: test
  2885      image: test
  2886      ports:
  2887      - name: test
  2888        containerPort: 2002
  2889  `,
  2890  				`
  2891  apiVersion: v1
  2892  kind: Endpoints
  2893  metadata:
  2894    name: svc
  2895    namespace: test-ns
  2896  subsets:
  2897  - addresses:
  2898    - ip: 10.42.0.111
  2899      nodeName: node
  2900      targetRef:
  2901        kind: Pod
  2902        name: pod
  2903    ports:
  2904    - name: test
  2905      port: 2002
  2906      protocol: TCP
  2907  `,
  2908  			},
  2909  			expected: fmt.Errorf("\t* service svc targets the opaque port 2002 through 1002; add 1002 to its config.linkerd.io/opaque-ports annotation"),
  2910  		},
  2911  		{
  2912  			resources: []string{`
  2913  apiVersion: v1
  2914  kind: Service
  2915  metadata:
  2916    name: svc
  2917    namespace: test-ns
  2918  spec:
  2919    selector:
  2920      app: test
  2921    ports:
  2922    - name: test
  2923      port: 1003
  2924      targetPort: pod-test
  2925  `,
  2926  				`
  2927  apiVersion: v1
  2928  kind: Pod
  2929  metadata:
  2930    name: pod
  2931    namespace: test-ns
  2932    annotations:
  2933      config.linkerd.io/opaque-ports: "2003"
  2934    labels:
  2935      app: test
  2936  spec:
  2937    containers:
  2938    - name: test
  2939      image: test
  2940      ports:
  2941      - name: pod-test
  2942        containerPort: 2003
  2943  `,
  2944  				`
  2945  apiVersion: v1
  2946  kind: Endpoints
  2947  metadata:
  2948    name: svc
  2949    namespace: test-ns
  2950  subsets:
  2951  - addresses:
  2952    - ip: 10.42.0.112
  2953      nodeName: node
  2954      targetRef:
  2955        kind: Pod
  2956        name: pod
  2957    ports:
  2958    - name: test
  2959      port: 2003
  2960      protocol: TCP
  2961  `,
  2962  			},
  2963  			expected: fmt.Errorf("\t* service svc targets the opaque port pod-test through 1003; add 1003 to its config.linkerd.io/opaque-ports annotation"),
  2964  		},
  2965  		{
  2966  			resources: []string{`
  2967  apiVersion: v1
  2968  kind: Service
  2969  metadata:
  2970    name: svc
  2971    namespace: test-ns
  2972  spec:
  2973    selector:
  2974      app: test
  2975    ports:
  2976    - port: 80
  2977      targetPort: 6502
  2978  `,
  2979  				`
  2980  apiVersion: v1
  2981  kind: Pod
  2982  metadata:
  2983    name: pod
  2984    namespace: test-ns
  2985    annotations:
  2986      config.linkerd.io/opaque-ports: "5432"
  2987    labels:
  2988      app: test
  2989  spec:
  2990    containers:
  2991    - name: c1
  2992      image: test
  2993      ports:
  2994      - containerPort: 6502
  2995    - name: c2
  2996      image: test
  2997      ports:
  2998      - containerPort: 5432
  2999  `,
  3000  				`
  3001  apiVersion: v1
  3002  kind: Endpoints
  3003  metadata:
  3004    name: svc
  3005    namespace: test-ns
  3006  subsets:
  3007  - addresses:
  3008    - ip: 10.42.0.112
  3009      nodeName: node
  3010      targetRef:
  3011        kind: Pod
  3012        name: pod
  3013    ports:
  3014    - port: 6502
  3015      protocol: TCP
  3016    - port: 5432
  3017      protocol: TCP
  3018  `,
  3019  			},
  3020  			expected: nil,
  3021  		},
  3022  	}
  3023  
  3024  	for i, tc := range testCases {
  3025  		tc := tc // pin
  3026  		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
  3027  			hc.kubeAPI, err = k8s.NewFakeAPI(tc.resources...)
  3028  			if err != nil {
  3029  				t.Fatalf("unexpected error: %s", err)
  3030  			}
  3031  			err = hc.checkMisconfiguredOpaquePortAnnotations(context.Background())
  3032  			if err == nil && tc.expected != nil {
  3033  				t.Fatalf("Expected check to fail with %s", tc.expected.Error())
  3034  			}
  3035  			if err != nil && tc.expected != nil {
  3036  				if err.Error() != tc.expected.Error() {
  3037  					t.Fatalf("Expected error: %s, received: %s", tc.expected, err)
  3038  				}
  3039  			}
  3040  			if err != nil && tc.expected == nil {
  3041  				t.Fatalf("Did not expect error but got: %s", err.Error())
  3042  			}
  3043  		})
  3044  	}
  3045  }
  3046  
// controlPlaneReplicaOptions holds the availableReplicas count to report for
// each HA control-plane deployment when generating fake resource manifests
// in tests (see generateAllControlPlaneDef).
type controlPlaneReplicaOptions struct {
	destination   int // availableReplicas for the linkerd-destination deployment
	identity      int // availableReplicas for the linkerd-identity deployment
	proxyInjector int // availableReplicas for the linkerd-proxy-injector deployment
	tap           int // availableReplicas for the linkerd-tap deployment
}
  3053  
  3054  func getSingleControlPlaneDef(component string, availableReplicas int) string {
  3055  	return fmt.Sprintf(`
  3056  apiVersion: apps/v1
  3057  kind: Deployment
  3058  metadata:
  3059    name: %s
  3060    namespace: linkerd
  3061  spec:
  3062    template:
  3063      spec:
  3064        containers:
  3065          - image: "hello-world"
  3066            name: test
  3067  status:
  3068    availableReplicas: %d`, component, availableReplicas)
  3069  }
  3070  
  3071  func generateAllControlPlaneDef(replicaOptions *controlPlaneReplicaOptions, t *testing.T) []string {
  3072  	resourceDefs := []string{}
  3073  	for _, component := range linkerdHAControlPlaneComponents {
  3074  		switch component {
  3075  		case "linkerd-destination":
  3076  			resourceDefs = append(resourceDefs, getSingleControlPlaneDef(component, replicaOptions.destination))
  3077  		case "linkerd-identity":
  3078  			resourceDefs = append(resourceDefs, getSingleControlPlaneDef(component, replicaOptions.identity))
  3079  		case "linkerd-proxy-injector":
  3080  			resourceDefs = append(resourceDefs, getSingleControlPlaneDef(component, replicaOptions.proxyInjector))
  3081  		case "linkerd-tap":
  3082  			resourceDefs = append(resourceDefs, getSingleControlPlaneDef(component, replicaOptions.tap))
  3083  		default:
  3084  			t.Fatal("Could not find the resource")
  3085  		}
  3086  	}
  3087  	return resourceDefs
  3088  }
  3089  
  3090  func multiappend(slices ...[]string) []string {
  3091  	res := []string{}
  3092  	for _, slice := range slices {
  3093  		res = append(res, slice...)
  3094  	}
  3095  	return res
  3096  }
  3097  

View as plain text