...

Source file src/k8s.io/kubernetes/pkg/kubelet/kubelet_node_status_test.go

Documentation: k8s.io/kubernetes/pkg/kubelet

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package kubelet
    18  
    19  import (
    20  	"context"
    21  	"encoding/json"
    22  	"fmt"
    23  	"net"
    24  	goruntime "runtime"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"sync/atomic"
    29  	"testing"
    30  	"time"
    31  
    32  	"github.com/stretchr/testify/assert"
    33  	"github.com/stretchr/testify/require"
    34  
    35  	cadvisorapi "github.com/google/cadvisor/info/v1"
    36  	"github.com/google/go-cmp/cmp"
    37  	v1 "k8s.io/api/core/v1"
    38  	apiequality "k8s.io/apimachinery/pkg/api/equality"
    39  	apierrors "k8s.io/apimachinery/pkg/api/errors"
    40  	"k8s.io/apimachinery/pkg/api/resource"
    41  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    42  	"k8s.io/apimachinery/pkg/runtime"
    43  	"k8s.io/apimachinery/pkg/util/rand"
    44  	"k8s.io/apimachinery/pkg/util/strategicpatch"
    45  	"k8s.io/apimachinery/pkg/util/uuid"
    46  	"k8s.io/apimachinery/pkg/util/wait"
    47  	clientset "k8s.io/client-go/kubernetes"
    48  	"k8s.io/client-go/kubernetes/fake"
    49  	"k8s.io/client-go/rest"
    50  	core "k8s.io/client-go/testing"
    51  	"k8s.io/component-base/version"
    52  	kubeletapis "k8s.io/kubelet/pkg/apis"
    53  	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
    54  	"k8s.io/kubernetes/pkg/kubelet/cm"
    55  	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    56  	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
    57  	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
    58  	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
    59  	taintutil "k8s.io/kubernetes/pkg/util/taints"
    60  	"k8s.io/kubernetes/pkg/volume/util"
    61  	netutils "k8s.io/utils/net"
    62  )
    63  
    64  const (
    65  	maxImageTagsForTest = 20
    66  )
    67  
    68  // generateTestingImageLists generates a random image list and the corresponding expectedImageList.
    69  func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
    70  	// imageList is a randomly generated image list
    71  	var imageList []kubecontainer.Image
    72  	for ; count > 0; count-- {
    73  		imageItem := kubecontainer.Image{
    74  			ID:       string(uuid.NewUUID()),
    75  			RepoTags: generateImageTags(),
    76  			Size:     rand.Int63nRange(minImgSize, maxImgSize+1),
    77  		}
    78  		imageList = append(imageList, imageItem)
    79  	}
    80  
    81  	expectedImageList := makeExpectedImageList(imageList, maxImages)
    82  	return imageList, expectedImageList
    83  }
    84  
    85  func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
    86  	// expectedImageList is generated from imageList according to size and maxImages
    87  	// 1. sort the imageList by size
    88  	sort.Sort(sliceutils.ByImageSize(imageList))
    89  	// 2. convert sorted imageList to v1.ContainerImage list
    90  	var expectedImageList []v1.ContainerImage
    91  	for _, kubeImage := range imageList {
    92  		apiImage := v1.ContainerImage{
    93  			Names:     kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
    94  			SizeBytes: kubeImage.Size,
    95  		}
    96  
    97  		expectedImageList = append(expectedImageList, apiImage)
    98  	}
    99  	// 3. return only the top maxImages images from expectedImageList
   100  	if maxImages == -1 { // -1 means no limit
   101  		return expectedImageList
   102  	}
   103  	return expectedImageList[0:maxImages]
   104  }
   105  
   106  func generateImageTags() []string {
   107  	var tagList []string
   108  	// Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
   109  	// that the kubelet reports up to MaxNamesPerImageInNodeStatus tags.
   110  	count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
   111  	for ; count > 0; count-- {
   112  		tagList = append(tagList, "registry.k8s.io:v"+strconv.Itoa(count))
   113  	}
   114  	return tagList
   115  }
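
        // Illustrative sketch (not part of the original file): how the three helpers
        // above are typically combined. Asking for one more image than the cap means
        // the expected list keeps only the largest images (the list is sorted by size,
        // descending), and each entry carries at most
        // nodestatus.MaxNamesPerImageInNodeStatus names. The function name
        // exampleImageLists is hypothetical.
        func exampleImageLists() ([]kubecontainer.Image, []v1.ContainerImage) {
        	// Generate 6 random images but expect only the top 5 by size.
        	return generateTestingImageLists(6, 5)
        }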
   116  
   117  func applyNodeStatusPatch(originalNode *v1.Node, patch []byte) (*v1.Node, error) {
   118  	original, err := json.Marshal(originalNode)
   119  	if err != nil {
   120  		return nil, fmt.Errorf("failed to marshal original node %#v: %v", originalNode, err)
   121  	}
   122  	updated, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
   123  	if err != nil {
   124  		return nil, fmt.Errorf("failed to apply strategic merge patch %q on node %#v: %v",
   125  			patch, originalNode, err)
   126  	}
   127  	updatedNode := &v1.Node{}
   128  	if err := json.Unmarshal(updated, updatedNode); err != nil {
   129  		return nil, fmt.Errorf("failed to unmarshal updated node %q: %v", updated, err)
   130  	}
   131  	return updatedNode, nil
   132  }
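
        // Illustrative sketch (not part of the original file): applyNodeStatusPatch is
        // how the tests below turn the raw bytes of a recorded PATCH action back into a
        // full v1.Node for assertions. A minimal, hypothetical usage with a hand-written
        // strategic merge patch that only touches the status:
        func exampleApplyStatusPatch() (*v1.Node, error) {
        	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
        	patch := []byte(`{"status":{"nodeInfo":{"bootID":"42"}}}`)
        	return applyNodeStatusPatch(node, patch)
        }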
   133  
   134  func notImplemented(action core.Action) (bool, runtime.Object, error) {
   135  	return true, nil, fmt.Errorf("no reaction implemented for %s", action)
   136  }
   137  
   138  func addNotImplatedReaction(kubeClient *fake.Clientset) {
   139  	if kubeClient == nil {
   140  		return
   141  	}
   142  
   143  	kubeClient.AddReactor("*", "*", notImplemented)
   144  }
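
        // Illustrative sketch (not part of the original file): wiring the catch-all
        // reactor above into a bare fake clientset, so that every API call a test has
        // not explicitly stubbed returns a "no reaction implemented" error instead of
        // silently succeeding. The helper name exampleStrictFakeClient is hypothetical.
        func exampleStrictFakeClient() *fake.Clientset {
        	client := &fake.Clientset{}
        	addNotImplatedReaction(client)
        	return client
        }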
   145  
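        // localCM embeds the stub ContainerManager and overrides the capacity and
        // allocatable-reservation accessors, so each test below can control the
        // resources reported in the node status.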
   146  type localCM struct {
   147  	cm.ContainerManager
   148  	allocatableReservation v1.ResourceList
   149  	capacity               v1.ResourceList
   150  }
   151  
   152  func (lcm *localCM) GetNodeAllocatableReservation() v1.ResourceList {
   153  	return lcm.allocatableReservation
   154  }
   155  
   156  func (lcm *localCM) GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList {
   157  	if !localStorageCapacityIsolation {
   158  		delete(lcm.capacity, v1.ResourceEphemeralStorage)
   159  	}
   160  	return lcm.capacity
   161  }
   162  
   163  func TestUpdateNewNodeStatus(t *testing.T) {
   164  	cases := []struct {
   165  		desc                string
   166  		nodeStatusMaxImages int32
   167  	}{
   168  		{
   169  			desc:                "5 image limit",
   170  			nodeStatusMaxImages: 5,
   171  		},
   172  		{
   173  			desc:                "no image limit",
   174  			nodeStatusMaxImages: -1,
   175  		},
   176  	}
   177  
   178  	for _, tc := range cases {
   179  		t.Run(tc.desc, func(t *testing.T) {
   180  			ctx := context.Background()
   181  			// generate one more image in inputImageList than the Kubelet is configured to report,
   182  			// or 5 images if the limit is -1 (unlimited)
   183  			numTestImages := int(tc.nodeStatusMaxImages) + 1
   184  			if tc.nodeStatusMaxImages == -1 {
   185  				numTestImages = 5
   186  			}
   187  			inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
   188  			testKubelet := newTestKubeletWithImageList(
   189  				t, inputImageList, false /* controllerAttachDetachEnabled */, true /*initFakeVolumePlugin*/, true /* localStorageCapacityIsolation */)
   190  			defer testKubelet.Cleanup()
   191  			kubelet := testKubelet.kubelet
   192  			kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
   193  			kubelet.kubeClient = nil // ensure only the heartbeat client is used
   194  			kubelet.containerManager = &localCM{
   195  				ContainerManager: cm.NewStubContainerManager(),
   196  				allocatableReservation: v1.ResourceList{
   197  					v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
   198  					v1.ResourceMemory:           *resource.NewQuantity(100e6, resource.BinarySI),
   199  					v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
   200  				},
   201  				capacity: v1.ResourceList{
   202  					v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   203  					v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   204  					v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   205  				},
   206  			}
   207  			// Since this test retroactively overrides the stub container manager,
   208  			// we have to regenerate default status setters.
   209  			kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   210  
   211  			kubeClient := testKubelet.fakeKubeClient
   212  			existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   213  			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   214  			machineInfo := &cadvisorapi.MachineInfo{
   215  				MachineID:      "123",
   216  				SystemUUID:     "abc",
   217  				BootID:         "1b3",
   218  				NumCores:       2,
   219  				MemoryCapacity: 10e9, // 10G
   220  			}
   221  			kubelet.setCachedMachineInfo(machineInfo)
   222  
   223  			expectedNode := &v1.Node{
   224  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   225  				Spec:       v1.NodeSpec{},
   226  				Status: v1.NodeStatus{
   227  					Conditions: []v1.NodeCondition{
   228  						{
   229  							Type:               v1.NodeMemoryPressure,
   230  							Status:             v1.ConditionFalse,
   231  							Reason:             "KubeletHasSufficientMemory",
   232  							Message:            "kubelet has sufficient memory available",
   233  							LastHeartbeatTime:  metav1.Time{},
   234  							LastTransitionTime: metav1.Time{},
   235  						},
   236  						{
   237  							Type:               v1.NodeDiskPressure,
   238  							Status:             v1.ConditionFalse,
   239  							Reason:             "KubeletHasNoDiskPressure",
   240  							Message:            "kubelet has no disk pressure",
   241  							LastHeartbeatTime:  metav1.Time{},
   242  							LastTransitionTime: metav1.Time{},
   243  						},
   244  						{
   245  							Type:               v1.NodePIDPressure,
   246  							Status:             v1.ConditionFalse,
   247  							Reason:             "KubeletHasSufficientPID",
   248  							Message:            "kubelet has sufficient PID available",
   249  							LastHeartbeatTime:  metav1.Time{},
   250  							LastTransitionTime: metav1.Time{},
   251  						},
   252  						{
   253  							Type:               v1.NodeReady,
   254  							Status:             v1.ConditionTrue,
   255  							Reason:             "KubeletReady",
   256  							Message:            "kubelet is posting ready status",
   257  							LastHeartbeatTime:  metav1.Time{},
   258  							LastTransitionTime: metav1.Time{},
   259  						},
   260  					},
   261  					NodeInfo: v1.NodeSystemInfo{
   262  						MachineID:               "123",
   263  						SystemUUID:              "abc",
   264  						BootID:                  "1b3",
   265  						KernelVersion:           cadvisortest.FakeKernelVersion,
   266  						OSImage:                 cadvisortest.FakeContainerOSVersion,
   267  						OperatingSystem:         goruntime.GOOS,
   268  						Architecture:            goruntime.GOARCH,
   269  						ContainerRuntimeVersion: "test://1.5.0",
   270  						KubeletVersion:          version.Get().String(),
   271  						KubeProxyVersion:        version.Get().String(),
   272  					},
   273  					Capacity: v1.ResourceList{
   274  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   275  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   276  						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   277  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   278  					},
   279  					Allocatable: v1.ResourceList{
   280  						v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   281  						v1.ResourceMemory:           *resource.NewQuantity(9900e6, resource.BinarySI),
   282  						v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   283  						v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
   284  					},
   285  					Addresses: []v1.NodeAddress{
   286  						{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   287  						{Type: v1.NodeHostName, Address: testKubeletHostname},
   288  					},
   289  					Images: expectedImageList,
   290  				},
   291  			}
   292  
   293  			kubelet.updateRuntimeUp()
   294  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
   295  			actions := kubeClient.Actions()
   296  			require.Len(t, actions, 2)
   297  			require.True(t, actions[1].Matches("patch", "nodes"))
   298  			require.Equal(t, actions[1].GetSubresource(), "status")
   299  
   300  			updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
   301  			assert.NoError(t, err)
   302  			for i, cond := range updatedNode.Status.Conditions {
   303  				assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
   304  				assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
   305  				updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   306  				updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   307  			}
   308  
   309  			// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   310  			assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
   311  				"NodeReady should be the last condition")
   312  			assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
   313  			assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   314  		})
   315  	}
   316  }
   317  
   318  func TestUpdateExistingNodeStatus(t *testing.T) {
   319  	ctx := context.Background()
   320  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   321  	defer testKubelet.Cleanup()
   322  	kubelet := testKubelet.kubelet
   323  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   324  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   325  	kubelet.containerManager = &localCM{
   326  		ContainerManager: cm.NewStubContainerManager(),
   327  		allocatableReservation: v1.ResourceList{
   328  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   329  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   330  		},
   331  		capacity: v1.ResourceList{
   332  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   333  			v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   334  			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   335  		},
   336  	}
   337  	// Since this test retroactively overrides the stub container manager,
   338  	// we have to regenerate default status setters.
   339  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   340  
   341  	kubeClient := testKubelet.fakeKubeClient
   342  	existingNode := v1.Node{
   343  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
   344  		Spec:       v1.NodeSpec{},
   345  		Status: v1.NodeStatus{
   346  			Conditions: []v1.NodeCondition{
   347  				{
   348  					Type:               v1.NodeMemoryPressure,
   349  					Status:             v1.ConditionFalse,
   350  					Reason:             "KubeletHasSufficientMemory",
   351  					Message:            "kubelet has sufficient memory available",
   352  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   353  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   354  				},
   355  				{
   356  					Type:               v1.NodeDiskPressure,
   357  					Status:             v1.ConditionFalse,
   358  					Reason:             "KubeletHasSufficientDisk",
   359  					Message:            "kubelet has sufficient disk space available",
   360  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   361  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   362  				},
   363  				{
   364  					Type:               v1.NodePIDPressure,
   365  					Status:             v1.ConditionFalse,
   366  					Reason:             "KubeletHasSufficientPID",
   367  					Message:            "kubelet has sufficient PID available",
   368  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   369  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   370  				},
   371  				{
   372  					Type:               v1.NodeReady,
   373  					Status:             v1.ConditionTrue,
   374  					Reason:             "KubeletReady",
   375  					Message:            "kubelet is posting ready status",
   376  					LastHeartbeatTime:  metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   377  					LastTransitionTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
   378  				},
   379  			},
   380  			Capacity: v1.ResourceList{
   381  				v1.ResourceCPU:    *resource.NewMilliQuantity(3000, resource.DecimalSI),
   382  				v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
   383  				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
   384  			},
   385  			Allocatable: v1.ResourceList{
   386  				v1.ResourceCPU:    *resource.NewMilliQuantity(2800, resource.DecimalSI),
   387  				v1.ResourceMemory: *resource.NewQuantity(19900e6, resource.BinarySI),
   388  				v1.ResourcePods:   *resource.NewQuantity(0, resource.DecimalSI),
   389  			},
   390  		},
   391  	}
   392  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   393  	machineInfo := &cadvisorapi.MachineInfo{
   394  		MachineID:      "123",
   395  		SystemUUID:     "abc",
   396  		BootID:         "1b3",
   397  		NumCores:       2,
   398  		MemoryCapacity: 20e9,
   399  	}
   400  	kubelet.setCachedMachineInfo(machineInfo)
   401  
   402  	expectedNode := &v1.Node{
   403  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   404  		Spec:       v1.NodeSpec{},
   405  		Status: v1.NodeStatus{
   406  			Conditions: []v1.NodeCondition{
   407  				{
   408  					Type:               v1.NodeMemoryPressure,
   409  					Status:             v1.ConditionFalse,
   410  					Reason:             "KubeletHasSufficientMemory",
   411  					Message:            "kubelet has sufficient memory available",
   412  					LastHeartbeatTime:  metav1.Time{},
   413  					LastTransitionTime: metav1.Time{},
   414  				},
   415  				{
   416  					Type:               v1.NodeDiskPressure,
   417  					Status:             v1.ConditionFalse,
   418  					Reason:             "KubeletHasSufficientDisk",
   419  					Message:            "kubelet has sufficient disk space available",
   420  					LastHeartbeatTime:  metav1.Time{},
   421  					LastTransitionTime: metav1.Time{},
   422  				},
   423  				{
   424  					Type:               v1.NodePIDPressure,
   425  					Status:             v1.ConditionFalse,
   426  					Reason:             "KubeletHasSufficientPID",
   427  					Message:            "kubelet has sufficient PID available",
   428  					LastHeartbeatTime:  metav1.Time{},
   429  					LastTransitionTime: metav1.Time{},
   430  				},
   431  				{
   432  					Type:               v1.NodeReady,
   433  					Status:             v1.ConditionTrue,
   434  					Reason:             "KubeletReady",
   435  					Message:            "kubelet is posting ready status",
   436  					LastHeartbeatTime:  metav1.Time{}, // placeholder
   437  					LastTransitionTime: metav1.Time{}, // placeholder
   438  				},
   439  			},
   440  			NodeInfo: v1.NodeSystemInfo{
   441  				MachineID:               "123",
   442  				SystemUUID:              "abc",
   443  				BootID:                  "1b3",
   444  				KernelVersion:           cadvisortest.FakeKernelVersion,
   445  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   446  				OperatingSystem:         goruntime.GOOS,
   447  				Architecture:            goruntime.GOARCH,
   448  				ContainerRuntimeVersion: "test://1.5.0",
   449  				KubeletVersion:          version.Get().String(),
   450  				KubeProxyVersion:        version.Get().String(),
   451  			},
   452  			Capacity: v1.ResourceList{
   453  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   454  				v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   455  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   456  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   457  			},
   458  			Allocatable: v1.ResourceList{
   459  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   460  				v1.ResourceMemory:           *resource.NewQuantity(19900e6, resource.BinarySI),
   461  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   462  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   463  			},
   464  			Addresses: []v1.NodeAddress{
   465  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   466  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   467  			},
   468  			// images will be sorted from max to min in node status.
   469  			Images: []v1.ContainerImage{
   470  				{
   471  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   472  					SizeBytes: 123,
   473  				},
   474  				{
   475  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   476  					SizeBytes: 456,
   477  				},
   478  			},
   479  		},
   480  	}
   481  
   482  	kubelet.updateRuntimeUp()
   483  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   484  
   485  	actions := kubeClient.Actions()
   486  	assert.Len(t, actions, 2)
   487  
   488  	assert.IsType(t, core.PatchActionImpl{}, actions[1])
   489  	patchAction := actions[1].(core.PatchActionImpl)
   490  
   491  	updatedNode, err := applyNodeStatusPatch(&existingNode, patchAction.GetPatch())
   492  	require.NoError(t, err)
   493  
   494  	for i, cond := range updatedNode.Status.Conditions {
   495  		old := metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC).Time
   496  		// Expect LastHeartbeatTime to be updated to now, while LastTransitionTime stays the same.
   497  		assert.NotEqual(t, old, cond.LastHeartbeatTime.Rfc3339Copy().UTC(), "LastHeartbeatTime for condition %v", cond.Type)
   498  		assert.EqualValues(t, old, cond.LastTransitionTime.Rfc3339Copy().UTC(), "LastTransitionTime for condition %v", cond.Type)
   499  
   500  		updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   501  		updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   502  	}
   503  
   504  	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   505  	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
   506  		"NodeReady should be the last condition")
   507  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   508  }
   509  
   510  func TestUpdateExistingNodeStatusTimeout(t *testing.T) {
   511  	ctx := context.Background()
   512  	if testing.Short() {
   513  		t.Skip("skipping test in short mode.")
   514  	}
   515  
   516  	attempts := int64(0)
   517  	failureCallbacks := int64(0)
   518  
   519  	// set up a listener that hangs connections
   520  	ln, err := net.Listen("tcp", "127.0.0.1:0")
   521  	assert.NoError(t, err)
   522  	defer ln.Close()
   523  	go func() {
   524  		// accept connections and just let them hang
   525  		for {
   526  			_, err := ln.Accept()
   527  			if err != nil {
   528  				t.Log(err)
   529  				return
   530  			}
   531  			t.Log("accepted connection")
   532  			atomic.AddInt64(&attempts, 1)
   533  		}
   534  	}()
   535  
   536  	config := &rest.Config{
   537  		Host:    "http://" + ln.Addr().String(),
   538  		QPS:     -1,
   539  		Timeout: time.Second,
   540  	}
   541  	assert.NoError(t, err)
   542  
   543  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   544  	defer testKubelet.Cleanup()
   545  	kubelet := testKubelet.kubelet
   546  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
   547  	kubelet.heartbeatClient, err = clientset.NewForConfig(config)
   548  	require.NoError(t, err)
   549  	kubelet.onRepeatedHeartbeatFailure = func() {
   550  		atomic.AddInt64(&failureCallbacks, 1)
   551  	}
   552  	kubelet.containerManager = &localCM{
   553  		ContainerManager: cm.NewStubContainerManager(),
   554  		allocatableReservation: v1.ResourceList{
   555  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   556  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   557  		},
   558  		capacity: v1.ResourceList{
   559  			v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
   560  			v1.ResourceMemory: *resource.NewQuantity(20e9, resource.BinarySI),
   561  		},
   562  	}
   563  
   564  	// should return an error, but not hang
   565  	assert.Error(t, kubelet.updateNodeStatus(ctx))
   566  
   567  	// should have attempted multiple times
   568  	if actualAttempts := atomic.LoadInt64(&attempts); actualAttempts < nodeStatusUpdateRetry {
   569  		t.Errorf("Expected at least %d attempts, got %d", nodeStatusUpdateRetry, actualAttempts)
   570  	}
   571  	// should have gotten multiple failure callbacks
   572  	if actualFailureCallbacks := atomic.LoadInt64(&failureCallbacks); actualFailureCallbacks < (nodeStatusUpdateRetry - 1) {
   573  		t.Errorf("Expected at least %d failure callbacks, got %d", (nodeStatusUpdateRetry - 1), actualFailureCallbacks)
   574  	}
   575  }
   576  
   577  func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
   578  	ctx := context.Background()
   579  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   580  	defer testKubelet.Cleanup()
   581  	kubelet := testKubelet.kubelet
   582  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   583  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   584  	kubelet.containerManager = &localCM{
   585  		ContainerManager: cm.NewStubContainerManager(),
   586  		allocatableReservation: v1.ResourceList{
   587  			v1.ResourceCPU:              *resource.NewMilliQuantity(200, resource.DecimalSI),
   588  			v1.ResourceMemory:           *resource.NewQuantity(100e6, resource.BinarySI),
   589  			v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
   590  		},
   591  		capacity: v1.ResourceList{
   592  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   593  			v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   594  			v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
   595  		},
   596  	}
   597  	// Since this test retroactively overrides the stub container manager,
   598  	// we have to regenerate default status setters.
   599  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   600  
   601  	clock := testKubelet.fakeClock
   602  	kubeClient := testKubelet.fakeKubeClient
   603  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   604  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
   605  	machineInfo := &cadvisorapi.MachineInfo{
   606  		MachineID:      "123",
   607  		SystemUUID:     "abc",
   608  		BootID:         "1b3",
   609  		NumCores:       2,
   610  		MemoryCapacity: 10e9,
   611  	}
   612  	kubelet.setCachedMachineInfo(machineInfo)
   613  
   614  	expectedNode := &v1.Node{
   615  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   616  		Spec:       v1.NodeSpec{},
   617  		Status: v1.NodeStatus{
   618  			Conditions: []v1.NodeCondition{
   619  				{
   620  					Type:               v1.NodeMemoryPressure,
   621  					Status:             v1.ConditionFalse,
   622  					Reason:             "KubeletHasSufficientMemory",
   623  					Message:            "kubelet has sufficient memory available",
   624  					LastHeartbeatTime:  metav1.Time{},
   625  					LastTransitionTime: metav1.Time{},
   626  				},
   627  				{
   628  					Type:               v1.NodeDiskPressure,
   629  					Status:             v1.ConditionFalse,
   630  					Reason:             "KubeletHasNoDiskPressure",
   631  					Message:            "kubelet has no disk pressure",
   632  					LastHeartbeatTime:  metav1.Time{},
   633  					LastTransitionTime: metav1.Time{},
   634  				},
   635  				{
   636  					Type:               v1.NodePIDPressure,
   637  					Status:             v1.ConditionFalse,
   638  					Reason:             "KubeletHasSufficientPID",
   639  					Message:            "kubelet has sufficient PID available",
   640  					LastHeartbeatTime:  metav1.Time{},
   641  					LastTransitionTime: metav1.Time{},
   642  				},
   643  				{}, // placeholder
   644  			},
   645  			NodeInfo: v1.NodeSystemInfo{
   646  				MachineID:               "123",
   647  				SystemUUID:              "abc",
   648  				BootID:                  "1b3",
   649  				KernelVersion:           cadvisortest.FakeKernelVersion,
   650  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   651  				OperatingSystem:         goruntime.GOOS,
   652  				Architecture:            goruntime.GOARCH,
   653  				ContainerRuntimeVersion: "test://1.5.0",
   654  				KubeletVersion:          version.Get().String(),
   655  				KubeProxyVersion:        version.Get().String(),
   656  			},
   657  			Capacity: v1.ResourceList{
   658  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   659  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
   660  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   661  				v1.ResourceEphemeralStorage: *resource.NewQuantity(20e9, resource.BinarySI),
   662  			},
   663  			Allocatable: v1.ResourceList{
   664  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   665  				v1.ResourceMemory:           *resource.NewQuantity(9900e6, resource.BinarySI),
   666  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   667  				v1.ResourceEphemeralStorage: *resource.NewQuantity(10e9, resource.BinarySI),
   668  			},
   669  			Addresses: []v1.NodeAddress{
   670  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   671  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   672  			},
   673  			Images: []v1.ContainerImage{
   674  				{
   675  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   676  					SizeBytes: 123,
   677  				},
   678  				{
   679  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   680  					SizeBytes: 456,
   681  				},
   682  			},
   683  		},
   684  	}
   685  
   686  	checkNodeStatus := func(status v1.ConditionStatus, reason string) {
   687  		kubeClient.ClearActions()
   688  		assert.NoError(t, kubelet.updateNodeStatus(ctx))
   689  		actions := kubeClient.Actions()
   690  		require.Len(t, actions, 2)
   691  		require.True(t, actions[1].Matches("patch", "nodes"))
   692  		require.Equal(t, actions[1].GetSubresource(), "status")
   693  
   694  		updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
   695  		require.NoError(t, err, "can't apply node status patch")
   696  
   697  		for i, cond := range updatedNode.Status.Conditions {
   698  			assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
   699  			assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
   700  			updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
   701  			updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
   702  		}
   703  
   704  		// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   705  		lastIndex := len(updatedNode.Status.Conditions) - 1
   706  		assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[lastIndex].Type, "NodeReady should be the last condition")
   707  		assert.NotEmpty(t, updatedNode.Status.Conditions[lastIndex].Message)
   708  
   709  		updatedNode.Status.Conditions[lastIndex].Message = ""
   710  		expectedNode.Status.Conditions[lastIndex] = v1.NodeCondition{
   711  			Type:               v1.NodeReady,
   712  			Status:             status,
   713  			Reason:             reason,
   714  			LastHeartbeatTime:  metav1.Time{},
   715  			LastTransitionTime: metav1.Time{},
   716  		}
   717  		assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   718  	}
   719  
   720  	// TODO(random-liu): Refactor the unit test to be a table-driven test.
   721  	// Should report kubelet not ready if the runtime check is out of date
   722  	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
   723  	kubelet.updateRuntimeUp()
   724  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   725  
   726  	// Should report kubelet ready if the runtime check is updated
   727  	clock.SetTime(time.Now())
   728  	kubelet.updateRuntimeUp()
   729  	checkNodeStatus(v1.ConditionTrue, "KubeletReady")
   730  
   731  	// Should report kubelet not ready if the runtime check is out of date
   732  	clock.SetTime(time.Now().Add(-maxWaitForContainerRuntime))
   733  	kubelet.updateRuntimeUp()
   734  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   735  
   736  	// Should report kubelet not ready if the runtime check failed
   737  	fakeRuntime := testKubelet.fakeRuntime
   738  	// Inject error into fake runtime status check, node should be NotReady
   739  	fakeRuntime.StatusErr = fmt.Errorf("injected runtime status error")
   740  	clock.SetTime(time.Now())
   741  	kubelet.updateRuntimeUp()
   742  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   743  
   744  	fakeRuntime.StatusErr = nil
   745  
   746  	// Should report node not ready if runtime status is nil.
   747  	fakeRuntime.RuntimeStatus = nil
   748  	kubelet.updateRuntimeUp()
   749  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   750  
   751  	// Should report node not ready if runtime status is empty.
   752  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{}
   753  	kubelet.updateRuntimeUp()
   754  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   755  
   756  	// Should report node not ready if RuntimeReady is false.
   757  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   758  		Conditions: []kubecontainer.RuntimeCondition{
   759  			{Type: kubecontainer.RuntimeReady, Status: false},
   760  			{Type: kubecontainer.NetworkReady, Status: true},
   761  		},
   762  	}
   763  	kubelet.updateRuntimeUp()
   764  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   765  
   766  	// Should report node ready if RuntimeReady is true.
   767  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   768  		Conditions: []kubecontainer.RuntimeCondition{
   769  			{Type: kubecontainer.RuntimeReady, Status: true},
   770  			{Type: kubecontainer.NetworkReady, Status: true},
   771  		},
   772  	}
   773  	kubelet.updateRuntimeUp()
   774  	checkNodeStatus(v1.ConditionTrue, "KubeletReady")
   775  
   776  	// Should report node not ready if NetworkReady is false.
   777  	fakeRuntime.RuntimeStatus = &kubecontainer.RuntimeStatus{
   778  		Conditions: []kubecontainer.RuntimeCondition{
   779  			{Type: kubecontainer.RuntimeReady, Status: true},
   780  			{Type: kubecontainer.NetworkReady, Status: false},
   781  		},
   782  	}
   783  	kubelet.updateRuntimeUp()
   784  	checkNodeStatus(v1.ConditionFalse, "KubeletNotReady")
   785  }
   786  
   787  func TestUpdateNodeStatusError(t *testing.T) {
   788  	ctx := context.Background()
   789  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   790  	defer testKubelet.Cleanup()
   791  	kubelet := testKubelet.kubelet
   792  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
   793  	// No matching node for the kubelet
   794  	testKubelet.fakeKubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{}}).ReactionChain
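        	// updateNodeStatus retries up to nodeStatusUpdateRetry times; each attempt
        	// issues a GET that finds no matching node, which is why exactly that many
        	// actions are expected below.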
   795  	assert.Error(t, kubelet.updateNodeStatus(ctx))
   796  	assert.Len(t, testKubelet.fakeKubeClient.Actions(), nodeStatusUpdateRetry)
   797  }
   798  
   799  func TestUpdateNodeStatusWithLease(t *testing.T) {
   800  	ctx := context.Background()
   801  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
   802  	defer testKubelet.Cleanup()
   803  	clock := testKubelet.fakeClock
   804  	kubelet := testKubelet.kubelet
   805  	kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
   806  	kubelet.kubeClient = nil        // ensure only the heartbeat client is used
   807  	kubelet.containerManager = &localCM{
   808  		ContainerManager: cm.NewStubContainerManager(),
   809  		allocatableReservation: v1.ResourceList{
   810  			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
   811  			v1.ResourceMemory: *resource.NewQuantity(100e6, resource.BinarySI),
   812  		},
   813  		capacity: v1.ResourceList{
   814  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   815  			v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   816  			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   817  		},
   818  	}
   819  	// Since this test retroactively overrides the stub container manager,
   820  	// we have to regenerate default status setters.
   821  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
   822  	kubelet.nodeStatusReportFrequency = time.Minute
   823  
   824  	kubeClient := testKubelet.fakeKubeClient
   825  	existingNode := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
   826  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*existingNode}}).ReactionChain
   827  	machineInfo := &cadvisorapi.MachineInfo{
   828  		MachineID:      "123",
   829  		SystemUUID:     "abc",
   830  		BootID:         "1b3",
   831  		NumCores:       2,
   832  		MemoryCapacity: 20e9,
   833  	}
   834  	kubelet.setCachedMachineInfo(machineInfo)
   835  
   836  	now := metav1.NewTime(clock.Now()).Rfc3339Copy()
   837  	expectedNode := &v1.Node{
   838  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
   839  		Spec:       v1.NodeSpec{},
   840  		Status: v1.NodeStatus{
   841  			Conditions: []v1.NodeCondition{
   842  				{
   843  					Type:               v1.NodeMemoryPressure,
   844  					Status:             v1.ConditionFalse,
   845  					Reason:             "KubeletHasSufficientMemory",
   846  					Message:            "kubelet has sufficient memory available",
   847  					LastHeartbeatTime:  now,
   848  					LastTransitionTime: now,
   849  				},
   850  				{
   851  					Type:               v1.NodeDiskPressure,
   852  					Status:             v1.ConditionFalse,
   853  					Reason:             "KubeletHasNoDiskPressure",
   854  					Message:            "kubelet has no disk pressure",
   855  					LastHeartbeatTime:  now,
   856  					LastTransitionTime: now,
   857  				},
   858  				{
   859  					Type:               v1.NodePIDPressure,
   860  					Status:             v1.ConditionFalse,
   861  					Reason:             "KubeletHasSufficientPID",
   862  					Message:            "kubelet has sufficient PID available",
   863  					LastHeartbeatTime:  now,
   864  					LastTransitionTime: now,
   865  				},
   866  				{
   867  					Type:               v1.NodeReady,
   868  					Status:             v1.ConditionTrue,
   869  					Reason:             "KubeletReady",
   870  					Message:            "kubelet is posting ready status",
   871  					LastHeartbeatTime:  now,
   872  					LastTransitionTime: now,
   873  				},
   874  			},
   875  			NodeInfo: v1.NodeSystemInfo{
   876  				MachineID:               "123",
   877  				SystemUUID:              "abc",
   878  				BootID:                  "1b3",
   879  				KernelVersion:           cadvisortest.FakeKernelVersion,
   880  				OSImage:                 cadvisortest.FakeContainerOSVersion,
   881  				OperatingSystem:         goruntime.GOOS,
   882  				Architecture:            goruntime.GOARCH,
   883  				ContainerRuntimeVersion: "test://1.5.0",
   884  				KubeletVersion:          version.Get().String(),
   885  				KubeProxyVersion:        version.Get().String(),
   886  			},
   887  			Capacity: v1.ResourceList{
   888  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
   889  				v1.ResourceMemory:           *resource.NewQuantity(20e9, resource.BinarySI),
   890  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   891  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   892  			},
   893  			Allocatable: v1.ResourceList{
   894  				v1.ResourceCPU:              *resource.NewMilliQuantity(1800, resource.DecimalSI),
   895  				v1.ResourceMemory:           *resource.NewQuantity(19900e6, resource.BinarySI),
   896  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
   897  				v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
   898  			},
   899  			Addresses: []v1.NodeAddress{
   900  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
   901  				{Type: v1.NodeHostName, Address: testKubeletHostname},
   902  			},
   903  			// images will be sorted from max to min in node status.
   904  			Images: []v1.ContainerImage{
   905  				{
   906  					Names:     []string{"registry.k8s.io:v1", "registry.k8s.io:v2"},
   907  					SizeBytes: 123,
   908  				},
   909  				{
   910  					Names:     []string{"registry.k8s.io:v3", "registry.k8s.io:v4"},
   911  					SizeBytes: 456,
   912  				},
   913  			},
   914  		},
   915  	}
   916  
   917  	// Update node status when node status is created.
   918  	// Report node status.
   919  	kubelet.updateRuntimeUp()
   920  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   921  
   922  	actions := kubeClient.Actions()
   923  	assert.Len(t, actions, 2)
   924  	assert.IsType(t, core.GetActionImpl{}, actions[0])
   925  	assert.IsType(t, core.PatchActionImpl{}, actions[1])
   926  	patchAction := actions[1].(core.PatchActionImpl)
   927  
   928  	updatedNode, err := applyNodeStatusPatch(existingNode, patchAction.GetPatch())
   929  	require.NoError(t, err)
   930  	for i, cond := range updatedNode.Status.Conditions {
   931  		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
   932  		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
   933  	}
   934  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   935  
   936  	// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
   937  	assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
   938  		"NodeReady should be the last condition")
   939  
   940  	// Update node status again when nothing is changed (except heartbeat time).
   941  	// Report node status if it has exceeded the duration of nodeStatusReportFrequency.
   942  	clock.Step(time.Minute)
   943  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   944  
   945  	// 2 more actions (There were 2 actions before).
   946  	actions = kubeClient.Actions()
   947  	assert.Len(t, actions, 4)
   948  	assert.IsType(t, core.GetActionImpl{}, actions[2])
   949  	assert.IsType(t, core.PatchActionImpl{}, actions[3])
   950  	patchAction = actions[3].(core.PatchActionImpl)
   951  
   952  	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
   953  	require.NoError(t, err)
   954  	for i, cond := range updatedNode.Status.Conditions {
   955  		updatedNode.Status.Conditions[i].LastHeartbeatTime = cond.LastHeartbeatTime.Rfc3339Copy()
   956  		updatedNode.Status.Conditions[i].LastTransitionTime = cond.LastTransitionTime.Rfc3339Copy()
   957  	}
   958  
   959  	// Expect LastHeartbeatTime to be updated, other things unchanged.
   960  	for i, cond := range expectedNode.Status.Conditions {
   961  		expectedNode.Status.Conditions[i].LastHeartbeatTime = metav1.NewTime(cond.LastHeartbeatTime.Time.Add(time.Minute)).Rfc3339Copy()
   962  	}
   963  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", cmp.Diff(expectedNode, updatedNode))
   964  
   965  	// Update node status again when nothing is changed (except heartbeat time).
   966  	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
   967  	clock.Step(10 * time.Second)
   968  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   969  
   970  	// Only 1 more action (There were 4 actions before).
   971  	actions = kubeClient.Actions()
   972  	assert.Len(t, actions, 5)
   973  	assert.IsType(t, core.GetActionImpl{}, actions[4])
   974  
   975  	// Update node status again when something is changed.
   976  	// Report node status even if it is still within the duration of nodeStatusReportFrequency.
   977  	clock.Step(10 * time.Second)
   978  	var newMemoryCapacity int64 = 40e9
   979  	oldMachineInfo, err := kubelet.GetCachedMachineInfo()
   980  	if err != nil {
   981  		t.Fatal(err)
   982  	}
   983  	newMachineInfo := oldMachineInfo.Clone()
   984  	newMachineInfo.MemoryCapacity = uint64(newMemoryCapacity)
   985  	kubelet.setCachedMachineInfo(newMachineInfo)
   986  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
   987  
   988  	// 2 more actions (There were 5 actions before).
   989  	actions = kubeClient.Actions()
   990  	assert.Len(t, actions, 7)
   991  	assert.IsType(t, core.GetActionImpl{}, actions[5])
   992  	assert.IsType(t, core.PatchActionImpl{}, actions[6])
   993  	patchAction = actions[6].(core.PatchActionImpl)
   994  
   995  	updatedNode, err = applyNodeStatusPatch(updatedNode, patchAction.GetPatch())
   996  	require.NoError(t, err)
   997  	memCapacity := updatedNode.Status.Capacity[v1.ResourceMemory]
   998  	updatedMemoryCapacity, _ := (&memCapacity).AsInt64()
   999  	assert.Equal(t, newMemoryCapacity, updatedMemoryCapacity, "Memory capacity")
  1000  
  1001  	now = metav1.NewTime(clock.Now()).Rfc3339Copy()
  1002  	for _, cond := range updatedNode.Status.Conditions {
  1003  		// Expect LastHeartbeatTime to be updated, while LastTransitionTime stays unchanged.
  1004  		assert.Equal(t, now, cond.LastHeartbeatTime.Rfc3339Copy(),
  1005  			"LastHeartbeatTime for condition %v", cond.Type)
  1006  		assert.Equal(t, now, metav1.NewTime(cond.LastTransitionTime.Time.Add(time.Minute+20*time.Second)).Rfc3339Copy(),
  1007  			"LastTransitionTime for condition %v", cond.Type)
  1008  	}
  1009  
  1010  	// Update node status when changing pod CIDR.
  1011  	// Report node status even if it is still within the duration of nodeStatusReportFrequency.
  1012  	clock.Step(10 * time.Second)
  1013  	assert.Equal(t, "", kubelet.runtimeState.podCIDR(), "Pod CIDR should be empty")
  1014  	podCIDRs := []string{"10.0.0.0/24", "2000::/10"}
  1015  	updatedNode.Spec.PodCIDR = podCIDRs[0]
  1016  	updatedNode.Spec.PodCIDRs = podCIDRs
  1017  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*updatedNode}}).ReactionChain
  1018  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1019  	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should be updated now")
  1020  	// 2 more actions (There were 7 actions before).
  1021  	actions = kubeClient.Actions()
  1022  	assert.Len(t, actions, 9)
  1023  	assert.IsType(t, core.GetActionImpl{}, actions[7])
  1024  	assert.IsType(t, core.PatchActionImpl{}, actions[8])
  1025  
  1026  	// Update node status when keeping the pod CIDR.
  1027  	// Do not report node status if it is within the duration of nodeStatusReportFrequency.
  1028  	clock.Step(10 * time.Second)
  1029  	assert.Equal(t, strings.Join(podCIDRs, ","), kubelet.runtimeState.podCIDR(), "Pod CIDR should already be updated")
  1030  
  1031  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1032  	// Only 1 more action (There were 9 actions before).
  1033  	actions = kubeClient.Actions()
  1034  	assert.Len(t, actions, 10)
  1035  	assert.IsType(t, core.GetActionImpl{}, actions[9])
  1036  }
  1037  
  1038  func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
  1039  	cases := []struct {
  1040  		desc                  string
  1041  		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
  1042  		existingNode          *v1.Node              // existing node object
  1043  		expectedNode          *v1.Node              // new node object after patch
  1044  		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
  1045  	}{
  1046  		{
  1047  			desc:         "no volumes and no update",
  1048  			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
  1049  		},
  1050  		{
  1051  			desc:            "volumes inuse on node and volumeManager",
  1052  			existingVolumes: []v1.UniqueVolumeName{"vol1"},
  1053  			existingNode: &v1.Node{
  1054  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  1055  				Status: v1.NodeStatus{
  1056  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1057  				},
  1058  			},
  1059  			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  1060  		},
  1061  		{
  1062  			desc: "volumes inuse on node but not in volumeManager",
  1063  			existingNode: &v1.Node{
  1064  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  1065  				Status: v1.NodeStatus{
  1066  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1067  				},
  1068  			},
  1069  			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}}},
  1070  		},
  1071  		{
  1072  			desc:            "volumes inuse in volumeManager but not on node",
  1073  			existingVolumes: []v1.UniqueVolumeName{"vol1"},
  1074  			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
  1075  			expectedNode: &v1.Node{
  1076  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  1077  				Status: v1.NodeStatus{
  1078  					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
  1079  				},
  1080  			},
  1081  			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
  1082  		},
  1083  	}
  1084  
  1085  	for _, tc := range cases {
  1086  		t.Run(tc.desc, func(t *testing.T) {
  1087  			ctx := context.Background()
  1088  			// Setup
  1089  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1090  			defer testKubelet.Cleanup()
  1091  
  1092  			kubelet := testKubelet.kubelet
  1093  			kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1094  			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
  1095  			kubelet.lastStatusReportTime = kubelet.clock.Now()
  1096  			kubelet.nodeStatusReportFrequency = time.Hour
  1097  			kubelet.setCachedMachineInfo(&cadvisorapi.MachineInfo{})
  1098  
  1099  			// override test volumeManager
  1100  			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
  1101  			kubelet.volumeManager = fakeVolumeManager
  1102  
  1103  			// Only test VolumesInUse setter
  1104  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
  1105  				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
  1106  					kubelet.volumeManager.GetVolumesInUse),
  1107  			}
  1108  
  1109  			kubeClient := testKubelet.fakeKubeClient
  1110  			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain
  1111  
  1112  			// Execute
  1113  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1114  
  1115  			// Validate
  1116  			actions := kubeClient.Actions()
  1117  			if tc.expectedNode != nil {
  1118  				assert.Len(t, actions, 2)
  1119  				assert.IsType(t, core.GetActionImpl{}, actions[0])
  1120  				assert.IsType(t, core.PatchActionImpl{}, actions[1])
  1121  				patchAction := actions[1].(core.PatchActionImpl)
  1122  
  1123  				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1124  				require.NoError(t, err)
  1125  				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", cmp.Diff(tc.expectedNode, updatedNode))
  1126  			} else {
  1127  				assert.Len(t, actions, 1)
  1128  				assert.IsType(t, core.GetActionImpl{}, actions[0])
  1129  			}
  1130  
  1131  			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
  1132  			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", cmp.Diff(tc.expectedReportedInUse, reportedInUse))
  1133  		})
  1134  	}
  1135  }
  1136  
  1137  func TestFastStatusUpdateOnce(t *testing.T) {
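        	// The knobs below count status-sync attempts: beforeMarkReady is how many
        	// attempts happen before the node status funcs start marking the node Ready,
        	// beforeNextReady is how many happen before the lister's copy of the node
        	// turns Ready, and beforeTimeout is how many happen before the fake clock is
        	// stepped past nodeReadyGracePeriod. patchFailures is the number of initial
        	// status patches that are made to fail; wantCalls and wantPatches are the
        	// expected totals.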
  1138  	tests := []struct {
  1139  		name            string
  1140  		beforeMarkReady int
  1141  		beforeNextReady int
  1142  		beforeTimeout   int
  1143  		wantCalls       int
  1144  		patchFailures   int
  1145  		wantPatches     int
  1146  	}{
  1147  		{
  1148  			name:            "timeout after third loop",
  1149  			beforeMarkReady: 9,
  1150  			beforeNextReady: 9,
  1151  			beforeTimeout:   2,
  1152  			wantCalls:       3,
  1153  		},
  1154  		{
  1155  			name:            "already ready on third loop",
  1156  			beforeMarkReady: 9,
  1157  			beforeNextReady: 1,
  1158  			beforeTimeout:   9,
  1159  			wantCalls:       2,
  1160  		},
  1161  		{
  1162  			name:            "turns ready on third loop",
  1163  			beforeMarkReady: 2,
  1164  			beforeNextReady: 9,
  1165  			beforeTimeout:   9,
  1166  			wantCalls:       3,
  1167  			wantPatches:     1,
  1168  		},
  1169  		{
  1170  			name:            "turns ready on second loop then first patch fails",
  1171  			beforeMarkReady: 1,
  1172  			beforeNextReady: 9,
  1173  			beforeTimeout:   9,
  1174  			wantCalls:       3,
  1175  			patchFailures:   1,
  1176  			wantPatches:     2,
  1177  		},
  1178  		{
  1179  			name:            "turns ready on second loop then all patches fail",
  1180  			beforeMarkReady: 1,
  1181  			beforeNextReady: 9,
  1182  			beforeTimeout:   9,
  1183  			wantCalls:       nodeStatusUpdateRetry + 2,
  1184  			patchFailures:   nodeStatusUpdateRetry + 2,
  1185  			wantPatches:     nodeStatusUpdateRetry + 1,
  1186  		},
  1187  	}
  1188  
  1189  	for _, tc := range tests {
  1190  		t.Run(tc.name, func(t *testing.T) {
  1191  			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1192  			defer testKubelet.Cleanup()
  1193  			kubelet := testKubelet.kubelet
  1194  			// Ensure we capture actions on the heartbeat client only.
  1195  			// We don't set it to nil; if we did, GetNode() would not read from the nodeLister.
  1196  			kubelet.kubeClient = &fake.Clientset{}
  1197  			kubeClient := testKubelet.fakeKubeClient
  1198  
  1199  			node := &v1.Node{
  1200  				ObjectMeta: metav1.ObjectMeta{
  1201  					Name: string(kubelet.nodeName),
  1202  				},
  1203  				Status: v1.NodeStatus{
  1204  					Conditions: []v1.NodeCondition{
  1205  						{
  1206  							Type:    v1.NodeReady,
  1207  							Status:  v1.ConditionFalse,
  1208  							Reason:  "NotReady",
  1209  							Message: "Node not ready",
  1210  						},
  1211  					},
  1212  				},
  1213  			}
  1214  
  1215  			nodeLister := testNodeLister{[]*v1.Node{node.DeepCopy()}}
  1216  			kubelet.nodeLister = nodeLister
  1217  
  1218  			callCount := 0
  1219  			// The original node status functions turn the node ready.
  1220  			nodeStatusFuncs := kubelet.setNodeStatusFuncs
  1221  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{func(ctx context.Context, node *v1.Node) error {
  1222  				assert.False(t, kubelet.containerRuntimeReadyExpected)
  1223  				callCount++
  1224  				var lastErr error
  1225  				if callCount > tc.beforeMarkReady {
  1226  					for _, f := range nodeStatusFuncs {
  1227  						if err := f(ctx, node); err != nil {
  1228  							lastErr = err
  1229  						}
  1230  					}
  1231  				}
  1232  				if callCount > tc.beforeNextReady {
  1233  					nodeLister.nodes[0].Status.Conditions[0].Status = v1.ConditionTrue
  1234  				}
  1235  				if callCount > tc.beforeTimeout {
  1236  					testKubelet.fakeClock.Step(nodeReadyGracePeriod)
  1237  				}
  1238  				return lastErr
  1239  			}}
  1240  
  1241  			patchCount := 0
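        			// Fail the first patchFailures patch attempts with a retriable error;
        			// later attempts are left unhandled by this reactor.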
  1242  			kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1243  				assert.False(t, kubelet.containerRuntimeReadyExpected)
  1244  				patchCount++
  1245  				if patchCount > tc.patchFailures {
  1246  					return false, nil, nil
  1247  				}
  1248  				return true, nil, fmt.Errorf("try again")
  1249  			})
  1250  
  1251  			kubelet.fastStatusUpdateOnce()
  1252  
  1253  			assert.True(t, kubelet.containerRuntimeReadyExpected)
  1254  			assert.Equal(t, tc.wantCalls, callCount)
  1255  			assert.Equal(t, tc.wantPatches, patchCount)
  1256  
  1257  			actions := kubeClient.Actions()
  1258  			if tc.wantPatches == 0 {
  1259  				require.Len(t, actions, 0)
  1260  				return
  1261  			}
  1262  
  1263  			// patch, get, patch, get, patch, ... up to initial patch + nodeStatusUpdateRetry patches
  1264  			require.Len(t, actions, 2*tc.wantPatches-1)
  1265  
  1266  			for i, action := range actions {
  1267  				if i%2 == 1 {
  1268  					require.IsType(t, core.GetActionImpl{}, action)
  1269  					continue
  1270  				}
  1271  
  1272  				require.IsType(t, core.PatchActionImpl{}, action)
  1273  				patchAction := action.(core.PatchActionImpl)
  1274  
  1275  				updatedNode, err := applyNodeStatusPatch(node, patchAction.GetPatch())
  1276  				require.NoError(t, err)
  1277  				seenNodeReady := false
  1278  				for _, c := range updatedNode.Status.Conditions {
  1279  					if c.Type == v1.NodeReady {
  1280  						assert.Equal(t, v1.ConditionTrue, c.Status)
  1281  						seenNodeReady = true
  1282  					}
  1283  				}
  1284  				assert.True(t, seenNodeReady)
  1285  			}
  1286  		})
  1287  	}
  1288  }
  1289  
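        // TestRegisterWithApiServer checks that registerWithAPIServer returns once the
        // initial create fails with AlreadyExists and the existing node can be fetched,
        // instead of blocking past the test timeout.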
  1290  func TestRegisterWithApiServer(t *testing.T) {
  1291  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1292  	defer testKubelet.Cleanup()
  1293  	kubelet := testKubelet.kubelet
  1294  	kubeClient := testKubelet.fakeKubeClient
  1295  	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1296  		// Return an error on create.
  1297  		return true, &v1.Node{}, &apierrors.StatusError{
  1298  			ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
  1299  		}
  1300  	})
  1301  	kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1302  		// Return an existing (matching) node on get.
  1303  		return true, &v1.Node{
  1304  			ObjectMeta: metav1.ObjectMeta{
  1305  				Name: testKubeletHostname,
  1306  				Labels: map[string]string{
  1307  					v1.LabelHostname:      testKubeletHostname,
  1308  					v1.LabelOSStable:      goruntime.GOOS,
  1309  					v1.LabelArchStable:    goruntime.GOARCH,
  1310  					kubeletapis.LabelOS:   goruntime.GOOS,
  1311  					kubeletapis.LabelArch: goruntime.GOARCH,
  1312  				},
  1313  			},
  1314  		}, nil
  1315  	})
  1316  
  1317  	kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1318  		if action.GetSubresource() == "status" {
  1319  			return true, nil, nil
  1320  		}
  1321  		return notImplemented(action)
  1322  	})
  1323  
  1324  	addNotImplatedReaction(kubeClient)
  1325  
  1326  	machineInfo := &cadvisorapi.MachineInfo{
  1327  		MachineID:      "123",
  1328  		SystemUUID:     "abc",
  1329  		BootID:         "1b3",
  1330  		NumCores:       2,
  1331  		MemoryCapacity: 1024,
  1332  	}
  1333  	kubelet.setCachedMachineInfo(machineInfo)
  1334  
  1335  	done := make(chan struct{})
  1336  	go func() {
  1337  		kubelet.registerWithAPIServer()
  1338  		done <- struct{}{}
  1339  	}()
  1340  	select {
  1341  	case <-time.After(wait.ForeverTestTimeout):
  1342  		assert.Fail(t, "timed out waiting for registration")
  1343  	case <-done:
  1344  		return
  1345  	}
  1346  }
  1347  
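        // TestTryRegisterWithApiServer exercises tryRegisterWithAPIServer across
        // combinations of create, get, and patch errors, checking the returned result,
        // the number of API actions, and, where applicable, that the controller-managed
        // attach-detach annotation ends up with the expected value on the saved node.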
  1348  func TestTryRegisterWithApiServer(t *testing.T) {
  1349  	alreadyExists := &apierrors.StatusError{
  1350  		ErrStatus: metav1.Status{Reason: metav1.StatusReasonAlreadyExists},
  1351  	}
  1352  
  1353  	conflict := &apierrors.StatusError{
  1354  		ErrStatus: metav1.Status{Reason: metav1.StatusReasonConflict},
  1355  	}
  1356  
  1357  	newNode := func(cmad bool) *v1.Node {
  1358  		node := &v1.Node{
  1359  			ObjectMeta: metav1.ObjectMeta{
  1360  				Labels: map[string]string{
  1361  					v1.LabelHostname:      testKubeletHostname,
  1362  					v1.LabelOSStable:      goruntime.GOOS,
  1363  					v1.LabelArchStable:    goruntime.GOARCH,
  1364  					kubeletapis.LabelOS:   goruntime.GOOS,
  1365  					kubeletapis.LabelArch: goruntime.GOARCH,
  1366  				},
  1367  			},
  1368  		}
  1369  
  1370  		if cmad {
  1371  			node.Annotations = make(map[string]string)
  1372  			node.Annotations[util.ControllerManagedAttachAnnotation] = "true"
  1373  		}
  1374  
  1375  		return node
  1376  	}
  1377  
  1378  	cases := []struct {
  1379  		name            string
  1380  		newNode         *v1.Node
  1381  		existingNode    *v1.Node
  1382  		createError     error
  1383  		getError        error
  1384  		patchError      error
  1385  		deleteError     error
  1386  		expectedResult  bool
  1387  		expectedActions int
  1388  		testSavedNode   bool
  1389  		savedNodeIndex  int
  1390  		savedNodeCMAD   bool
  1391  	}{
  1392  		{
  1393  			name:            "success case - new node",
  1394  			newNode:         &v1.Node{},
  1395  			expectedResult:  true,
  1396  			expectedActions: 1,
  1397  		},
  1398  		{
  1399  			name:            "success case - existing node - no change in CMAD",
  1400  			newNode:         newNode(true),
  1401  			createError:     alreadyExists,
  1402  			existingNode:    newNode(true),
  1403  			expectedResult:  true,
  1404  			expectedActions: 2,
  1405  		},
  1406  		{
  1407  			name:            "success case - existing node - CMAD disabled",
  1408  			newNode:         newNode(false),
  1409  			createError:     alreadyExists,
  1410  			existingNode:    newNode(true),
  1411  			expectedResult:  true,
  1412  			expectedActions: 3,
  1413  			testSavedNode:   true,
  1414  			savedNodeIndex:  2,
  1415  			savedNodeCMAD:   false,
  1416  		},
  1417  		{
  1418  			name:            "success case - existing node - CMAD enabled",
  1419  			newNode:         newNode(true),
  1420  			createError:     alreadyExists,
  1421  			existingNode:    newNode(false),
  1422  			expectedResult:  true,
  1423  			expectedActions: 3,
  1424  			testSavedNode:   true,
  1425  			savedNodeIndex:  2,
  1426  			savedNodeCMAD:   true,
  1427  		},
  1428  		{
  1429  			name:            "create failed",
  1430  			newNode:         newNode(false),
  1431  			createError:     conflict,
  1432  			expectedResult:  false,
  1433  			expectedActions: 1,
  1434  		},
  1435  		{
  1436  			name:            "get existing node failed",
  1437  			newNode:         newNode(false),
  1438  			createError:     alreadyExists,
  1439  			getError:        conflict,
  1440  			expectedResult:  false,
  1441  			expectedActions: 2,
  1442  		},
  1443  		{
  1444  			name:            "update existing node failed",
  1445  			newNode:         newNode(false),
  1446  			createError:     alreadyExists,
  1447  			existingNode:    newNode(true),
  1448  			patchError:      conflict,
  1449  			expectedResult:  false,
  1450  			expectedActions: 3,
  1451  		},
  1452  	}
  1453  
  1454  	for _, tc := range cases {
  1455  		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled is a don't-care for this test */)
  1456  		defer testKubelet.Cleanup()
  1457  		kubelet := testKubelet.kubelet
  1458  		kubeClient := testKubelet.fakeKubeClient
  1459  
  1460  		kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1461  			return true, nil, tc.createError
  1462  		})
  1463  		kubeClient.AddReactor("get", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1464  			// Return an existing (matching) node on get.
  1465  			return true, tc.existingNode, tc.getError
  1466  		})
  1467  		kubeClient.AddReactor("patch", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1468  			if action.GetSubresource() == "status" {
  1469  				return true, nil, tc.patchError
  1470  			}
  1471  			return notImplemented(action)
  1472  		})
  1473  		kubeClient.AddReactor("delete", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  1474  			return true, nil, tc.deleteError
  1475  		})
  1476  		addNotImplatedReaction(kubeClient)
  1477  
  1478  		result := kubelet.tryRegisterWithAPIServer(tc.newNode)
  1479  		require.Equal(t, tc.expectedResult, result, "test [%s]", tc.name)
  1480  
  1481  		actions := kubeClient.Actions()
  1482  		assert.Len(t, actions, tc.expectedActions, "test [%s]", tc.name)
  1483  
  1484  		if tc.testSavedNode {
  1485  			var savedNode *v1.Node
  1486  
  1487  			t.Logf("actions: %v: %+v", len(actions), actions)
  1488  			action := actions[tc.savedNodeIndex]
  1489  			if action.GetVerb() == "create" {
  1490  				createAction := action.(core.CreateAction)
  1491  				obj := createAction.GetObject()
  1492  				require.IsType(t, &v1.Node{}, obj)
  1493  				savedNode = obj.(*v1.Node)
  1494  			} else if action.GetVerb() == "patch" {
  1495  				patchAction := action.(core.PatchActionImpl)
  1496  				var err error
  1497  				savedNode, err = applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
  1498  				require.NoError(t, err)
  1499  			}
  1500  
  1501  			actualCMAD, _ := strconv.ParseBool(savedNode.Annotations[util.ControllerManagedAttachAnnotation])
  1502  			assert.Equal(t, tc.savedNodeCMAD, actualCMAD, "test [%s]", tc.name)
  1503  		}
  1504  	}
  1505  }
  1506  
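        // TestUpdateNewNodeStatusTooLargeReservation verifies that when the allocatable
        // reservation exceeds capacity (40 CPUs reserved on a 2-CPU machine), the
        // reported allocatable CPU is clamped to zero instead of going negative.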
  1507  func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
  1508  	ctx := context.Background()
  1509  	const nodeStatusMaxImages = 5
  1510  
  1511  	// Generate one more image in inputImageList than we configure the Kubelet to report.
  1512  	inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
  1513  	testKubelet := newTestKubeletWithImageList(
  1514  		t, inputImageList, false /* controllerAttachDetachEnabled */, true /* initFakeVolumePlugin */, true)
  1515  	defer testKubelet.Cleanup()
  1516  	kubelet := testKubelet.kubelet
  1517  	kubelet.nodeStatusMaxImages = nodeStatusMaxImages
  1518  	kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1519  	kubelet.containerManager = &localCM{
  1520  		ContainerManager: cm.NewStubContainerManager(),
  1521  		allocatableReservation: v1.ResourceList{
  1522  			v1.ResourceCPU:              *resource.NewMilliQuantity(40000, resource.DecimalSI),
  1523  			v1.ResourceEphemeralStorage: *resource.NewQuantity(1000, resource.BinarySI),
  1524  		},
  1525  		capacity: v1.ResourceList{
  1526  			v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1527  			v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1528  			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  1529  		},
  1530  	}
  1531  	// Since this test retroactively overrides the stub container manager,
  1532  	// we have to regenerate default status setters.
  1533  	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()
  1534  
  1535  	kubeClient := testKubelet.fakeKubeClient
  1536  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  1537  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  1538  	machineInfo := &cadvisorapi.MachineInfo{
  1539  		MachineID:      "123",
  1540  		SystemUUID:     "abc",
  1541  		BootID:         "1b3",
  1542  		NumCores:       2,
  1543  		MemoryCapacity: 10e9, // 10G
  1544  	}
  1545  	kubelet.setCachedMachineInfo(machineInfo)
  1546  
  1547  	expectedNode := &v1.Node{
  1548  		ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  1549  		Spec:       v1.NodeSpec{},
  1550  		Status: v1.NodeStatus{
  1551  			Capacity: v1.ResourceList{
  1552  				v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1553  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1554  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
  1555  				v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
  1556  			},
  1557  			Allocatable: v1.ResourceList{
  1558  				v1.ResourceCPU:              *resource.NewMilliQuantity(0, resource.DecimalSI),
  1559  				v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1560  				v1.ResourcePods:             *resource.NewQuantity(0, resource.DecimalSI),
  1561  				v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
  1562  			},
  1563  		},
  1564  	}
  1565  
  1566  	kubelet.updateRuntimeUp()
  1567  	assert.NoError(t, kubelet.updateNodeStatus(ctx))
  1568  	actions := kubeClient.Actions()
  1569  	require.Len(t, actions, 2)
  1570  	require.True(t, actions[1].Matches("patch", "nodes"))
  1571  	require.Equal(t, "status", actions[1].GetSubresource())
  1572  
  1573  	updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
  1574  	assert.NoError(t, err)
  1575  	assert.True(t, apiequality.Semantic.DeepEqual(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable), "%s", cmp.Diff(expectedNode.Status.Allocatable, updatedNode.Status.Allocatable))
  1576  }
  1577  
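        // TestUpdateDefaultLabels verifies updateDefaultLabels: default labels from the
        // initial node are added or refreshed on the existing node, labels the kubelet
        // does not own are preserved, and the return value reports whether the existing
        // node was modified.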
  1578  func TestUpdateDefaultLabels(t *testing.T) {
  1579  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  1580  	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
  1581  
  1582  	cases := []struct {
  1583  		name         string
  1584  		initialNode  *v1.Node
  1585  		existingNode *v1.Node
  1586  		needsUpdate  bool
  1587  		finalLabels  map[string]string
  1588  	}{
  1589  		{
  1590  			name: "make sure default labels exist",
  1591  			initialNode: &v1.Node{
  1592  				ObjectMeta: metav1.ObjectMeta{
  1593  					Labels: map[string]string{
  1594  						v1.LabelHostname:                "new-hostname",
  1595  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1596  						v1.LabelTopologyRegion:          "new-zone-region",
  1597  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1598  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1599  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1600  						v1.LabelInstanceType:            "new-instance-type",
  1601  						v1.LabelOSStable:                "new-os",
  1602  						v1.LabelArchStable:              "new-arch",
  1603  					},
  1604  				},
  1605  			},
  1606  			existingNode: &v1.Node{
  1607  				ObjectMeta: metav1.ObjectMeta{
  1608  					Labels: map[string]string{},
  1609  				},
  1610  			},
  1611  			needsUpdate: true,
  1612  			finalLabels: map[string]string{
  1613  				v1.LabelHostname:                "new-hostname",
  1614  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1615  				v1.LabelTopologyRegion:          "new-zone-region",
  1616  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1617  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1618  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1619  				v1.LabelInstanceType:            "new-instance-type",
  1620  				v1.LabelOSStable:                "new-os",
  1621  				v1.LabelArchStable:              "new-arch",
  1622  			},
  1623  		},
  1624  		{
  1625  			name: "make sure default labels are up to date",
  1626  			initialNode: &v1.Node{
  1627  				ObjectMeta: metav1.ObjectMeta{
  1628  					Labels: map[string]string{
  1629  						v1.LabelHostname:                "new-hostname",
  1630  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1631  						v1.LabelTopologyRegion:          "new-zone-region",
  1632  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1633  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1634  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1635  						v1.LabelInstanceType:            "new-instance-type",
  1636  						v1.LabelOSStable:                "new-os",
  1637  						v1.LabelArchStable:              "new-arch",
  1638  					},
  1639  				},
  1640  			},
  1641  			existingNode: &v1.Node{
  1642  				ObjectMeta: metav1.ObjectMeta{
  1643  					Labels: map[string]string{
  1644  						v1.LabelHostname:                "old-hostname",
  1645  						v1.LabelTopologyZone:            "old-zone-failure-domain",
  1646  						v1.LabelTopologyRegion:          "old-zone-region",
  1647  						v1.LabelFailureDomainBetaZone:   "old-zone-failure-domain",
  1648  						v1.LabelFailureDomainBetaRegion: "old-zone-region",
  1649  						v1.LabelInstanceTypeStable:      "old-instance-type",
  1650  						v1.LabelInstanceType:            "old-instance-type",
  1651  						v1.LabelOSStable:                "old-os",
  1652  						v1.LabelArchStable:              "old-arch",
  1653  					},
  1654  				},
  1655  			},
  1656  			needsUpdate: true,
  1657  			finalLabels: map[string]string{
  1658  				v1.LabelHostname:                "new-hostname",
  1659  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1660  				v1.LabelTopologyRegion:          "new-zone-region",
  1661  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1662  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1663  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1664  				v1.LabelInstanceType:            "new-instance-type",
  1665  				v1.LabelOSStable:                "new-os",
  1666  				v1.LabelArchStable:              "new-arch",
  1667  			},
  1668  		},
  1669  		{
  1670  			name: "make sure existing labels do not get deleted",
  1671  			initialNode: &v1.Node{
  1672  				ObjectMeta: metav1.ObjectMeta{
  1673  					Labels: map[string]string{
  1674  						v1.LabelHostname:                "new-hostname",
  1675  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1676  						v1.LabelTopologyRegion:          "new-zone-region",
  1677  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1678  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1679  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1680  						v1.LabelInstanceType:            "new-instance-type",
  1681  						v1.LabelOSStable:                "new-os",
  1682  						v1.LabelArchStable:              "new-arch",
  1683  					},
  1684  				},
  1685  			},
  1686  			existingNode: &v1.Node{
  1687  				ObjectMeta: metav1.ObjectMeta{
  1688  					Labels: map[string]string{
  1689  						v1.LabelHostname:                "new-hostname",
  1690  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1691  						v1.LabelTopologyRegion:          "new-zone-region",
  1692  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1693  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1694  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1695  						v1.LabelInstanceType:            "new-instance-type",
  1696  						v1.LabelOSStable:                "new-os",
  1697  						v1.LabelArchStable:              "new-arch",
  1698  						"please-persist":                "foo",
  1699  					},
  1700  				},
  1701  			},
  1702  			needsUpdate: false,
  1703  			finalLabels: map[string]string{
  1704  				v1.LabelHostname:                "new-hostname",
  1705  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1706  				v1.LabelTopologyRegion:          "new-zone-region",
  1707  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1708  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1709  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1710  				v1.LabelInstanceType:            "new-instance-type",
  1711  				v1.LabelOSStable:                "new-os",
  1712  				v1.LabelArchStable:              "new-arch",
  1713  				"please-persist":                "foo",
  1714  			},
  1715  		},
  1716  		{
  1717  			name: "make sure existing labels do not get deleted when initial node has no opinion",
  1718  			initialNode: &v1.Node{
  1719  				ObjectMeta: metav1.ObjectMeta{
  1720  					Labels: map[string]string{},
  1721  				},
  1722  			},
  1723  			existingNode: &v1.Node{
  1724  				ObjectMeta: metav1.ObjectMeta{
  1725  					Labels: map[string]string{
  1726  						v1.LabelHostname:                "new-hostname",
  1727  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1728  						v1.LabelTopologyRegion:          "new-zone-region",
  1729  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1730  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1731  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1732  						v1.LabelInstanceType:            "new-instance-type",
  1733  						v1.LabelOSStable:                "new-os",
  1734  						v1.LabelArchStable:              "new-arch",
  1735  						"please-persist":                "foo",
  1736  					},
  1737  				},
  1738  			},
  1739  			needsUpdate: false,
  1740  			finalLabels: map[string]string{
  1741  				v1.LabelHostname:                "new-hostname",
  1742  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1743  				v1.LabelTopologyRegion:          "new-zone-region",
  1744  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1745  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1746  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1747  				v1.LabelInstanceType:            "new-instance-type",
  1748  				v1.LabelOSStable:                "new-os",
  1749  				v1.LabelArchStable:              "new-arch",
  1750  				"please-persist":                "foo",
  1751  			},
  1752  		},
  1753  		{
  1754  			name: "no update needed",
  1755  			initialNode: &v1.Node{
  1756  				ObjectMeta: metav1.ObjectMeta{
  1757  					Labels: map[string]string{
  1758  						v1.LabelHostname:                "new-hostname",
  1759  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1760  						v1.LabelTopologyRegion:          "new-zone-region",
  1761  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1762  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1763  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1764  						v1.LabelInstanceType:            "new-instance-type",
  1765  						v1.LabelOSStable:                "new-os",
  1766  						v1.LabelArchStable:              "new-arch",
  1767  					},
  1768  				},
  1769  			},
  1770  			existingNode: &v1.Node{
  1771  				ObjectMeta: metav1.ObjectMeta{
  1772  					Labels: map[string]string{
  1773  						v1.LabelHostname:                "new-hostname",
  1774  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1775  						v1.LabelTopologyRegion:          "new-zone-region",
  1776  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1777  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1778  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1779  						v1.LabelInstanceType:            "new-instance-type",
  1780  						v1.LabelOSStable:                "new-os",
  1781  						v1.LabelArchStable:              "new-arch",
  1782  					},
  1783  				},
  1784  			},
  1785  			needsUpdate: false,
  1786  			finalLabels: map[string]string{
  1787  				v1.LabelHostname:                "new-hostname",
  1788  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1789  				v1.LabelTopologyRegion:          "new-zone-region",
  1790  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1791  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1792  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1793  				v1.LabelInstanceType:            "new-instance-type",
  1794  				v1.LabelOSStable:                "new-os",
  1795  				v1.LabelArchStable:              "new-arch",
  1796  			},
  1797  		},
  1798  		{
  1799  			name: "not panic when existing node has nil labels",
  1800  			initialNode: &v1.Node{
  1801  				ObjectMeta: metav1.ObjectMeta{
  1802  					Labels: map[string]string{
  1803  						v1.LabelHostname:                "new-hostname",
  1804  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1805  						v1.LabelTopologyRegion:          "new-zone-region",
  1806  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1807  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1808  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1809  						v1.LabelInstanceType:            "new-instance-type",
  1810  						v1.LabelOSStable:                "new-os",
  1811  						v1.LabelArchStable:              "new-arch",
  1812  					},
  1813  				},
  1814  			},
  1815  			existingNode: &v1.Node{
  1816  				ObjectMeta: metav1.ObjectMeta{},
  1817  			},
  1818  			needsUpdate: true,
  1819  			finalLabels: map[string]string{
  1820  				v1.LabelHostname:                "new-hostname",
  1821  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1822  				v1.LabelTopologyRegion:          "new-zone-region",
  1823  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1824  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1825  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1826  				v1.LabelInstanceType:            "new-instance-type",
  1827  				v1.LabelOSStable:                "new-os",
  1828  				v1.LabelArchStable:              "new-arch",
  1829  			},
  1830  		},
  1831  		{
  1832  			name: "backfill required for new stable labels for os/arch/zones/regions/instance-type",
  1833  			initialNode: &v1.Node{
  1834  				ObjectMeta: metav1.ObjectMeta{
  1835  					Labels: map[string]string{
  1836  						v1.LabelHostname:                "new-hostname",
  1837  						v1.LabelTopologyZone:            "new-zone-failure-domain",
  1838  						v1.LabelTopologyRegion:          "new-zone-region",
  1839  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1840  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1841  						v1.LabelInstanceTypeStable:      "new-instance-type",
  1842  						v1.LabelInstanceType:            "new-instance-type",
  1843  						v1.LabelOSStable:                "new-os",
  1844  						v1.LabelArchStable:              "new-arch",
  1845  					},
  1846  				},
  1847  			},
  1848  			existingNode: &v1.Node{
  1849  				ObjectMeta: metav1.ObjectMeta{
  1850  					Labels: map[string]string{
  1851  						v1.LabelHostname:                "new-hostname",
  1852  						v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1853  						v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1854  						v1.LabelInstanceType:            "new-instance-type",
  1855  					},
  1856  				},
  1857  			},
  1858  			needsUpdate: true,
  1859  			finalLabels: map[string]string{
  1860  				v1.LabelHostname:                "new-hostname",
  1861  				v1.LabelTopologyZone:            "new-zone-failure-domain",
  1862  				v1.LabelTopologyRegion:          "new-zone-region",
  1863  				v1.LabelFailureDomainBetaZone:   "new-zone-failure-domain",
  1864  				v1.LabelFailureDomainBetaRegion: "new-zone-region",
  1865  				v1.LabelInstanceTypeStable:      "new-instance-type",
  1866  				v1.LabelInstanceType:            "new-instance-type",
  1867  				v1.LabelOSStable:                "new-os",
  1868  				v1.LabelArchStable:              "new-arch",
  1869  			},
  1870  		},
  1871  	}
  1872  
  1873  	for _, tc := range cases {
  1874  		defer testKubelet.Cleanup()
  1875  		kubelet := testKubelet.kubelet
  1876  
  1877  		needsUpdate := kubelet.updateDefaultLabels(tc.initialNode, tc.existingNode)
  1878  		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  1879  		assert.Equal(t, tc.finalLabels, tc.existingNode.Labels, tc.name)
  1880  	}
  1881  }
  1882  
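        // TestUpdateDefaultResources verifies updateDefaultResources: capacity and
        // allocatable are copied from the initial node only when they are missing on the
        // existing node, and the return value reports whether anything was copied.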
  1883  func TestUpdateDefaultResources(t *testing.T) {
  1884  	cases := []struct {
  1885  		name         string
  1886  		initialNode  *v1.Node
  1887  		existingNode *v1.Node
  1888  		expectedNode *v1.Node
  1889  		needsUpdate  bool
  1890  	}{
  1891  		{
  1892  			name: "no update needed when capacity and allocatable of the existing node are not nil",
  1893  			initialNode: &v1.Node{
  1894  				Status: v1.NodeStatus{
  1895  					Capacity: v1.ResourceList{
  1896  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1897  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1898  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1899  					},
  1900  					Allocatable: v1.ResourceList{
  1901  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1902  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1903  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1904  					},
  1905  				},
  1906  			},
  1907  			existingNode: &v1.Node{
  1908  				Status: v1.NodeStatus{
  1909  					Capacity: v1.ResourceList{
  1910  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1911  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1912  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1913  					},
  1914  					Allocatable: v1.ResourceList{
  1915  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1916  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1917  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1918  					},
  1919  				},
  1920  			},
  1921  			expectedNode: &v1.Node{
  1922  				Status: v1.NodeStatus{
  1923  					Capacity: v1.ResourceList{
  1924  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1925  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1926  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1927  					},
  1928  					Allocatable: v1.ResourceList{
  1929  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1930  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1931  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1932  					},
  1933  				},
  1934  			},
  1935  			needsUpdate: false,
  1936  		}, {
  1937  			name:        "no update needed when capacity and allocatable of the initial node are nil",
  1938  			initialNode: &v1.Node{},
  1939  			existingNode: &v1.Node{
  1940  				Status: v1.NodeStatus{
  1941  					Capacity: v1.ResourceList{
  1942  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1943  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1944  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1945  					},
  1946  					Allocatable: v1.ResourceList{
  1947  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1948  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1949  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1950  					},
  1951  				},
  1952  			},
  1953  			expectedNode: &v1.Node{
  1954  				Status: v1.NodeStatus{
  1955  					Capacity: v1.ResourceList{
  1956  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1957  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1958  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1959  					},
  1960  					Allocatable: v1.ResourceList{
  1961  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1962  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1963  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1964  					},
  1965  				},
  1966  			},
  1967  			needsUpdate: false,
  1968  		}, {
  1969  			name: "update needed when capacity and allocatable of the existing node are nil and capacity and allocatable of the initial node are not nil",
  1970  			initialNode: &v1.Node{
  1971  				Status: v1.NodeStatus{
  1972  					Capacity: v1.ResourceList{
  1973  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1974  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1975  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1976  					},
  1977  					Allocatable: v1.ResourceList{
  1978  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1979  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1980  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1981  					},
  1982  				},
  1983  			},
  1984  			existingNode: &v1.Node{},
  1985  			expectedNode: &v1.Node{
  1986  				Status: v1.NodeStatus{
  1987  					Capacity: v1.ResourceList{
  1988  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1989  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1990  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1991  					},
  1992  					Allocatable: v1.ResourceList{
  1993  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  1994  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  1995  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  1996  					},
  1997  				},
  1998  			},
  1999  			needsUpdate: true,
  2000  		}, {
  2001  			name: "update needed when capacity of the existing node is nil and capacity of the initial node is not nil",
  2002  			initialNode: &v1.Node{
  2003  				Status: v1.NodeStatus{
  2004  					Capacity: v1.ResourceList{
  2005  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2006  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2007  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2008  					},
  2009  				},
  2010  			},
  2011  			existingNode: &v1.Node{
  2012  				Status: v1.NodeStatus{
  2013  					Allocatable: v1.ResourceList{
  2014  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2015  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2016  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2017  					},
  2018  				},
  2019  			},
  2020  			expectedNode: &v1.Node{
  2021  				Status: v1.NodeStatus{
  2022  					Capacity: v1.ResourceList{
  2023  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2024  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2025  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2026  					},
  2027  					Allocatable: v1.ResourceList{
  2028  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2029  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2030  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2031  					},
  2032  				},
  2033  			},
  2034  			needsUpdate: true,
  2035  		}, {
  2036  			name: "update needed when allocatable of the existing node is nil and allocatable of the initial node is not nil",
  2037  			initialNode: &v1.Node{
  2038  				Status: v1.NodeStatus{
  2039  					Allocatable: v1.ResourceList{
  2040  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2041  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2042  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2043  					},
  2044  				},
  2045  			},
  2046  			existingNode: &v1.Node{
  2047  				Status: v1.NodeStatus{
  2048  					Capacity: v1.ResourceList{
  2049  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2050  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2051  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2052  					},
  2053  				},
  2054  			},
  2055  			expectedNode: &v1.Node{
  2056  				Status: v1.NodeStatus{
  2057  					Capacity: v1.ResourceList{
  2058  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2059  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2060  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2061  					},
  2062  					Allocatable: v1.ResourceList{
  2063  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2064  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2065  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2066  					},
  2067  				},
  2068  			},
  2069  			needsUpdate: true,
  2070  		}, {
  2071  			name:         "no update needed but capacity and allocatable of existing node should be initialized",
  2072  			initialNode:  &v1.Node{},
  2073  			existingNode: &v1.Node{},
  2074  			expectedNode: &v1.Node{
  2075  				Status: v1.NodeStatus{
  2076  					Capacity:    v1.ResourceList{},
  2077  					Allocatable: v1.ResourceList{},
  2078  				},
  2079  			},
  2080  			needsUpdate: false,
  2081  		},
  2082  	}
  2083  
  2084  	for _, tc := range cases {
  2085  		t.Run(tc.name, func(T *testing.T) {
  2086  			needsUpdate := updateDefaultResources(tc.initialNode, tc.existingNode)
  2087  			assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2088  			assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2089  		})
  2090  	}
  2091  }
  2092  
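        // TestReconcileHugePageResource verifies reconcileHugePageResource: hugepage
        // capacity and allocatable on the existing node are brought in line with the
        // initial node, covering newly supported sizes, changed quantities, and sizes
        // that are no longer reported.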
  2093  func TestReconcileHugePageResource(t *testing.T) {
  2094  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2095  	hugePageResourceName64Ki := v1.ResourceName("hugepages-64Ki")
  2096  	hugePageResourceName2Mi := v1.ResourceName("hugepages-2Mi")
  2097  	hugePageResourceName1Gi := v1.ResourceName("hugepages-1Gi")
  2098  
  2099  	cases := []struct {
  2100  		name         string
  2101  		testKubelet  *TestKubelet
  2102  		initialNode  *v1.Node
  2103  		existingNode *v1.Node
  2104  		expectedNode *v1.Node
  2105  		needsUpdate  bool
  2106  	}{
  2107  		{
  2108  			name:        "no update needed when all huge page resources are similar",
  2109  			testKubelet: testKubelet,
  2110  			needsUpdate: false,
  2111  			initialNode: &v1.Node{
  2112  				Status: v1.NodeStatus{
  2113  					Capacity: v1.ResourceList{
  2114  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2115  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2116  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2117  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2118  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2119  					},
  2120  					Allocatable: v1.ResourceList{
  2121  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2122  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2123  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2124  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2125  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2126  					},
  2127  				},
  2128  			},
  2129  			existingNode: &v1.Node{
  2130  				Status: v1.NodeStatus{
  2131  					Capacity: v1.ResourceList{
  2132  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2133  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2134  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2135  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2136  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2137  					},
  2138  					Allocatable: v1.ResourceList{
  2139  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2140  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2141  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2142  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2143  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2144  					},
  2145  				},
  2146  			},
  2147  			expectedNode: &v1.Node{
  2148  				Status: v1.NodeStatus{
  2149  					Capacity: v1.ResourceList{
  2150  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2151  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2152  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2153  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2154  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2155  					},
  2156  					Allocatable: v1.ResourceList{
  2157  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2158  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2159  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2160  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2161  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2162  					},
  2163  				},
  2164  			},
  2165  		}, {
  2166  			name:        "update needed when a new huge page resource is supported",
  2167  			testKubelet: testKubelet,
  2168  			needsUpdate: true,
  2169  			initialNode: &v1.Node{
  2170  				Status: v1.NodeStatus{
  2171  					Capacity: v1.ResourceList{
  2172  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2173  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2174  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2175  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2176  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2177  					},
  2178  					Allocatable: v1.ResourceList{
  2179  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2180  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2181  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2182  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2183  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2184  					},
  2185  				},
  2186  			},
  2187  			existingNode: &v1.Node{
  2188  				Status: v1.NodeStatus{
  2189  					Capacity: v1.ResourceList{
  2190  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2191  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2192  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2193  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2194  					},
  2195  					Allocatable: v1.ResourceList{
  2196  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2197  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2198  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2199  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2200  					},
  2201  				},
  2202  			},
  2203  			expectedNode: &v1.Node{
  2204  				Status: v1.NodeStatus{
  2205  					Capacity: v1.ResourceList{
  2206  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2207  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2208  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2209  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2210  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2211  					},
  2212  					Allocatable: v1.ResourceList{
  2213  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2214  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2215  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2216  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2217  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2218  					},
  2219  				},
  2220  			},
  2221  		}, {
  2222  			name:        "update needed when huge page resource quantity has changed",
  2223  			testKubelet: testKubelet,
  2224  			needsUpdate: true,
  2225  			initialNode: &v1.Node{
  2226  				Status: v1.NodeStatus{
  2227  					Capacity: v1.ResourceList{
  2228  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2229  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2230  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2231  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2232  					},
  2233  					Allocatable: v1.ResourceList{
  2234  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2235  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2236  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2237  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2238  					},
  2239  				},
  2240  			},
  2241  			existingNode: &v1.Node{
  2242  				Status: v1.NodeStatus{
  2243  					Capacity: v1.ResourceList{
  2244  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2245  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2246  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2247  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2248  					},
  2249  					Allocatable: v1.ResourceList{
  2250  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2251  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2252  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2253  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2254  					},
  2255  				},
  2256  			},
  2257  			expectedNode: &v1.Node{
  2258  				Status: v1.NodeStatus{
  2259  					Capacity: v1.ResourceList{
  2260  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2261  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2262  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2263  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2264  					},
  2265  					Allocatable: v1.ResourceList{
  2266  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2267  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2268  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2269  						hugePageResourceName1Gi:     resource.MustParse("4Gi"),
  2270  					},
  2271  				},
  2272  			},
  2273  		}, {
  2274  			name:        "update needed when a huge page resource is no longer supported",
  2275  			testKubelet: testKubelet,
  2276  			needsUpdate: true,
  2277  			initialNode: &v1.Node{
  2278  				Status: v1.NodeStatus{
  2279  					Capacity: v1.ResourceList{
  2280  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2281  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2282  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2283  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2284  					},
  2285  					Allocatable: v1.ResourceList{
  2286  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2287  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2288  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2289  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2290  					},
  2291  				},
  2292  			},
  2293  			existingNode: &v1.Node{
  2294  				Status: v1.NodeStatus{
  2295  					Capacity: v1.ResourceList{
  2296  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2297  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2298  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2299  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2300  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2301  					},
  2302  					Allocatable: v1.ResourceList{
  2303  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2304  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2305  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2306  						hugePageResourceName2Mi:     *resource.NewQuantity(0, resource.BinarySI),
  2307  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2308  					},
  2309  				},
  2310  			},
  2311  			expectedNode: &v1.Node{
  2312  				Status: v1.NodeStatus{
  2313  					Capacity: v1.ResourceList{
  2314  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2315  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2316  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2317  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2318  					},
  2319  					Allocatable: v1.ResourceList{
  2320  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2321  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2322  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2323  						hugePageResourceName1Gi:     resource.MustParse("2Gi"),
  2324  					},
  2325  				},
  2326  			},
  2327  		}, {
  2328  			name:        "not panic when capacity or allocatable of existing node is nil",
  2329  			testKubelet: testKubelet,
  2330  			needsUpdate: true,
  2331  			initialNode: &v1.Node{
  2332  				Status: v1.NodeStatus{
  2333  					Capacity: v1.ResourceList{
  2334  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2335  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2336  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2337  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2338  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2339  					},
  2340  					Allocatable: v1.ResourceList{
  2341  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2342  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2343  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2344  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2345  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2346  					},
  2347  				},
  2348  			},
  2349  			existingNode: &v1.Node{
  2350  				Status: v1.NodeStatus{},
  2351  			},
  2352  			expectedNode: &v1.Node{
  2353  				Status: v1.NodeStatus{
  2354  					Capacity: v1.ResourceList{
  2355  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2356  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2357  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2358  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2359  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2360  					},
  2361  					Allocatable: v1.ResourceList{
  2362  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2363  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2364  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2365  						hugePageResourceName2Mi:     resource.MustParse("100Mi"),
  2366  						hugePageResourceName64Ki:    *resource.NewQuantity(0, resource.BinarySI),
  2367  					},
  2368  				},
  2369  			},
  2370  		},
  2371  	}
  2372  
  2373  	for _, tc := range cases {
  2374  		t.Run(tc.name, func(t *testing.T) {
  2375  			defer testKubelet.Cleanup()
  2376  			kubelet := testKubelet.kubelet
  2377  
  2378  			needsUpdate := kubelet.reconcileHugePageResource(tc.initialNode, tc.existingNode)
  2379  			assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2380  			assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2381  		})
  2382  	}
  2383  
  2384  }
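
// The "not panic when capacity or allocatable of existing node is nil" case
// above relies on the reconcile step initializing the existing node's resource
// maps before copying entries into them (including zero quantities such as
// hugePageResourceName64Ki). A minimal sketch of that kind of defensive copy;
// the helper name copyResourceList is hypothetical, and the full behavior lives
// in the kubelet's reconcileHugePageResource exercised by this test.
func copyResourceList(src v1.ResourceList, dst *v1.ResourceList) {
	if *dst == nil {
		// Guard against a nil map so the assignment below cannot panic.
		*dst = v1.ResourceList{}
	}
	for name, quantity := range src {
		(*dst)[name] = quantity.DeepCopy()
	}
}
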
  2385  func TestReconcileExtendedResource(t *testing.T) {
  2386  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2387  	testKubelet.kubelet.kubeClient = nil // ensure only the heartbeat client is used
  2388  	testKubelet.kubelet.containerManager = cm.NewStubContainerManagerWithExtendedResource(true /* shouldResetExtendedResourceCapacity */)
  2389  	testKubeletNoReset := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2390  	defer testKubeletNoReset.Cleanup()
  2391  	extendedResourceName1 := v1.ResourceName("test.com/resource1")
  2392  	extendedResourceName2 := v1.ResourceName("test.com/resource2")
  2393  
  2394  	cases := []struct {
  2395  		name         string
  2396  		testKubelet  *TestKubelet
  2397  		initialNode  *v1.Node
  2398  		existingNode *v1.Node
  2399  		expectedNode *v1.Node
  2400  		needsUpdate  bool
  2401  	}{
  2402  		{
  2403  			name:        "no update needed without extended resource",
  2404  			testKubelet: testKubelet,
  2405  			initialNode: &v1.Node{
  2406  				Status: v1.NodeStatus{
  2407  					Capacity: v1.ResourceList{
  2408  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2409  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2410  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2411  					},
  2412  					Allocatable: v1.ResourceList{
  2413  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2414  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2415  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2416  					},
  2417  				},
  2418  			},
  2419  			existingNode: &v1.Node{
  2420  				Status: v1.NodeStatus{
  2421  					Capacity: v1.ResourceList{
  2422  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2423  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2424  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2425  					},
  2426  					Allocatable: v1.ResourceList{
  2427  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2428  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2429  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2430  					},
  2431  				},
  2432  			},
  2433  			expectedNode: &v1.Node{
  2434  				Status: v1.NodeStatus{
  2435  					Capacity: v1.ResourceList{
  2436  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2437  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2438  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2439  					},
  2440  					Allocatable: v1.ResourceList{
  2441  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2442  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2443  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2444  					},
  2445  				},
  2446  			},
  2447  			needsUpdate: false,
  2448  		},
  2449  		{
  2450  			name:        "extended resource capacity is zeroed",
  2451  			testKubelet: testKubeletNoReset,
  2452  			initialNode: &v1.Node{
  2453  				Status: v1.NodeStatus{
  2454  					Capacity: v1.ResourceList{
  2455  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2456  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2457  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2458  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2459  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2460  					},
  2461  					Allocatable: v1.ResourceList{
  2462  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2463  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2464  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2465  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2466  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2467  					},
  2468  				},
  2469  			},
  2470  			existingNode: &v1.Node{
  2471  				Status: v1.NodeStatus{
  2472  					Capacity: v1.ResourceList{
  2473  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2474  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2475  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2476  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2477  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2478  					},
  2479  					Allocatable: v1.ResourceList{
  2480  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2481  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2482  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2483  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2484  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2485  					},
  2486  				},
  2487  			},
  2488  			expectedNode: &v1.Node{
  2489  				Status: v1.NodeStatus{
  2490  					Capacity: v1.ResourceList{
  2491  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2492  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2493  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2494  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2495  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2496  					},
  2497  					Allocatable: v1.ResourceList{
  2498  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2499  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2500  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2501  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2502  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2503  					},
  2504  				},
  2505  			},
  2506  			needsUpdate: true,
  2507  		},
  2508  		{
  2509  			name:        "not panic when allocatable of existing node is nil",
  2510  			testKubelet: testKubelet,
  2511  			initialNode: &v1.Node{
  2512  				Status: v1.NodeStatus{
  2513  					Capacity: v1.ResourceList{
  2514  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2515  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2516  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2517  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2518  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2519  					},
  2520  					Allocatable: v1.ResourceList{
  2521  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2522  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2523  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2524  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2525  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2526  					},
  2527  				},
  2528  			},
  2529  			existingNode: &v1.Node{
  2530  				Status: v1.NodeStatus{
  2531  					Capacity: v1.ResourceList{
  2532  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2533  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2534  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2535  						extendedResourceName1:       *resource.NewQuantity(int64(2), resource.DecimalSI),
  2536  						extendedResourceName2:       *resource.NewQuantity(int64(10), resource.DecimalSI),
  2537  					},
  2538  				},
  2539  			},
  2540  			expectedNode: &v1.Node{
  2541  				Status: v1.NodeStatus{
  2542  					Capacity: v1.ResourceList{
  2543  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2544  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2545  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2546  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2547  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2548  					},
  2549  					Allocatable: v1.ResourceList{
  2550  						v1.ResourceCPU:              *resource.NewMilliQuantity(2000, resource.DecimalSI),
  2551  						v1.ResourceMemory:           *resource.NewQuantity(10e9, resource.BinarySI),
  2552  						v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
  2553  						extendedResourceName1:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2554  						extendedResourceName2:       *resource.NewQuantity(int64(0), resource.DecimalSI),
  2555  					},
  2556  				},
  2557  			},
  2558  			needsUpdate: true,
  2559  		},
  2560  	}
  2561  
  2562  	for _, tc := range cases {
  2563  		defer testKubelet.Cleanup()
  2564  		kubelet := testKubelet.kubelet
  2565  
  2566  		needsUpdate := kubelet.reconcileExtendedResource(tc.initialNode, tc.existingNode)
  2567  		assert.Equal(t, tc.needsUpdate, needsUpdate, tc.name)
  2568  		assert.Equal(t, tc.expectedNode, tc.existingNode, tc.name)
  2569  	}
  2570  
  2571  }
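
// The cases above expect extended resources such as test.com/resource1 to be
// reset to zero on reconcile while CPU, memory and ephemeral storage are left
// untouched. A minimal sketch of that shape; the name zeroExtendedResources is
// hypothetical, and the real reconcileExtendedResource uses a proper
// extended-resource check rather than this simplified allowlist.
func zeroExtendedResources(rl v1.ResourceList) {
	standard := map[v1.ResourceName]bool{
		v1.ResourceCPU:              true,
		v1.ResourceMemory:           true,
		v1.ResourceEphemeralStorage: true,
	}
	for name := range rl {
		if !standard[name] {
			// Reset anything outside the simplified allowlist to a zero quantity.
			rl[name] = *resource.NewQuantity(0, resource.DecimalSI)
		}
	}
}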
  2572  
  2573  func TestValidateNodeIPParam(t *testing.T) {
  2574  	type test struct {
  2575  		nodeIP   string
  2576  		success  bool
  2577  		testName string
  2578  	}
  2579  	tests := []test{
  2580  		{
  2581  			nodeIP:   "",
  2582  			success:  false,
  2583  			testName: "IP not set",
  2584  		},
  2585  		{
  2586  			nodeIP:   "127.0.0.1",
  2587  			success:  false,
  2588  			testName: "IPv4 loopback address",
  2589  		},
  2590  		{
  2591  			nodeIP:   "::1",
  2592  			success:  false,
  2593  			testName: "IPv6 loopback address",
  2594  		},
  2595  		{
  2596  			nodeIP:   "224.0.0.1",
  2597  			success:  false,
  2598  			testName: "multicast IPv4 address",
  2599  		},
  2600  		{
  2601  			nodeIP:   "ff00::1",
  2602  			success:  false,
  2603  			testName: "multicast IPv6 address",
  2604  		},
  2605  		{
  2606  			nodeIP:   "169.254.0.1",
  2607  			success:  false,
  2608  			testName: "IPv4 link-local unicast address",
  2609  		},
  2610  		{
  2611  			nodeIP:   "fe80::0202:b3ff:fe1e:8329",
  2612  			success:  false,
  2613  			testName: "IPv6 link-local unicast address",
  2614  		},
  2615  		{
  2616  			nodeIP:   "0.0.0.0",
  2617  			success:  false,
  2618  			testName: "Unspecified IPv4 address",
  2619  		},
  2620  		{
  2621  			nodeIP:   "::",
  2622  			success:  false,
  2623  			testName: "Unspecified IPv6 address",
  2624  		},
  2625  		{
  2626  			nodeIP:   "1.2.3.4",
  2627  			success:  false,
  2628  			testName: "IPv4 address that doesn't belong to host",
  2629  		},
  2630  	}
  2631  	addrs, err := net.InterfaceAddrs()
  2632  	if err != nil {
  2633  		t.Fatalf("Unable to obtain a list of the node's unicast interface addresses: %v",
  2634  			err)
  2635  	}
  2636  	for _, addr := range addrs {
  2637  		var ip net.IP
  2638  		switch v := addr.(type) {
  2639  		case *net.IPNet:
  2640  			ip = v.IP
  2641  		case *net.IPAddr:
  2642  			ip = v.IP
  2643  		}
  2644  		if ip.IsLoopback() || ip.IsLinkLocalUnicast() {
  2645  			continue
  2646  		}
  2647  		successTest := test{
  2648  			nodeIP:   ip.String(),
  2649  			success:  true,
  2650  			testName: fmt.Sprintf("Success test case for address %s", ip.String()),
  2651  		}
  2652  		tests = append(tests, successTest)
  2653  	}
  2654  	for _, test := range tests {
  2655  		err := validateNodeIP(netutils.ParseIPSloppy(test.nodeIP))
  2656  		if test.success {
  2657  			assert.NoError(t, err, "test %s", test.testName)
  2658  		} else {
  2659  			assert.Error(t, err, "test %s", test.testName)
  2660  		}
  2661  	}
  2662  }
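
// The table above implies that validateNodeIP rejects loopback, multicast,
// link-local and unspecified addresses before checking that the IP is actually
// assigned to a local interface. A reduced sketch of those predicate checks,
// using only the net package; the helper name is hypothetical.
func rejectSpecialNodeIPs(ip net.IP) error {
	switch {
	case ip == nil:
		return fmt.Errorf("nodeIP must be a valid IP address")
	case ip.IsLoopback():
		return fmt.Errorf("nodeIP can't be a loopback address")
	case ip.IsMulticast():
		return fmt.Errorf("nodeIP can't be a multicast address")
	case ip.IsLinkLocalUnicast():
		return fmt.Errorf("nodeIP can't be a link-local unicast address")
	case ip.IsUnspecified():
		return fmt.Errorf("nodeIP can't be an unspecified address")
	}
	return nil
}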
  2663  
  2664  func TestRegisterWithApiServerWithTaint(t *testing.T) {
  2665  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2666  	defer testKubelet.Cleanup()
  2667  	kubelet := testKubelet.kubelet
  2668  	kubeClient := testKubelet.fakeKubeClient
  2669  
  2670  	machineInfo := &cadvisorapi.MachineInfo{
  2671  		MachineID:      "123",
  2672  		SystemUUID:     "abc",
  2673  		BootID:         "1b3",
  2674  		NumCores:       2,
  2675  		MemoryCapacity: 1024,
  2676  	}
  2677  	kubelet.setCachedMachineInfo(machineInfo)
  2678  
  2679  	var gotNode runtime.Object
  2680  	kubeClient.AddReactor("create", "nodes", func(action core.Action) (bool, runtime.Object, error) {
  2681  		createAction := action.(core.CreateAction)
  2682  		gotNode = createAction.GetObject()
  2683  		return true, gotNode, nil
  2684  	})
  2685  
  2686  	addNotImplatedReaction(kubeClient)
  2687  
  2688  	// Make the node unschedulable.
  2689  	kubelet.registerSchedulable = false
  2690  
  2691  	// Reset kubelet status for each test.
  2692  	kubelet.registrationCompleted = false
  2693  
  2694  	// Register node to apiserver.
  2695  	kubelet.registerWithAPIServer()
  2696  
  2697  	// Check the unschedulable taint.
  2698  	got := gotNode.(*v1.Node)
  2699  	unschedulableTaint := &v1.Taint{
  2700  		Key:    v1.TaintNodeUnschedulable,
  2701  		Effect: v1.TaintEffectNoSchedule,
  2702  	}
  2703  
  2704  	require.True(
  2705  		t,
  2706  		taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
  2707  		"test unschedulable taint for TaintNodesByCondition")
  2708  }
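
// taintutil.TaintExists, used in the assertion above, is conceptually a linear
// scan over the node's taints. A minimal equivalent using only the v1 API; the
// helper name hasTaint is hypothetical.
func hasTaint(taints []v1.Taint, want *v1.Taint) bool {
	for i := range taints {
		// MatchTaint compares key and effect, ignoring the taint value.
		if taints[i].MatchTaint(want) {
			return true
		}
	}
	return false
}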
  2709  
  2710  func TestNodeStatusHasChanged(t *testing.T) {
  2711  	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
  2712  	fakeFuture := metav1.Time{Time: fakeNow.Time.Add(time.Minute)}
  2713  	readyCondition := v1.NodeCondition{
  2714  		Type:               v1.NodeReady,
  2715  		Status:             v1.ConditionTrue,
  2716  		LastHeartbeatTime:  fakeNow,
  2717  		LastTransitionTime: fakeNow,
  2718  	}
  2719  	readyConditionAtDiffHeartbeatTime := v1.NodeCondition{
  2720  		Type:               v1.NodeReady,
  2721  		Status:             v1.ConditionTrue,
  2722  		LastHeartbeatTime:  fakeFuture,
  2723  		LastTransitionTime: fakeNow,
  2724  	}
  2725  	readyConditionAtDiffTransitionTime := v1.NodeCondition{
  2726  		Type:               v1.NodeReady,
  2727  		Status:             v1.ConditionTrue,
  2728  		LastHeartbeatTime:  fakeFuture,
  2729  		LastTransitionTime: fakeFuture,
  2730  	}
  2731  	notReadyCondition := v1.NodeCondition{
  2732  		Type:               v1.NodeReady,
  2733  		Status:             v1.ConditionFalse,
  2734  		LastHeartbeatTime:  fakeNow,
  2735  		LastTransitionTime: fakeNow,
  2736  	}
  2737  	memoryPressureCondition := v1.NodeCondition{
  2738  		Type:               v1.NodeMemoryPressure,
  2739  		Status:             v1.ConditionFalse,
  2740  		LastHeartbeatTime:  fakeNow,
  2741  		LastTransitionTime: fakeNow,
  2742  	}
  2743  	testcases := []struct {
  2744  		name           string
  2745  		originalStatus *v1.NodeStatus
  2746  		status         *v1.NodeStatus
  2747  		expectChange   bool
  2748  	}{
  2749  		{
  2750  			name:           "Node status does not change with nil status.",
  2751  			originalStatus: nil,
  2752  			status:         nil,
  2753  			expectChange:   false,
  2754  		},
  2755  		{
  2756  			name:           "Node status does not change with default status.",
  2757  			originalStatus: &v1.NodeStatus{},
  2758  			status:         &v1.NodeStatus{},
  2759  			expectChange:   false,
  2760  		},
  2761  		{
  2762  			name:           "Node status changes with nil and default status.",
  2763  			originalStatus: nil,
  2764  			status:         &v1.NodeStatus{},
  2765  			expectChange:   true,
  2766  		},
  2767  		{
  2768  			name:           "Node status changes with nil and status.",
  2769  			originalStatus: nil,
  2770  			status: &v1.NodeStatus{
  2771  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2772  			},
  2773  			expectChange: true,
  2774  		},
  2775  		{
  2776  			name:           "Node status does not change with empty conditions.",
  2777  			originalStatus: &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
  2778  			status:         &v1.NodeStatus{Conditions: []v1.NodeCondition{}},
  2779  			expectChange:   false,
  2780  		},
  2781  		{
  2782  			name: "Node status does not change",
  2783  			originalStatus: &v1.NodeStatus{
  2784  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2785  			},
  2786  			status: &v1.NodeStatus{
  2787  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2788  			},
  2789  			expectChange: false,
  2790  		},
  2791  		{
  2792  			name: "Node status does not change even if heartbeat time changes.",
  2793  			originalStatus: &v1.NodeStatus{
  2794  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2795  			},
  2796  			status: &v1.NodeStatus{
  2797  				Conditions: []v1.NodeCondition{readyConditionAtDiffHeartbeatTime, memoryPressureCondition},
  2798  			},
  2799  			expectChange: false,
  2800  		},
  2801  		{
  2802  			name: "Node status does not change even if the order of conditions is different.",
  2803  			originalStatus: &v1.NodeStatus{
  2804  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2805  			},
  2806  			status: &v1.NodeStatus{
  2807  				Conditions: []v1.NodeCondition{memoryPressureCondition, readyConditionAtDiffHeartbeatTime},
  2808  			},
  2809  			expectChange: false,
  2810  		},
  2811  		{
  2812  			name: "Node status changes if condition status differs.",
  2813  			originalStatus: &v1.NodeStatus{
  2814  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2815  			},
  2816  			status: &v1.NodeStatus{
  2817  				Conditions: []v1.NodeCondition{notReadyCondition, memoryPressureCondition},
  2818  			},
  2819  			expectChange: true,
  2820  		},
  2821  		{
  2822  			name: "Node status changes if transition time changes.",
  2823  			originalStatus: &v1.NodeStatus{
  2824  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2825  			},
  2826  			status: &v1.NodeStatus{
  2827  				Conditions: []v1.NodeCondition{readyConditionAtDiffTransitionTime, memoryPressureCondition},
  2828  			},
  2829  			expectChange: true,
  2830  		},
  2831  		{
  2832  			name: "Node status changes with different number of conditions.",
  2833  			originalStatus: &v1.NodeStatus{
  2834  				Conditions: []v1.NodeCondition{readyCondition},
  2835  			},
  2836  			status: &v1.NodeStatus{
  2837  				Conditions: []v1.NodeCondition{readyCondition, memoryPressureCondition},
  2838  			},
  2839  			expectChange: true,
  2840  		},
  2841  		{
  2842  			name: "Node status changes with different phase.",
  2843  			originalStatus: &v1.NodeStatus{
  2844  				Phase:      v1.NodePending,
  2845  				Conditions: []v1.NodeCondition{readyCondition},
  2846  			},
  2847  			status: &v1.NodeStatus{
  2848  				Phase:      v1.NodeRunning,
  2849  				Conditions: []v1.NodeCondition{readyCondition},
  2850  			},
  2851  			expectChange: true,
  2852  		},
  2853  	}
  2854  	for _, tc := range testcases {
  2855  		t.Run(tc.name, func(t *testing.T) {
  2856  			originalStatusCopy := tc.originalStatus.DeepCopy()
  2857  			statusCopy := tc.status.DeepCopy()
  2858  			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
  2859  			changed := nodeStatusHasChanged(tc.originalStatus, tc.status)
  2859  			assert.Equal(t, tc.expectChange, changed, "Expected node status change to be %t, but got %t.", tc.expectChange, changed)
  2860  			assert.True(t, apiequality.Semantic.DeepEqual(originalStatusCopy, tc.originalStatus), "%s", cmp.Diff(originalStatusCopy, tc.originalStatus))
  2861  			assert.True(t, apiequality.Semantic.DeepEqual(statusCopy, tc.status), "%s", cmp.Diff(statusCopy, tc.status))
  2862  		})
  2863  	}
  2864  }
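
// The cases above pin down how nodeStatusHasChanged treats conditions: a change
// in LastHeartbeatTime alone does not count as a status change, a change in
// LastTransitionTime does, and condition order is ignored. A minimal sketch of
// such a comparison for the Conditions field only; the helper name is
// hypothetical.
func conditionsSemanticallyEqual(a, b []v1.NodeCondition) bool {
	strip := func(in []v1.NodeCondition) []v1.NodeCondition {
		out := make([]v1.NodeCondition, 0, len(in))
		for _, c := range in {
			c.LastHeartbeatTime = metav1.Time{} // heartbeat-only updates do not count as changes
			out = append(out, c)
		}
		// Sort by condition type so that ordering differences are ignored.
		sort.Slice(out, func(i, j int) bool { return out[i].Type < out[j].Type })
		return out
	}
	return apiequality.Semantic.DeepEqual(strip(a), strip(b))
}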
  2865  
  2866  func TestUpdateNodeAddresses(t *testing.T) {
  2867  	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
  2868  	defer testKubelet.Cleanup()
  2869  	kubelet := testKubelet.kubelet
  2870  	kubeClient := testKubelet.fakeKubeClient
  2871  
  2872  	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
  2873  	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
  2874  
  2875  	tests := []struct {
  2876  		Name   string
  2877  		Before []v1.NodeAddress
  2878  		After  []v1.NodeAddress
  2879  	}{
  2880  		{
  2881  			Name:   "nil to populated",
  2882  			Before: nil,
  2883  			After: []v1.NodeAddress{
  2884  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2885  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2886  			},
  2887  		},
  2888  		{
  2889  			Name:   "empty to populated",
  2890  			Before: []v1.NodeAddress{},
  2891  			After: []v1.NodeAddress{
  2892  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2893  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2894  			},
  2895  		},
  2896  		{
  2897  			Name: "populated to nil",
  2898  			Before: []v1.NodeAddress{
  2899  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2900  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2901  			},
  2902  			After: nil,
  2903  		},
  2904  		{
  2905  			Name: "populated to empty",
  2906  			Before: []v1.NodeAddress{
  2907  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2908  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2909  			},
  2910  			After: []v1.NodeAddress{},
  2911  		},
  2912  		{
  2913  			Name: "multiple addresses of same type, no change",
  2914  			Before: []v1.NodeAddress{
  2915  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2916  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2917  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2918  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2919  			},
  2920  			After: []v1.NodeAddress{
  2921  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2922  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2923  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2924  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2925  			},
  2926  		},
  2927  		{
  2928  			Name: "1 InternalIP to 2 InternalIP",
  2929  			Before: []v1.NodeAddress{
  2930  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2931  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2932  			},
  2933  			After: []v1.NodeAddress{
  2934  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2935  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2936  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2937  			},
  2938  		},
  2939  		{
  2940  			Name: "2 InternalIP to 1 InternalIP",
  2941  			Before: []v1.NodeAddress{
  2942  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2943  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2944  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2945  			},
  2946  			After: []v1.NodeAddress{
  2947  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2948  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2949  			},
  2950  		},
  2951  		{
  2952  			Name: "2 InternalIP to 2 different InternalIP",
  2953  			Before: []v1.NodeAddress{
  2954  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2955  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2956  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2957  			},
  2958  			After: []v1.NodeAddress{
  2959  				{Type: v1.NodeInternalIP, Address: "127.0.0.3"},
  2960  				{Type: v1.NodeInternalIP, Address: "127.0.0.4"},
  2961  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2962  			},
  2963  		},
  2964  		{
  2965  			Name: "2 InternalIP to reversed order",
  2966  			Before: []v1.NodeAddress{
  2967  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2968  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2969  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2970  			},
  2971  			After: []v1.NodeAddress{
  2972  				{Type: v1.NodeInternalIP, Address: "127.0.0.2"},
  2973  				{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
  2974  				{Type: v1.NodeHostName, Address: testKubeletHostname},
  2975  			},
  2976  		},
  2977  	}
  2978  
  2979  	for _, test := range tests {
  2980  		t.Run(test.Name, func(t *testing.T) {
  2981  			ctx := context.Background()
  2982  			oldNode := &v1.Node{
  2983  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
  2984  				Spec:       v1.NodeSpec{},
  2985  				Status: v1.NodeStatus{
  2986  					Addresses: test.Before,
  2987  				},
  2988  			}
  2989  			expectedNode := &v1.Node{
  2990  				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS, v1.LabelArchStable: goruntime.GOARCH}},
  2991  				Spec:       v1.NodeSpec{},
  2992  				Status: v1.NodeStatus{
  2993  					Addresses: test.After,
  2994  				},
  2995  			}
  2996  
  2997  			_, err := kubeClient.CoreV1().Nodes().Update(ctx, oldNode, metav1.UpdateOptions{})
  2998  			assert.NoError(t, err)
  2999  			kubelet.setNodeStatusFuncs = []func(context.Context, *v1.Node) error{
  3000  				func(_ context.Context, node *v1.Node) error {
  3001  					node.Status.Addresses = expectedNode.Status.Addresses
  3002  					return nil
  3003  				},
  3004  			}
  3005  			assert.NoError(t, kubelet.updateNodeStatus(ctx))
  3006  
  3007  			actions := kubeClient.Actions()
  3008  			lastAction := actions[len(actions)-1]
  3009  			require.IsType(t, core.PatchActionImpl{}, lastAction)
  3010  			patchAction := lastAction.(core.PatchActionImpl)
  3011  
  3012  			updatedNode, err := applyNodeStatusPatch(oldNode, patchAction.GetPatch())
  3013  			require.NoError(t, err)
  3014  
  3015  			assert.True(t, apiequality.Semantic.DeepEqual(updatedNode, expectedNode), "%s", cmp.Diff(expectedNode, updatedNode))
  3016  		})
  3017  	}
  3018  }
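
// applyNodeStatusPatch, used above to verify the PATCH sent by updateNodeStatus,
// amounts to applying a strategic merge patch on top of the old Node object.
// A rough, self-contained sketch of that flow; the helper name
// applyStrategicNodePatch is hypothetical.
func applyStrategicNodePatch(oldNode *v1.Node, patch []byte) (*v1.Node, error) {
	original, err := json.Marshal(oldNode)
	if err != nil {
		return nil, err
	}
	patched, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
	if err != nil {
		return nil, err
	}
	updatedNode := &v1.Node{}
	if err := json.Unmarshal(patched, updatedNode); err != nil {
		return nil, err
	}
	return updatedNode, nil
}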
  3019  
