...

Source file src/k8s.io/kubernetes/pkg/kubelet/lifecycle/predicate_test.go

Documentation: k8s.io/kubernetes/pkg/kubelet/lifecycle

/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package lifecycle

import (
	goruntime "runtime"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
	"k8s.io/kubernetes/pkg/kubelet/types"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
)

var (
	quantity = *resource.NewQuantity(1, resource.DecimalSI)
)

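// TestRemoveMissingExtendedResources verifies that removeMissingExtendedResources
// strips extended-resource requests that the node does not advertise in its
// allocatable list, while leaving requests for advertised resources and all
// Limits untouched.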
func TestRemoveMissingExtendedResources(t *testing.T) {
	for _, test := range []struct {
		desc string
		pod  *v1.Pod
		node *v1.Node

		expectedPod *v1.Pod
	}{
		{
			desc: "extended resources listed only in Limits should be ignored",
			pod: makeTestPod(
				v1.ResourceList{},                        // Requests
				v1.ResourceList{"foo.com/bar": quantity}, // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{},                        // Requests
				v1.ResourceList{"foo.com/bar": quantity}, // Limits
			),
		},
		{
			desc: "requests for resources available in node should not be removed",
			pod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/bar": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
		},
		{
			desc: "requests for resources unavailable in node should be removed",
			pod: makeTestPod(
				v1.ResourceList{"foo.com/bar": quantity}, // Requests
				v1.ResourceList{},                        // Limits
			),
			node: makeTestNode(
				v1.ResourceList{"foo.com/baz": quantity}, // Allocatable
			),
			expectedPod: makeTestPod(
				v1.ResourceList{}, // Requests
				v1.ResourceList{}, // Limits
			),
		},
	} {
		t.Run(test.desc, func(t *testing.T) {
			nodeInfo := schedulerframework.NewNodeInfo()
			nodeInfo.SetNode(test.node)
			pod := removeMissingExtendedResources(test.pod, nodeInfo)
			if diff := cmp.Diff(test.expectedPod, pod); diff != "" {
				t.Errorf("unexpected pod (-want, +got):\n%s", diff)
			}
		})
	}
}

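// makeTestPod returns a pod with a single container that carries the given
// resource requests and limits.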
func makeTestPod(requests, limits v1.ResourceList) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Resources: v1.ResourceRequirements{
						Requests: requests,
						Limits:   limits,
					},
				},
			},
		},
	}
}

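// makeTestNode returns a node whose status advertises the given allocatable
// resources.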
func makeTestNode(allocatable v1.ResourceList) *v1.Node {
	return &v1.Node{
		Status: v1.NodeStatus{
			Allocatable: allocatable,
		},
	}
}

var (
	extendedResourceA = v1.ResourceName("example.com/aaa")
	hugePageResourceA = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
)

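// makeResources builds a node capacity ResourceList from the given values:
// CPU in millicores, memory and ephemeral storage in bytes, a pod count, a
// count of the example extended resource, and 2Mi hugepages.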
func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}

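// makeAllocatableResources builds a node allocatable ResourceList with the
// same shape and units as makeResources.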
func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
	return v1.ResourceList{
		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
		extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
	}
}

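// newResourcePod returns a pod with one container per ResourceList, each
// container requesting the corresponding resources.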
func newResourcePod(containerResources ...v1.ResourceList) *v1.Pod {
	containers := []v1.Container{}
	for _, rl := range containerResources {
		containers = append(containers, v1.Container{
			Resources: v1.ResourceRequirements{Requests: rl},
		})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: containers,
		},
	}
}

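// newPodWithPort returns a pod with a single container that claims the given
// host ports.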
func newPodWithPort(hostPorts ...int) *v1.Pod {
	networkPorts := []v1.ContainerPort{}
	for _, port := range hostPorts {
		networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)})
	}
	return &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Ports: networkPorts,
				},
			},
		},
	}
}

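// TestGeneralPredicates exercises generalFilter, the kubelet-side admission
// counterpart of the scheduler's filter plugins: node resource fit, node
// name, host ports, and taint/toleration. The taint cases below document two
// kubelet-specific behaviors asserted by this test: only NoExecute taints
// cause rejection, and static pods skip the taint check entirely.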
func TestGeneralPredicates(t *testing.T) {
	resourceTests := []struct {
		pod      *v1.Pod
		nodeInfo *schedulerframework.NodeInfo
		node     *v1.Node
		name     string
		reasons  []PredicateFailureReason
	}{
		{
			pod: &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(
				newResourcePod(v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(9, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
				})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "no resources/port/host requested always fits",
		},
		{
			pod: newResourcePod(v1.ResourceList{
				v1.ResourceCPU:    *resource.NewMilliQuantity(8, resource.DecimalSI),
				v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
			}),
			nodeInfo: schedulerframework.NewNodeInfo(
				newResourcePod(v1.ResourceList{
					v1.ResourceCPU:    *resource.NewMilliQuantity(5, resource.DecimalSI),
					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
				})),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{
				&InsufficientResourceError{ResourceName: v1.ResourceCPU, Requested: 8, Used: 5, Capacity: 10},
				&InsufficientResourceError{ResourceName: v1.ResourceMemory, Requested: 10, Used: 19, Capacity: 20},
			},
			name: "not enough cpu and memory resources",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					NodeName: "machine2",
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{nodename.Name, nodename.ErrReason}},
			name:    "host not match",
		},
		{
			pod:      newPodWithPort(123),
			nodeInfo: schedulerframework.NewNodeInfo(newPodWithPort(123)),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{nodeports.Name, nodeports.ErrReason}},
			name:    "hostport conflict",
		},
		{
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					Tolerations: []v1.Toleration{
						{Key: "foo"},
						{Key: "bar"},
					},
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "taint/toleration match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "NoSchedule taint/toleration not match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			reasons: []PredicateFailureReason{&PredicateFailureError{tainttoleration.Name, tainttoleration.ErrReasonNotMatch}},
			name:    "NoExecute taint/toleration not match",
		},
		{
			pod:      &v1.Pod{},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "baz", Effect: v1.TaintEffectPreferNoSchedule},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "PreferNoSchedule taint/toleration not match",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Annotations: map[string]string{
						types.ConfigSourceAnnotationKey: types.FileSource,
					},
				},
			},
			nodeInfo: schedulerframework.NewNodeInfo(),
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
				Spec: v1.NodeSpec{
					Taints: []v1.Taint{
						{Key: "foo", Effect: v1.TaintEffectNoSchedule},
						{Key: "bar", Effect: v1.TaintEffectNoExecute},
					},
				},
				Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0), Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
			},
			name: "static pods ignore taints",
		},
	}
	for _, test := range resourceTests {
		t.Run(test.name, func(t *testing.T) {
			test.nodeInfo.SetNode(test.node)
			reasons := generalFilter(test.pod, test.nodeInfo)
			if diff := cmp.Diff(test.reasons, reasons); diff != "" {
				t.Errorf("unexpected failure reasons (-want, +got):\n%s", diff)
			}
		})
	}
}

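// TestRejectPodAdmissionBasedOnOSSelector verifies that a pod is rejected
// when its kubernetes.io/os label does not match the node's OS. As the cases
// below assert, a stale OS label on the node is reset to the actual
// runtime.GOOS before the comparison, so it cannot mask a mismatch.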
func TestRejectPodAdmissionBasedOnOSSelector(t *testing.T) {
	tests := []struct {
		name            string
		pod             *v1.Pod
		node            *v1.Node
		expectRejection bool
	}{
		{
			name:            "OS label match",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			expectRejection: false,
		},
		{
			name:            "dummyOS label on node, but the underlying OS matches the pod label",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: goruntime.GOOS}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: false,
		},
		{
			name:            "dummyOS label on pod, but the underlying OS doesn't match",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "OS label mismatch, OS label on node object would be reset to correct value",
			pod:             &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "No label selector on the pod, should be admitted",
			pod:             &v1.Pod{},
			node:            &v1.Node{Spec: v1.NodeSpec{}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{v1.LabelOSStable: "dummyOS"}}},
			expectRejection: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualResult := rejectPodAdmissionBasedOnOSSelector(test.pod, test.node)
			if test.expectRejection != actualResult {
				t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
			}
		})
	}
}

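// TestRejectPodAdmissionBasedOnOSField verifies that a pod is rejected when
// spec.os.name does not match the node's OS, and admitted when the field is
// unset or matches.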
func TestRejectPodAdmissionBasedOnOSField(t *testing.T) {
	tests := []struct {
		name            string
		pod             *v1.Pod
		expectRejection bool
	}{
		{
			name:            "OS field match",
			pod:             &v1.Pod{Spec: v1.PodSpec{OS: &v1.PodOS{Name: v1.OSName(goruntime.GOOS)}}},
			expectRejection: false,
		},
		{
			name:            "OS field mismatch",
			pod:             &v1.Pod{Spec: v1.PodSpec{OS: &v1.PodOS{Name: "dummyOS"}}},
			expectRejection: true,
		},
		{
			name:            "no OS field",
			pod:             &v1.Pod{Spec: v1.PodSpec{}},
			expectRejection: false,
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			actualResult := rejectPodAdmissionBasedOnOSField(test.pod)
			if test.expectRejection != actualResult {
				t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
			}
		})
	}
}

