package noderesources

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/klog/v2"

	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// scorer builds a resourceAllocationScorer from the plugin's configuration.
type scorer func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer

// resourceAllocationScorer holds the information needed to calculate a
// resource allocation score for a node.
type resourceAllocationScorer struct {
	Name string

	// useRequested decides whether Requested or NonZeroRequested is used for
	// CPU and memory accounting.
	useRequested bool
	scorer       func(requested, allocable []int64) int64
	resources    []config.ResourceSpec
}
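
// Illustrative sketch (not part of this file): one way to implement the
// `scorer` field above is a least-allocated style function that favors nodes
// with more free capacity. The name exampleLeastAllocatedScorer and the equal
// weighting of resources are assumptions for illustration only; the real
// scoring strategies (least/most allocated, requested-to-capacity ratio) are
// defined elsewhere in this package.
func exampleLeastAllocatedScorer(requested, allocable []int64) int64 {
	var score, count int64
	for i := range requested {
		if allocable[i] == 0 {
			continue
		}
		req := requested[i]
		if req > allocable[i] {
			req = allocable[i]
		}
		// Fraction of free capacity, scaled to the framework's score range.
		score += (allocable[i] - req) * framework.MaxNodeScore / allocable[i]
		count++
	}
	if count == 0 {
		return 0
	}
	return score / count
}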

// score uses the configured scorer function to calculate the node's score for
// the given per-resource pod requests.
func (r *resourceAllocationScorer) score(
	ctx context.Context,
	pod *v1.Pod,
	nodeInfo *framework.NodeInfo,
	podRequests []int64) (int64, *framework.Status) {
	logger := klog.FromContext(ctx)
	node := nodeInfo.Node()

	// No resources to score against means the plugin is misconfigured.
	if len(r.resources) == 0 {
		return 0, framework.NewStatus(framework.Error, "resources not found")
	}

	requested := make([]int64, len(r.resources))
	allocatable := make([]int64, len(r.resources))
	for i := range r.resources {
		alloc, req := r.calculateResourceAllocatableRequest(logger, nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i])
		// Only fill the entry when the resource is actually allocatable on the node.
		if alloc == 0 {
			continue
		}
		allocatable[i] = alloc
		requested[i] = req
	}

	score := r.scorer(requested, allocatable)

	if loggerV := logger.V(10); loggerV.Enabled() { // Guard the expensive logging behind V(10).
		loggerV.Info("Listed internal info for allocatable resources, requested resources and score", "pod",
			klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
			"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
		)
	}

	return score, nil
}

// calculateResourceAllocatableRequest returns two values:
//   - the allocatable quantity of the resource on the node
//   - the aggregated requested quantity on the node, including this pod's request
//
// If the resource is an extended resource and the pod does not request it,
// (0, 0) is returned so that the resource is skipped during scoring.
func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger klog.Logger, nodeInfo *framework.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) {
	requested := nodeInfo.NonZeroRequested
	if r.useRequested {
		requested = nodeInfo.Requested
	}

	// An extended resource the pod does not request is bypassed entirely.
	if podRequest == 0 && schedutil.IsScalarResourceName(resource) {
		return 0, 0
	}
	switch resource {
	case v1.ResourceCPU:
		return nodeInfo.Allocatable.MilliCPU, (requested.MilliCPU + podRequest)
	case v1.ResourceMemory:
		return nodeInfo.Allocatable.Memory, (requested.Memory + podRequest)
	case v1.ResourceEphemeralStorage:
		return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest)
	default:
		if _, exists := nodeInfo.Allocatable.ScalarResources[resource]; exists {
			return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest)
		}
	}
	logger.V(10).Info("Requested resource is omitted for node score calculation", "resourceName", resource)
	return 0, 0
}

// calculatePodResourceRequest returns the pod's total request for resourceName
// (in millicores for CPU, otherwise in the resource's base unit), including pod
// Overhead when it is set. When useRequested is false, containers that omit
// their requests are assigned the scheduler's non-zero defaults.
func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 {
	opts := resourcehelper.PodResourcesOptions{
		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
	}
	if !r.useRequested {
		opts.NonMissingContainerRequests = v1.ResourceList{
			v1.ResourceCPU:    *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
			v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
		}
	}

	requests := resourcehelper.PodRequests(pod, opts)

	quantity := requests[resourceName]
	if resourceName == v1.ResourceCPU {
		return quantity.MilliValue()
	}
	return quantity.Value()
}
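
// Illustrative sketch (not part of this file): with useRequested=false, a
// container that sets no requests is treated as asking for the scheduler's
// non-zero defaults (schedutil.DefaultMilliCPURequest / DefaultMemoryRequest),
// so "empty" pods still count against a node. exampleDefaultedCPURequest is a
// hypothetical helper added only to demonstrate that behavior.
func exampleDefaultedCPURequest(r *resourceAllocationScorer) int64 {
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "app"}}, // no resource requests set
		},
	}
	// Expected to return schedutil.DefaultMilliCPURequest (in millicores)
	// when r.useRequested is false, and 0 when it is true.
	return r.calculatePodResourceRequest(pod, v1.ResourceCPU)
}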

// calculatePodResourceRequestList computes the pod's request for each of the
// configured resources, preserving their order.
func (r *resourceAllocationScorer) calculatePodResourceRequestList(pod *v1.Pod, resources []config.ResourceSpec) []int64 {
	podRequests := make([]int64, len(resources))
	for i := range resources {
		podRequests[i] = r.calculatePodResourceRequest(pod, v1.ResourceName(resources[i].Name))
	}
	return podRequests
}
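
// Illustrative sketch (not part of this file): how a Score plugin built on
// resourceAllocationScorer would typically tie the helpers above together.
// The name exampleScoreNode is an assumption; the real plugins obtain nodeInfo
// from the framework's snapshot inside their Score method.
func exampleScoreNode(ctx context.Context, r *resourceAllocationScorer, pod *v1.Pod, nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
	// Pre-compute the pod's per-resource requests once, then score the node.
	podRequests := r.calculatePodResourceRequestList(pod, r.resources)
	return r.score(ctx, pod, nodeInfo, podRequests)
}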