package cm

import (
	"fmt"
	"strconv"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"

	v1 "k8s.io/api/core/v1"
	internalapi "k8s.io/cri-api/pkg/apis"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/pkg/kubelet/apis/podresources"
	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
	"k8s.io/kubernetes/pkg/kubelet/config"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
	"k8s.io/kubernetes/pkg/kubelet/pluginmanager/cache"
	"k8s.io/kubernetes/pkg/kubelet/status"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/utils/cpuset"
)

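// ActivePodsFunc returns the pods that are currently active on the node.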
type ActivePodsFunc func() []*v1.Pod

// ContainerManager manages the containers and node-level resources (cgroups,
// device plugins, and the CPU, memory, and topology managers) of a machine.
type ContainerManager interface {
	// Start runs the container manager's housekeeping: it sets up the
	// node-level cgroups and starts the internal resource managers and
	// their reconciliation loops.
	Start(*v1.Node, ActivePodsFunc, config.SourcesReady, status.PodStatusProvider, internalapi.RuntimeService, bool) error

	// SystemCgroupsLimit returns the resources allocated to system cgroups on
	// the machine. These cgroups include the system and Kubernetes services.
	SystemCgroupsLimit() v1.ResourceList

	// GetNodeConfig returns the NodeConfig that is being used by the container manager.
	GetNodeConfig() NodeConfig

	// Status returns internal Status.
	Status() Status

	// NewPodContainerManager is a factory method that returns a PodContainerManager.
	// It returns a no-op implementation if the QoS cgroup hierarchy is not enabled.
	NewPodContainerManager() PodContainerManager

	// GetMountedSubsystems returns the mounted cgroup subsystems on the node.
	GetMountedSubsystems() *CgroupSubsystems

	// GetQOSContainersInfo returns the names of the top-level QoS containers.
	GetQOSContainersInfo() QOSContainersInfo

	// GetNodeAllocatableReservation returns the amount of compute resources that have to be reserved from scheduling.
	GetNodeAllocatableReservation() v1.ResourceList

	// GetCapacity returns the amount of compute resources tracked by the container manager available on the node.
	GetCapacity(localStorageCapacityIsolation bool) v1.ResourceList

	// GetDevicePluginResourceCapacity returns the node capacity (amount of total device plugin resources),
	// node allocatable (amount of total healthy resources reported by device plugins),
	// and inactive device plugin resources previously registered on the node.
	GetDevicePluginResourceCapacity() (v1.ResourceList, v1.ResourceList, []string)

	// UpdateQOSCgroups performs housekeeping updates to ensure that the top-level
	// QoS containers have their desired state in a thread-safe way.
	UpdateQOSCgroups() error

	// GetResources returns RunContainerOptions with the devices, mounts, and env fields
	// populated for the extended resources required by the container.
	GetResources(pod *v1.Pod, container *v1.Container) (*kubecontainer.RunContainerOptions, error)

	// UpdatePluginResources calls Allocate of the device plugin handler for potential
	// requests for device plugin resources, and returns an error if that fails.
	// Otherwise, it updates the allocatable resources in nodeInfo if necessary, to
	// make sure they are at least equal to the pod's requested capacity for any
	// registered device plugin resource.
	UpdatePluginResources(*schedulerframework.NodeInfo, *lifecycle.PodAdmitAttributes) error

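	// InternalContainerLifecycle returns the hooks the kubelet invokes around
	// container lifecycle events (for example before a container is started).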
	InternalContainerLifecycle() InternalContainerLifecycle

	// GetPodCgroupRoot returns the cgroup which contains all pods.
	GetPodCgroupRoot() string

	// GetPluginRegistrationHandler returns a plugin registration handler.
	// The pluginwatcher's handlers allow a single module to handle plugin
	// registration.
	GetPluginRegistrationHandler() cache.PluginHandler

	// ShouldResetExtendedResourceCapacity returns whether the extended resources
	// should be zeroed, due to node recreation.
	ShouldResetExtendedResourceCapacity() bool

	// GetAllocateResourcesPodAdmitHandler returns a PodAdmitHandler responsible for allocating pod resources.
	GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler

	// GetNodeAllocatableAbsolute returns the absolute value of Node Allocatable, which is primarily useful for enforcement.
	GetNodeAllocatableAbsolute() v1.ResourceList

	// PrepareDynamicResources prepares dynamic pod resources.
	PrepareDynamicResources(*v1.Pod) error

	// UnprepareDynamicResources unprepares dynamic pod resources.
	UnprepareDynamicResources(*v1.Pod) error

	// PodMightNeedToUnprepareResources returns true if the pod with the given UID
	// might need to unprepare resources.
	PodMightNeedToUnprepareResources(UID types.UID) bool

	// Implements the PodResources Provider API.
	podresources.CPUsProvider
	podresources.DevicesProvider
	podresources.MemoryProvider
	podresources.DynamicResourcesProvider
}

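// NodeConfig holds the node-level configuration the kubelet passes to the container manager.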
type NodeConfig struct {
	NodeName types.NodeName
	RuntimeCgroupsName string
	SystemCgroupsName string
	KubeletCgroupsName string
	KubeletOOMScoreAdj int32
	ContainerRuntime string
	CgroupsPerQOS bool
	CgroupRoot string
	CgroupDriver string
	KubeletRootDir string
	ProtectKernelDefaults bool
	NodeAllocatableConfig
	QOSReserved map[v1.ResourceName]int64
	CPUManagerPolicy string
	CPUManagerPolicyOptions map[string]string
	TopologyManagerScope string
	CPUManagerReconcilePeriod time.Duration
	ExperimentalMemoryManagerPolicy string
	ExperimentalMemoryManagerReservedMemory []kubeletconfig.MemoryReservation
	PodPidsLimit int64
	EnforceCPULimits bool
	CPUCFSQuotaPeriod time.Duration
	TopologyManagerPolicy string
	TopologyManagerPolicyOptions map[string]string
}

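// NodeAllocatableConfig holds the reservations and enforcement settings used to compute Node Allocatable.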
type NodeAllocatableConfig struct {
	KubeReservedCgroupName string
	SystemReservedCgroupName string
	ReservedSystemCPUs cpuset.CPUSet
	EnforceNodeAllocatable sets.Set[string]
	KubeReserved v1.ResourceList
	SystemReserved v1.ResourceList
	HardEvictionThresholds []evictionapi.Threshold
}

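// Status holds the runtime status reported by the container manager.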
type Status struct {
	// SoftRequirements records any soft requirements that were not met.
	SoftRequirements error
}

// parsePercentage parses a string of the form "N%" and returns N as an int64,
// rejecting values outside the range [0, 100].
func parsePercentage(v string) (int64, error) {
	if !strings.HasSuffix(v, "%") {
		return 0, fmt.Errorf("percentage expected, got '%s'", v)
	}
	percentage, err := strconv.ParseInt(strings.TrimRight(v, "%"), 10, 0)
	if err != nil {
		return 0, fmt.Errorf("invalid number in percentage '%s'", v)
	}
	if percentage < 0 || percentage > 100 {
		return 0, fmt.Errorf("percentage must be between 0 and 100")
	}
	return percentage, nil
}

// ParseQOSReserved parses the --qos-reserved option from its raw map form,
// for example {"memory": "50%"} yields {"memory": 50}.
func ParseQOSReserved(m map[string]string) (*map[v1.ResourceName]int64, error) {
	reservations := make(map[v1.ResourceName]int64)
	for k, v := range m {
		switch v1.ResourceName(k) {
		// Only memory resources are supported.
		case v1.ResourceMemory:
			q, err := parsePercentage(v)
			if err != nil {
				return nil, fmt.Errorf("failed to parse percentage %q for %q resource: %w", v, k, err)
			}
			reservations[v1.ResourceName(k)] = q
		default:
			return nil, fmt.Errorf("cannot reserve %q resource", k)
		}
	}
	return &reservations, nil
}

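// containerDevicesFromResourceDeviceInstances converts the device manager's
// ResourceDeviceInstances into the podresources API ContainerDevices form,
// emitting one entry per device and NUMA node.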
func containerDevicesFromResourceDeviceInstances(devs devicemanager.ResourceDeviceInstances) []*podresourcesapi.ContainerDevices {
	var respDevs []*podresourcesapi.ContainerDevices

	for resourceName, resourceDevs := range devs {
		for devID, dev := range resourceDevs {
			topo := dev.GetTopology()
			if topo == nil {
				// Some device plugins do not report topology information.
				// This is legal, so report the device without a topology hint.
				respDevs = append(respDevs, &podresourcesapi.ContainerDevices{
					ResourceName: resourceName,
					DeviceIds: []string{devID},
				})
				continue
			}

			// A device may be affine to more than one NUMA node, in which case
			// it is reported once per node.
			for _, node := range topo.GetNodes() {
				respDevs = append(respDevs, &podresourcesapi.ContainerDevices{
					ResourceName: resourceName,
					DeviceIds: []string{devID},
					Topology: &podresourcesapi.TopologyInfo{
						Nodes: []*podresourcesapi.NUMANode{
							{
								ID: node.GetID(),
							},
						},
					},
				})
			}
		}
	}

	return respDevs
}