17 package nodeports
18
19 import (
20 "fmt"
21 "reflect"
22 "strconv"
23 "strings"
24 "testing"
25
26 "github.com/google/go-cmp/cmp"
27 "github.com/stretchr/testify/require"
28 v1 "k8s.io/api/core/v1"
29 "k8s.io/klog/v2/ktesting"
30 _ "k8s.io/klog/v2/ktesting/init"
31 "k8s.io/kubernetes/pkg/scheduler/framework"
32 st "k8s.io/kubernetes/pkg/scheduler/testing"
33 )
34
35 func newPod(host string, hostPortInfos ...string) *v1.Pod {
36 networkPorts := []v1.ContainerPort{}
37 for _, portInfo := range hostPortInfos {
38 splited := strings.Split(portInfo, "/")
39 hostPort, _ := strconv.Atoi(splited[2])
40
41 networkPorts = append(networkPorts, v1.ContainerPort{
42 HostIP: splited[1],
43 HostPort: int32(hostPort),
44 Protocol: v1.Protocol(splited[0]),
45 })
46 }
47 return st.MakePod().Node(host).ContainerPort(networkPorts).Obj()
48 }
49
50 func TestNodePorts(t *testing.T) {
51 tests := []struct {
52 pod *v1.Pod
53 nodeInfo *framework.NodeInfo
54 name string
55 wantPreFilterStatus *framework.Status
56 wantFilterStatus *framework.Status
57 }{
58 {
59 pod: &v1.Pod{},
60 nodeInfo: framework.NewNodeInfo(),
61 name: "skip filter",
62 wantPreFilterStatus: framework.NewStatus(framework.Skip),
63 },
64 {
65 pod: newPod("m1", "UDP/127.0.0.1/8080"),
66 nodeInfo: framework.NewNodeInfo(
67 newPod("m1", "UDP/127.0.0.1/9090")),
68 name: "other port",
69 },
70 {
71 pod: newPod("m1", "UDP/127.0.0.1/8080"),
72 nodeInfo: framework.NewNodeInfo(
73 newPod("m1", "UDP/127.0.0.1/8080")),
74 name: "same udp port",
75 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
76 },
77 {
78 pod: newPod("m1", "TCP/127.0.0.1/8080"),
79 nodeInfo: framework.NewNodeInfo(
80 newPod("m1", "TCP/127.0.0.1/8080")),
81 name: "same tcp port",
82 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
83 },
84 {
85 pod: newPod("m1", "TCP/127.0.0.1/8080"),
86 nodeInfo: framework.NewNodeInfo(
87 newPod("m1", "TCP/127.0.0.2/8080")),
88 name: "different host ip",
89 },
90 {
91 pod: newPod("m1", "UDP/127.0.0.1/8080"),
92 nodeInfo: framework.NewNodeInfo(
93 newPod("m1", "TCP/127.0.0.1/8080")),
94 name: "different protocol",
95 },
96 {
97 pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
98 nodeInfo: framework.NewNodeInfo(
99 newPod("m1", "UDP/127.0.0.1/8080")),
100 name: "second udp port conflict",
101 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
102 },
103 {
104 pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
105 nodeInfo: framework.NewNodeInfo(
106 newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
107 name: "first tcp port conflict",
108 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
109 },
110 {
111 pod: newPod("m1", "TCP/0.0.0.0/8001"),
112 nodeInfo: framework.NewNodeInfo(
113 newPod("m1", "TCP/127.0.0.1/8001")),
114 name: "first tcp port conflict due to 0.0.0.0 hostIP",
115 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
116 },
117 {
118 pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
119 nodeInfo: framework.NewNodeInfo(
120 newPod("m1", "TCP/127.0.0.1/8001")),
121 name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
122 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
123 },
124 {
125 pod: newPod("m1", "TCP/127.0.0.1/8001"),
126 nodeInfo: framework.NewNodeInfo(
127 newPod("m1", "TCP/0.0.0.0/8001")),
128 name: "second tcp port conflict to 0.0.0.0 hostIP",
129 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
130 },
131 {
132 pod: newPod("m1", "UDP/127.0.0.1/8001"),
133 nodeInfo: framework.NewNodeInfo(
134 newPod("m1", "TCP/0.0.0.0/8001")),
135 name: "second different protocol",
136 },
137 {
138 pod: newPod("m1", "UDP/127.0.0.1/8001"),
139 nodeInfo: framework.NewNodeInfo(
140 newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
141 name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
142 wantFilterStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
143 },
144 }
145
146 for _, test := range tests {
147 t.Run(test.name, func(t *testing.T) {
148 _, ctx := ktesting.NewTestContext(t)
149 p, err := New(ctx, nil, nil)
150 if err != nil {
151 t.Fatalf("creating plugin: %v", err)
152 }
153 cycleState := framework.NewCycleState()
154 _, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
155 if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
156 t.Errorf("preFilter status does not match (-want,+got): %s", diff)
157 }
158 if preFilterStatus.IsSkip() {
159 return
160 }
161 if !preFilterStatus.IsSuccess() {
162 t.Errorf("prefilter failed with status: %v", preFilterStatus)
163 }
164 gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, test.nodeInfo)
165 if diff := cmp.Diff(test.wantFilterStatus, gotStatus); diff != "" {
166 t.Errorf("filter status does not match (-want, +got): %s", diff)
167 }
168 })
169 }
170 }
171
172 func TestPreFilterDisabled(t *testing.T) {
173 _, ctx := ktesting.NewTestContext(t)
174 pod := &v1.Pod{}
175 nodeInfo := framework.NewNodeInfo()
176 node := v1.Node{}
177 nodeInfo.SetNode(&node)
178 p, err := New(ctx, nil, nil)
179 if err != nil {
180 t.Fatalf("creating plugin: %v", err)
181 }
182 cycleState := framework.NewCycleState()
183 gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
184 wantStatus := framework.AsStatus(fmt.Errorf(`reading "PreFilterNodePorts" from cycleState: %w`, framework.ErrNotFound))
185 if !reflect.DeepEqual(gotStatus, wantStatus) {
186 t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
187 }
188 }
189
190 func TestGetContainerPorts(t *testing.T) {
191 tests := []struct {
192 pod1 *v1.Pod
193 pod2 *v1.Pod
194 expected []*v1.ContainerPort
195 }{
196 {
197 pod1: st.MakePod().ContainerPort([]v1.ContainerPort{
198 {
199 ContainerPort: 8001,
200 HostPort: 8001,
201 Protocol: v1.ProtocolTCP,
202 },
203 {
204 ContainerPort: 8002,
205 HostPort: 8002,
206 Protocol: v1.ProtocolTCP,
207 }}).ContainerPort([]v1.ContainerPort{
208 {
209 ContainerPort: 8003,
210 HostPort: 8003,
211 Protocol: v1.ProtocolTCP,
212 },
213 {
214 ContainerPort: 8004,
215 HostPort: 8004,
216 Protocol: v1.ProtocolTCP,
217 }}).ContainerPort([]v1.ContainerPort{
218 {
219 ContainerPort: 8005,
220 Protocol: v1.ProtocolTCP,
221 },
222 }).Obj(),
223 pod2: st.MakePod().ContainerPort([]v1.ContainerPort{
224 {
225 ContainerPort: 8011,
226 HostPort: 8011,
227 Protocol: v1.ProtocolTCP,
228 },
229 {
230 ContainerPort: 8012,
231 HostPort: 8012,
232 Protocol: v1.ProtocolTCP,
233 }}).ContainerPort([]v1.ContainerPort{
234 {
235 ContainerPort: 8013,
236 HostPort: 8013,
237 Protocol: v1.ProtocolTCP,
238 },
239 {
240 ContainerPort: 8014,
241 HostPort: 8014,
242 Protocol: v1.ProtocolTCP,
243 }}).ContainerPort([]v1.ContainerPort{
244 {
245 ContainerPort: 8015,
246 Protocol: v1.ProtocolTCP,
247 },
248 }).Obj(),
249 expected: []*v1.ContainerPort{
250 {
251 ContainerPort: 8001,
252 HostPort: 8001,
253 Protocol: v1.ProtocolTCP,
254 },
255 {
256 ContainerPort: 8002,
257 HostPort: 8002,
258 Protocol: v1.ProtocolTCP,
259 },
260 {
261 ContainerPort: 8003,
262 HostPort: 8003,
263 Protocol: v1.ProtocolTCP,
264 },
265 {
266 ContainerPort: 8004,
267 HostPort: 8004,
268 Protocol: v1.ProtocolTCP,
269 },
270 {
271 ContainerPort: 8011,
272 HostPort: 8011,
273 Protocol: v1.ProtocolTCP,
274 },
275 {
276 ContainerPort: 8012,
277 HostPort: 8012,
278 Protocol: v1.ProtocolTCP,
279 },
280 {
281 ContainerPort: 8013,
282 HostPort: 8013,
283 Protocol: v1.ProtocolTCP,
284 },
285 {
286 ContainerPort: 8014,
287 HostPort: 8014,
288 Protocol: v1.ProtocolTCP,
289 },
290 },
291 },
292 }
293
294 for i, test := range tests {
295 t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
296 result := getContainerPorts(test.pod1, test.pod2)
297 if diff := cmp.Diff(test.expected, result); diff != "" {
298 t.Errorf("container ports: container ports does not match (-want,+got): %s", diff)
299 }
300 })
301 }
302 }
303
304 func Test_isSchedulableAfterPodDeleted(t *testing.T) {
305 podWithHostPort := st.MakePod().HostPort(8080)
306
307 testcases := map[string]struct {
308 pod *v1.Pod
309 oldObj interface{}
310 expectedHint framework.QueueingHint
311 expectedErr bool
312 }{
313 "backoff-wrong-old-object": {
314 pod: podWithHostPort.Obj(),
315 oldObj: "not-a-pod",
316 expectedHint: framework.Queue,
317 expectedErr: true,
318 },
319 "skip-queue-on-unscheduled": {
320 pod: podWithHostPort.Obj(),
321 oldObj: st.MakePod().Obj(),
322 expectedHint: framework.QueueSkip,
323 },
324 "skip-queue-on-non-hostport": {
325 pod: podWithHostPort.Obj(),
326 oldObj: st.MakePod().Node("fake-node").Obj(),
327 expectedHint: framework.QueueSkip,
328 },
329 "skip-queue-on-unrelated-hostport": {
330 pod: podWithHostPort.Obj(),
331 oldObj: st.MakePod().Node("fake-node").HostPort(8081).Obj(),
332 expectedHint: framework.QueueSkip,
333 },
334 "queue-on-released-hostport": {
335 pod: podWithHostPort.Obj(),
336 oldObj: st.MakePod().Node("fake-node").HostPort(8080).Obj(),
337 expectedHint: framework.Queue,
338 },
339 }
340
341 for name, tc := range testcases {
342 t.Run(name, func(t *testing.T) {
343 logger, ctx := ktesting.NewTestContext(t)
344 p, err := New(ctx, nil, nil)
345 if err != nil {
346 t.Fatalf("Creating plugin: %v", err)
347 }
348 actualHint, err := p.(*NodePorts).isSchedulableAfterPodDeleted(logger, tc.pod, tc.oldObj, nil)
349 if tc.expectedErr {
350 require.Error(t, err)
351 return
352 }
353 require.NoError(t, err)
354 require.Equal(t, tc.expectedHint, actualHint)
355 })
356 }
357 }
358