1
16
17 package network
18
19 import (
20 "context"
21 "fmt"
22 "strings"
23 "time"
24
25 v1 "k8s.io/api/core/v1"
26 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27 "k8s.io/apimachinery/pkg/util/intstr"
28 "k8s.io/apimachinery/pkg/util/wait"
29 clientset "k8s.io/client-go/kubernetes"
30 "k8s.io/kubernetes/test/e2e/framework"
31 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
32 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
33 e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
34 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
35 "k8s.io/kubernetes/test/e2e/network/common"
36 imageutils "k8s.io/kubernetes/test/utils/image"
37 admissionapi "k8s.io/pod-security-admission/api"
38
39 "github.com/onsi/ginkgo/v2"
40 "github.com/onsi/gomega"
41 )
42
// Names and ports shared by all conntrack test cases in this file.
const (
	serviceName = "svc-udp"      // UDP service under test
	podClient   = "pod-client"   // pod that continuously probes the service
	podBackend1 = "pod-server-1" // first backend pod behind the service
	podBackend2 = "pod-server-2" // second backend pod (replaces the first mid-test)
	srcPort     = 12345          // fixed UDP source port the client binds with `nc -p`
)
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
var _ = common.SIGDescribe("Conntrack", func() {

	fr := framework.NewDefaultFramework("conntrack")
	// Some cases below need privileged pods (host network, NET_RAW capability).
	fr.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	// nodeInfo pairs a node name with one of its internal IPs.
	type nodeInfo struct {
		name   string
		nodeIP string
	}

	// Shared state populated in BeforeEach and captured by the test closures.
	var (
		cs                             clientset.Interface
		ns                             string
		clientNodeInfo, serverNodeInfo nodeInfo
	)

	// logContainsFn returns a wait condition that succeeds once the logs of the
	// given pod contain text. The container name is assumed to equal the pod
	// name (the tests below set it that way). Log-fetch errors are treated as
	// retryable rather than fatal so polling continues.
	logContainsFn := func(text, podName string) wait.ConditionWithContextFunc {
		return func(ctx context.Context) (bool, error) {
			logs, err := e2epod.GetPodLogs(ctx, cs, ns, podName, podName)
			if err != nil {
				// Transient failure fetching logs; retry on the next poll.
				return false, nil
			}
			if !strings.Contains(string(logs), text) {
				return false, nil
			}
			return true, nil
		}
	}

	ginkgo.BeforeEach(func(ctx context.Context) {
		cs = fr.ClientSet
		ns = fr.Namespace.Name

		// The tests place the client and the backends on different nodes, so
		// at least two ready, schedulable nodes are required.
		nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
		framework.ExpectNoError(err)
		if len(nodes.Items) < 2 {
			e2eskipper.Skipf(
				"Test requires >= 2 Ready nodes, but there are only %v nodes",
				len(nodes.Items))
		}

		// Select node internal IPs of the family matching the cluster.
		family := v1.IPv4Protocol
		if framework.TestContext.ClusterIsIPv6() {
			family = v1.IPv6Protocol
		}

		ips := e2enode.GetAddressesByTypeAndFamily(&nodes.Items[0], v1.NodeInternalIP, family)
		gomega.Expect(ips).ToNot(gomega.BeEmpty())

		// First node hosts the client pod.
		clientNodeInfo = nodeInfo{
			name:   nodes.Items[0].Name,
			nodeIP: ips[0],
		}

		ips = e2enode.GetAddressesByTypeAndFamily(&nodes.Items[1], v1.NodeInternalIP, family)
		gomega.Expect(ips).ToNot(gomega.BeEmpty())

		// Second node hosts the backend (server) pods.
		serverNodeInfo = nodeInfo{
			name:   nodes.Items[1].Name,
			nodeIP: ips[0],
		}
	})
133
// Verifies that UDP traffic from a client with a fixed source port keeps
// flowing to a NodePort service while the backend pod behind it is replaced.
ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a NodePort service", func(ctx context.Context) {

	// Create a NodePort UDP service exposing port 80.
	udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
	ginkgo.By("creating a UDP service " + serviceName + " with type=NodePort in " + ns)
	udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeNodePort
		svc.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
		}
	})
	framework.ExpectNoError(err)

	// Client pod loops sending UDP datagrams from the fixed source port
	// (nc -p) to the server node's NodePort, logging each attempt.
	ginkgo.By("creating a client pod for probing the service " + serviceName)
	clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
	nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
	e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
	cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, serverNodeInfo.nodeIP, udpService.Spec.Ports[0].NodePort)
	clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
	clientPod.Spec.Containers[0].Name = podClient
	e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)

	// Dump the initial client logs to aid debugging on failure.
	logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
	framework.ExpectNoError(err)
	framework.Logf("Pod client logs: %s", logs)

	// First backend: agnhost netexec echoing its hostname on UDP port 80.
	ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
	serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod1.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)

	// Wait until the service endpoints include backend 1 (helper defined
	// elsewhere in this package).
	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})

	// Backend 1's hostname in the client logs proves the UDP path works.
	ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 1")
	}

	// Bring up a second backend with the same service labels...
	ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
	serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod2.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)

	// ...then delete the first, cycling the service's backends.
	framework.Logf("Cleaning up %s pod", podBackend1)
	e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)

	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})

	// The client keeps the same 5-tuple; traffic must reach backend 2 —
	// i.e. stale conntrack state must not black-hole the flow.
	ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 2")
	}
})
209
// Same scenario as the NodePort case, but the client targets the service's
// ClusterIP instead of a node IP + NodePort.
ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service", func(ctx context.Context) {

	// Create a ClusterIP UDP service exposing port 80.
	udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
	ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
	udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
		}
	})
	framework.ExpectNoError(err)

	// Client pod loops sending UDP datagrams from the fixed source port
	// (nc -p) to the service ClusterIP:Port, logging each attempt.
	ginkgo.By("creating a client pod for probing the service " + serviceName)
	clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
	nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
	e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
	cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
	clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
	clientPod.Spec.Containers[0].Name = podClient
	e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)

	// Dump the initial client logs to aid debugging on failure.
	logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
	framework.ExpectNoError(err)
	framework.Logf("Pod client logs: %s", logs)

	// First backend: agnhost netexec echoing its hostname on UDP port 80.
	ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
	serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod1.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)

	// Wait until the service endpoints include backend 1.
	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})

	// Backend 1's hostname in the client logs proves the UDP path works.
	ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 1")
	}

	// Bring up a second backend with the same service labels...
	ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
	serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod2.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)

	// ...then delete the first, cycling the service's backends.
	framework.Logf("Cleaning up %s pod", podBackend1)
	e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)

	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})

	// Traffic on the unchanged 5-tuple must now reach backend 2.
	ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 2")
	}
})
285
// Same as the ClusterIP case, but the client runs with hostNetwork, so its
// traffic originates from the node's network namespace.
ginkgo.It("should be able to preserve UDP traffic when server pod cycles for a ClusterIP service and client is hostNetwork", func(ctx context.Context) {

	// Create a ClusterIP UDP service exposing port 80.
	udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
	ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
	udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
		}
	})
	framework.ExpectNoError(err)

	// Client pod (host network) loops sending UDP datagrams from the fixed
	// source port (nc -p) to the service ClusterIP:Port.
	ginkgo.By("creating a client pod for probing the service " + serviceName)
	clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
	nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
	e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
	cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
	clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
	clientPod.Spec.Containers[0].Name = podClient
	// The distinguishing bit of this test: client in the host netns.
	clientPod.Spec.HostNetwork = true
	e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)

	// Dump the initial client logs to aid debugging on failure.
	logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
	framework.ExpectNoError(err)
	framework.Logf("Pod client logs: %s", logs)

	// First backend: agnhost netexec echoing its hostname on UDP port 80.
	ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
	serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod1.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)

	// Wait until the service endpoints include backend 1.
	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})

	// Backend 1's hostname in the client logs proves the UDP path works.
	ginkgo.By("checking client pod connected to the backend 1 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 1")
	}

	// Bring up a second backend with the same service labels...
	ginkgo.By("creating a second backend pod " + podBackend2 + " for the service " + serviceName)
	serverPod2 := e2epod.NewAgnhostPod(ns, podBackend2, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod2.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod2.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod2)

	// ...then delete the first, cycling the service's backends.
	framework.Logf("Cleaning up %s pod", podBackend1)
	e2epod.NewPodClient(fr).DeleteSync(ctx, podBackend1, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)

	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend2: {80}})

	// Traffic on the unchanged 5-tuple must now reach backend 2.
	ginkgo.By("checking client pod connected to the backend 2 on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend2, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend 2")
	}
})
362
363
364
365
366
367
368
369
370
371
372
373
// Verifies that UDP traffic sent while the only backend is still unready
// (held back by an init container) is eventually delivered once the backend
// becomes ready — i.e. early conntrack entries created while the service had
// no ready endpoints must not permanently black-hole the flow.
ginkgo.It("should be able to preserve UDP traffic when initial unready endpoints get ready", func(ctx context.Context) {

	// Create a ClusterIP UDP service exposing port 80.
	udpJig := e2eservice.NewTestJig(cs, ns, serviceName)
	ginkgo.By("creating a UDP service " + serviceName + " with type=ClusterIP in " + ns)
	udpService, err := udpJig.CreateUDPService(ctx, func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.Ports = []v1.ServicePort{
			{Port: 80, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(80)},
		}
	})
	framework.ExpectNoError(err)

	// Client pod starts probing BEFORE any backend exists, sending from the
	// fixed source port (nc -p) to the service ClusterIP:Port.
	ginkgo.By("creating a client pod for probing the service " + serviceName)
	clientPod := e2epod.NewAgnhostPod(ns, podClient, nil, nil, nil)
	nodeSelection := e2epod.NodeSelection{Name: clientNodeInfo.name}
	e2epod.SetNodeSelection(&clientPod.Spec, nodeSelection)
	cmd := fmt.Sprintf(`date; for i in $(seq 1 3000); do echo "$(date) Try: ${i}"; echo hostname | nc -u -w 5 -p %d %s %d; echo; done`, srcPort, udpService.Spec.ClusterIP, udpService.Spec.Ports[0].Port)
	clientPod.Spec.Containers[0].Command = []string{"/bin/sh", "-c", cmd}
	clientPod.Spec.Containers[0].Name = podClient
	e2epod.NewPodClient(fr).CreateSync(ctx, clientPod)

	// Dump the initial client logs to aid debugging on failure.
	logs, err := e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
	framework.ExpectNoError(err)
	framework.Logf("Pod client logs: %s", logs)

	// Backend pod with an init container that sleeps 15s, so the pod (and
	// hence the endpoint) stays unready while the client is already probing.
	ginkgo.By("creating a backend pod " + podBackend1 + " for the service " + serviceName)
	serverPod1 := e2epod.NewAgnhostPod(ns, podBackend1, nil, nil, nil, "netexec", fmt.Sprintf("--udp-port=%d", 80))
	serverPod1.Labels = udpJig.Labels
	nodeSelection = e2epod.NodeSelection{Name: serverNodeInfo.name}

	serverPod1.Spec.InitContainers = []v1.Container{
		{
			Name:    "init",
			Image:   imageutils.GetE2EImage(imageutils.BusyBox),
			Command: []string{"/bin/sh", "-c", "echo Pausing start. && sleep 15"},
		},
	}
	e2epod.SetNodeSelection(&serverPod1.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod1)

	// Wait until the backend finally shows up as a ready endpoint.
	validateEndpointsPortsOrFail(ctx, cs, ns, serviceName, portsByPodName{podBackend1: {80}})

	// Once ready, the backend's hostname must appear in the client logs.
	ginkgo.By("checking client pod connected to the backend on Node IP " + serverNodeInfo.nodeIP)
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn(podBackend1, podClient)); err != nil {
		logs, err = e2epod.GetPodLogs(ctx, cs, ns, podClient, podClient)
		framework.ExpectNoError(err)
		framework.Logf("Pod client logs: %s", logs)
		framework.Failf("Failed to connect to backend pod")
	}

})
435
436
437
438
439
440
441
442
443
// Regression test for the invalid-conntrack-state bug (kubernetes#74839):
// the boom-server deliberately sends out-of-window TCP packets to its
// clients; the proxy implementation must not let those packets reach the
// client in a way that triggers a RST tearing down the connection.
ginkgo.It("proxy implementation should not be vulnerable to the invalid conntrack state bug [Privileged]", func(ctx context.Context) {
	serverLabel := map[string]string{
		"app": "boom-server",
	}

	// Server pod built by hand: the RegressionIssue74839 image crafts raw
	// out-of-window packets, hence the NET_RAW capability and the pod IP(s)
	// injected via the downward API.
	serverPod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "boom-server",
			Labels: serverLabel,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "boom-server",
					Image: imageutils.GetE2EImage(imageutils.RegressionIssue74839),
					Ports: []v1.ContainerPort{
						{
							ContainerPort: 9000,
						},
					},
					Env: []v1.EnvVar{
						{
							Name: "POD_IP",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									APIVersion: "v1",
									FieldPath:  "status.podIP",
								},
							},
						},
						{
							Name: "POD_IPS",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									APIVersion: "v1",
									FieldPath:  "status.podIPs",
								},
							},
						},
					},
					SecurityContext: &v1.SecurityContext{
						Capabilities: &v1.Capabilities{
							// Needed to craft/send raw packets.
							Add: []v1.Capability{"NET_RAW"},
						},
					},
				},
			},
		},
	}
	nodeSelection := e2epod.NodeSelection{Name: serverNodeInfo.name}
	e2epod.SetNodeSelection(&serverPod.Spec, nodeSelection)
	e2epod.NewPodClient(fr).CreateSync(ctx, serverPod)
	ginkgo.By("Server pod created on node " + serverNodeInfo.name)

	// TCP service selecting the boom-server pod on port 9000.
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "boom-server",
		},
		Spec: v1.ServiceSpec{
			Selector: serverLabel,
			Ports: []v1.ServicePort{
				{
					Protocol: v1.ProtocolTCP,
					Port:     9000,
				},
			},
		},
	}
	_, err := fr.ClientSet.CoreV1().Services(fr.Namespace.Name).Create(ctx, svc, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Server service created")

	// Client pod: repeatedly opens connections (backgrounded nc every 2s)
	// to the boom-server service so the server has peers to attack.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "startup-script",
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  "startup-script",
					Image: imageutils.GetE2EImage(imageutils.BusyBox),
					Command: []string{
						"sh", "-c", "while true; do sleep 2; nc boom-server 9000& done",
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	nodeSelection = e2epod.NodeSelection{Name: clientNodeInfo.name}
	e2epod.SetNodeSelection(&pod.Spec, nodeSelection)

	e2epod.NewPodClient(fr).CreateSync(ctx, pod)
	ginkgo.By("Client pod created")

	// Inverted poll: the server logs "ERROR" when it receives a RST from a
	// client. Success here means the poll TIMES OUT (err != nil) without
	// ever seeing "ERROR"; finding it (err == nil) is the failure case.
	ginkgo.By("checking client pod does not RST the TCP connection because it receives an out-of-window packet")
	if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, time.Minute, true, logContainsFn("ERROR", "boom-server")); err == nil {
		logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server")
		framework.ExpectNoError(err)
		framework.Logf("boom-server pod logs: %s", logs)
		framework.Failf("boom-server pod received a RST from the client, enabling nf_conntrack_tcp_be_liberal or dropping packets marked invalid by conntrack might help here.")
	}

	// Sanity check: the server must have actually established connections
	// (and thus sent its bad packets), otherwise the test proved nothing.
	logs, err := e2epod.GetPodLogs(ctx, cs, ns, "boom-server", "boom-server")
	framework.ExpectNoError(err)
	if !strings.Contains(logs, "connection established") {
		framework.Logf("boom-server pod logs: %s", logs)
		framework.Failf("boom-server pod did not send any bad packet to the client")
	}
	framework.Logf("boom-server pod logs: %s", logs)
	framework.Logf("boom-server OK: did not receive any RST packet")
})
})
562
View as plain text