package kubectl

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"time"

	openapi_v2 "github.com/google/gnostic-models/openapiv2"
	"github.com/google/go-cmp/cmp"

	"sigs.k8s.io/yaml"

	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	utilnet "k8s.io/apimachinery/pkg/util/net"
	utilnettesting "k8s.io/apimachinery/pkg/util/net/testing"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubectl/pkg/polymorphichelpers"
	"k8s.io/kubernetes/pkg/controller"
	commonutils "k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
	e2edebug "k8s.io/kubernetes/test/e2e/framework/debug"
	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
	"k8s.io/kubernetes/test/e2e/scheduling"
	testutils "k8s.io/kubernetes/test/utils"
	"k8s.io/kubernetes/test/utils/crd"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"
	uexec "k8s.io/utils/exec"
	"k8s.io/utils/pointer"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

const (
	updateDemoSelector        = "name=update-demo"
	guestbookStartupTimeout   = 10 * time.Minute
	guestbookResponseTimeout  = 3 * time.Minute
	simplePodSelector         = "name=httpd"
	simplePodName             = "httpd"
	simplePodResourceName     = "pod/httpd"
	httpdDefaultOutput        = "It works!"
	simplePodPort             = 80
	pausePodSelector          = "name=pause"
	pausePodName              = "pause"
	busyboxPodSelector        = "app=busybox1"
	busyboxPodName            = "busybox1"
	kubeCtlManifestPath       = "test/e2e/testing-manifests/kubectl"
	agnhostControllerFilename = "agnhost-primary-controller.json.in"
	agnhostServiceFilename    = "agnhost-primary-service.json"
	httpdDeployment1Filename  = "httpd-deployment1.yaml.in"
	httpdDeployment2Filename  = "httpd-deployment2.yaml.in"
	httpdDeployment3Filename  = "httpd-deployment3.yaml.in"
	metaPattern               = `"kind":"%s","apiVersion":"%s/%s","metadata":{"name":"%s"}`
)

func unknownFieldMetadataJSON(gvk schema.GroupVersionKind, name string) string {
	return fmt.Sprintf(`"kind":"%s","apiVersion":"%s/%s","metadata":{"unknownMeta": "foo", "name":"%s"}`, gvk.Kind, gvk.Group, gvk.Version, name)
}

var (
	nautilusImage = imageutils.GetE2EImage(imageutils.Nautilus)
	httpdImage    = imageutils.GetE2EImage(imageutils.Httpd)
	busyboxImage  = imageutils.GetE2EImage(imageutils.BusyBox)
	agnhostImage  = imageutils.GetE2EImage(imageutils.Agnhost)

	podRunningTimeoutArg = fmt.Sprintf("--pod-running-timeout=%s", framework.PodStartShortTimeout.String())
)

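// proxyRegexp matches the line "kubectl proxy" prints on startup and captures
// the local port it is serving on.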
var proxyRegexp = regexp.MustCompile("Starting to serve on 127.0.0.1:([0-9]+)")

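// schemaFoo is an OpenAPI v3 validation schema used by the CRD validation
// tests below.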
var schemaFoo = []byte(`description: Foo CRD for Testing
type: object
properties:
  spec:
    type: object
    description: Specification of Foo
    properties:
      bars:
        description: List of Bars and their specs.
        type: array
        items:
          type: object
          required:
          - name
          properties:
            name:
              description: Name of Bar.
              type: string
            age:
              description: Age of Bar.
              type: string
            bazs:
              description: List of Bazs.
              items:
                type: string
              type: array
  status:
    description: Status of Foo
    type: object
    properties:
      bars:
        description: List of Bars and their statuses.
        type: array
        items:
          type: object
          properties:
            name:
              description: Name of Bar.
              type: string
            available:
              description: Whether the Bar is installed.
              type: boolean
            quxType:
              description: Indicates to external qux type.
              pattern: in-tree|out-of-tree
              type: string`)

var schemaFooEmbedded = []byte(`description: Foo CRD with an embedded resource
type: object
properties:
  spec:
    type: object
    properties:
      template:
        type: object
        x-kubernetes-embedded-resource: true
        properties:
          metadata:
            type: object
            properties:
              name:
                type: string
          spec:
            type: object
  metadata:
    type: object
    properties:
      name:
        type: string`)
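// cleanupKubectlInputs deletes the resources described by fileContents from
// namespace ns via "kubectl delete", then asserts that nothing matching the
// given selectors is left behind.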
func cleanupKubectlInputs(fileContents string, ns string, selectors ...string) {
	ginkgo.By("using delete to clean up resources")
	e2ekubectl.RunKubectlOrDieInput(ns, fileContents, "delete", "--grace-period=0", "--force", "-f", "-")
	assertCleanup(ns, selectors...)
}
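// assertCleanup polls until no replication controllers, services, or
// unterminated pods matching the given selectors remain in namespace ns.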
func assertCleanup(ns string, selectors ...string) {
	var e error
	verifyCleanupFunc := func() (bool, error) {
		e = nil
		for _, selector := range selectors {
			resources := e2ekubectl.RunKubectlOrDie(ns, "get", "rc,svc", "-l", selector, "--no-headers")
			if resources != "" {
				e = fmt.Errorf("Resources left running after stop:\n%s", resources)
				return false, nil
			}
			pods := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-l", selector, "-o", "go-template={{ range .items }}{{ if not .metadata.deletionTimestamp }}{{ .metadata.name }}{{ \"\\n\" }}{{ end }}{{ end }}")
			if pods != "" {
				e = fmt.Errorf("Pods left unterminated after stop:\n%s", pods)
				return false, nil
			}
		}
		return true, nil
	}
	err := wait.PollImmediate(500*time.Millisecond, 1*time.Minute, verifyCleanupFunc)
	if err != nil {
		framework.Failf("%v", e)
	}
}

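// readTestFileOrDie reads a manifest from the kubectl testing-manifests
// directory, failing the test on error.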
func readTestFileOrDie(file string) []byte {
	data, err := e2etestfiles.Read(path.Join(kubeCtlManifestPath, file))
	if err != nil {
		framework.Fail(err.Error(), 1)
	}
	return data
}

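// runKubectlRetryOrDie runs kubectl with the given arguments, retrying up to
// five times on optimistic-concurrency conflicts.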
func runKubectlRetryOrDie(ns string, args ...string) string {
	var err error
	var output string
	for i := 0; i < 5; i++ {
		output, err = e2ekubectl.RunKubectl(ns, args...)
		if err == nil || (!strings.Contains(err.Error(), genericregistry.OptimisticLockErrorMsg) && !strings.Contains(err.Error(), "Operation cannot be fulfilled")) {
			break
		}
		time.Sleep(time.Second)
	}
	// Log the last output and expect no error once the retries are exhausted.
	framework.Logf("stdout: %q", output)
	framework.ExpectNoError(err)
	return output
}

var _ = SIGDescribe("Kubectl client", func() {
	defer ginkgo.GinkgoRecover()
	f := framework.NewDefaultFramework("kubectl")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	// Reusable cluster state verification for the agnhost pods used throughout this suite.
	clusterState := func() *framework.ClusterVerification {
		return f.NewClusterVerification(
			f.Namespace,
			framework.PodStateVerification{
				Selectors:   map[string]string{"app": "agnhost"},
				ValidPhases: []v1.PodPhase{v1.PodRunning},
			})
	}
	forEachPod := func(ctx context.Context, podFunc func(p v1.Pod)) {
		_ = clusterState().ForEach(ctx, podFunc)
	}
	var c clientset.Interface
	var ns string
	ginkgo.BeforeEach(func() {
		c = f.ClientSet
		ns = f.Namespace.Name
	})
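	// waitForOrFailWithDebug waits for at least atLeast agnhost pods to reach a
	// valid running state; on failure it dumps namespace state before failing.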
	waitForOrFailWithDebug := func(ctx context.Context, atLeast int) {
		pods, err := clusterState().WaitFor(ctx, atLeast, framework.PodStartTimeout)
		if err != nil || len(pods) < atLeast {
			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, ns)
			framework.Failf("Verified %d of %d pods, error: %v", len(pods), atLeast, err)
		}
	}

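	// debugDiscovery logs the contents of kubectl's local discovery cache to
	// help debug failures caused by stale cached discovery documents.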
	debugDiscovery := func() {
		home := os.Getenv("HOME")
		if len(home) == 0 {
			framework.Logf("no $HOME envvar set")
			return
		}

		cacheDir := filepath.Join(home, ".kube", "cache", "discovery")
		err := filepath.Walk(cacheDir, func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}

			// Only pay attention to <host>/v1/serverresources.json files.
			subpath := strings.TrimPrefix(path, cacheDir+string(filepath.Separator))
			parts := strings.Split(subpath, string(filepath.Separator))
			if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
				return nil
			}
			framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())

			data, readError := os.ReadFile(path)
			if readError != nil {
				framework.Logf("%s error: %v", path, readError)
			} else {
				framework.Logf("%s content: %s", path, string(data))
			}
			return nil
		})
		framework.Logf("scanned %s for discovery docs: %v", home, err)
	}

	ginkgo.Describe("Update Demo", func() {
		var nautilus string
		ginkgo.BeforeEach(func() {
			updateDemoRoot := "test/fixtures/doc-yaml/user-guide/update-demo"
			data, err := e2etestfiles.Read(filepath.Join(updateDemoRoot, "nautilus-rc.yaml.in"))
			if err != nil {
				framework.Fail(err.Error())
			}
			nautilus = commonutils.SubstituteImageName(string(data))
		})

		framework.ConformanceIt("should create and stop a replication controller", func(ctx context.Context) {
			defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)

			ginkgo.By("creating a replication controller")
			e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
			validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
		})

		framework.ConformanceIt("should scale a replication controller", func(ctx context.Context) {
			defer cleanupKubectlInputs(nautilus, ns, updateDemoSelector)

			ginkgo.By("creating a replication controller")
			e2ekubectl.RunKubectlOrDieInput(ns, nautilus, "create", "-f", "-")
			validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
			ginkgo.By("scaling down the replication controller")
			debugDiscovery()
			e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=1", "--timeout=5m")
			validateController(ctx, c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
			ginkgo.By("scaling up the replication controller")
			debugDiscovery()
			e2ekubectl.RunKubectlOrDie(ns, "scale", "rc", "update-demo-nautilus", "--replicas=2", "--timeout=5m")
			validateController(ctx, c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
		})
	})

	ginkgo.Describe("Guestbook application", func() {
		forEachGBFile := func(run func(s string)) {
			guestbookRoot := "test/e2e/testing-manifests/guestbook"
			for _, gbAppFile := range []string{
				"agnhost-replica-service.yaml",
				"agnhost-primary-service.yaml",
				"frontend-service.yaml",
				"frontend-deployment.yaml.in",
				"agnhost-primary-deployment.yaml.in",
				"agnhost-replica-deployment.yaml.in",
			} {
				data, err := e2etestfiles.Read(filepath.Join(guestbookRoot, gbAppFile))
				if err != nil {
					framework.Fail(err.Error())
				}
				contents := commonutils.SubstituteImageName(string(data))
				run(contents)
			}
		}

		framework.ConformanceIt("should create and stop a working application", func(ctx context.Context) {
			defer forEachGBFile(func(contents string) {
				cleanupKubectlInputs(contents, ns)
			})
			ginkgo.By("creating all guestbook components")
			forEachGBFile(func(contents string) {
				framework.Logf("%s", contents)
				e2ekubectl.RunKubectlOrDieInput(ns, contents, "create", "-f", "-")
			})

			ginkgo.By("validating guestbook app")
			validateGuestbookApp(ctx, c, ns)
		})
	})

	ginkgo.Describe("Simple pod", func() {
		var podYaml string
		ginkgo.BeforeEach(func(ctx context.Context) {
			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
			ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, simplePodName, ns, framework.PodStartTimeout))
		})
		ginkgo.AfterEach(func() {
			cleanupKubectlInputs(podYaml, ns, simplePodSelector)
		})

		ginkgo.It("should support exec", func(ctx context.Context) {
			ginkgo.By("executing a command in the container")
			execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container")
			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}

			ginkgo.By("executing a very long command in the container")
			veryLongData := make([]rune, 20000)
			for i := 0; i < len(veryLongData); i++ {
				veryLongData[i] = 'a'
			}
			execOutput = e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", string(veryLongData))
			gomega.Expect(string(veryLongData)).To(gomega.Equal(strings.TrimSpace(execOutput)), "Unexpected kubectl exec output")

			ginkgo.By("executing a command in the container with noninteractive stdin")
			execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "cat").
				WithStdinData("abcd1234").
				ExecOrDie(ns)
			if e, a := "abcd1234", execOutput; e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}

			// Pretend to be an interactive user: the reader blocks until closed.
			r, closer, err := newBlockingReader("echo hi\nexit\n")
			if err != nil {
				framework.Failf("Error creating blocking reader: %v", err)
			}
			// Close the blocking reader during test cleanup.
			defer closer.Close()

			ginkgo.By("executing a command in the container with pseudo-interactive stdin")
			execOutput = e2ekubectl.NewKubectlCommand(ns, "exec", "-i", podRunningTimeoutArg, simplePodName, "--", "sh").
				WithStdinReader(r).
				ExecOrDie(ns)
			if e, a := "hi", strings.TrimSpace(execOutput); e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}
		})

		ginkgo.It("should support exec using resource/name", func(ctx context.Context) {
			ginkgo.By("executing a command in the container")
			execOutput := e2ekubectl.RunKubectlOrDie(ns, "exec", podRunningTimeoutArg, simplePodResourceName, "--", "echo", "running", "in", "container")
			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
			}
		})

		ginkgo.It("should support exec through an HTTP proxy", func(ctx context.Context) {
			testContextHost := getTestContextHost()

			ginkgo.By("Starting http_proxy")
			var proxyLogs bytes.Buffer
			testSrv := httptest.NewServer(utilnettesting.NewHTTPProxyHandler(ginkgo.GinkgoT(), func(req *http.Request) bool {
				fmt.Fprintf(&proxyLogs, "Accepting %s to %s\n", req.Method, req.Host)
				return true
			}))
			defer testSrv.Close()
			proxyAddr := testSrv.URL

			for _, proxyVar := range []string{"https_proxy", "HTTPS_PROXY"} {
				proxyLogs.Reset()
				ginkgo.By("Running kubectl via an HTTP proxy using " + proxyVar)
				output := e2ekubectl.NewKubectlCommand(ns, "exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container").
					AppendEnv(append(os.Environ(), fmt.Sprintf("%s=%s", proxyVar, proxyAddr))).
					ExecOrDie(ns)

				// Verify the exec output itself came back unchanged.
				expectedExecOutput := "running in container\n"
				if output != expectedExecOutput {
					framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
				}

				// Verify the proxy server saw the CONNECT to the API server host.
				expectedProxyLog := fmt.Sprintf("Accepting CONNECT to %s", strings.TrimSuffix(strings.TrimPrefix(testContextHost, "https://"), "/api"))

				proxyLog := proxyLogs.String()
				if !strings.Contains(proxyLog, expectedProxyLog) {
					framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
				}
			}
		})

		ginkgo.It("should support exec through kubectl proxy", func(ctx context.Context) {
			_ = getTestContextHost()

			ginkgo.By("Starting kubectl proxy")
			port, proxyCmd, err := startProxyServer(ns)
			framework.ExpectNoError(err)
			defer framework.TryKill(proxyCmd)

			// Run the exec command against the local proxy endpoint.
			host := fmt.Sprintf("--server=http://127.0.0.1:%d", port)
			ginkgo.By("Running kubectl via kubectl proxy using " + host)
			output := e2ekubectl.NewKubectlCommand(
				ns, host,
				"exec", podRunningTimeoutArg, simplePodName, "--", "echo", "running", "in", "container",
			).ExecOrDie(ns)

			// Verify the exec output itself came back unchanged.
			expectedExecOutput := "running in container\n"
			if output != expectedExecOutput {
				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
			}
		})

		ginkgo.Context("should return command exit codes", func() {
			ginkgo.It("execing into a container with a successful command", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 0").Exec()
				framework.ExpectNoError(err)
			})

			ginkgo.It("execing into a container with a failing command", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "exec", simplePodName, podRunningTimeoutArg, "--", "/bin/sh", "-c", "exit 42").Exec()
				ee, ok := err.(uexec.ExitError)
				if !ok {
					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
				}
				gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42))
			})

			ginkgo.It("should support port-forward", func(ctx context.Context) {
				ginkgo.By("forwarding the container port to a local port")
				cmd := runPortForward(ns, simplePodName, simplePodPort)
				defer cmd.Stop()

				ginkgo.By("curling local port output")
				localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
				body, err := curl(localAddr)
				framework.Logf("got: %s", body)
				if err != nil {
					framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
				}
				if !strings.Contains(body, httpdDefaultOutput) {
					framework.Failf("Container port output missing expected value. Wanted %q, got: %s", httpdDefaultOutput, body)
				}
			})

			ginkgo.It("should handle in-cluster config", func(ctx context.Context) {
				// The test copies the local kubectl binary into the pod, which is
				// not workable on all providers; skip where it cannot run.
				e2eskipper.SkipIfProviderIs("gke")

				// The copied kubectl binary is only expected to run on amd64 nodes.
				e2eskipper.SkipUnlessNodeOSArchIs("amd64")

				ginkgo.By("adding rbac permissions")
				// Grant the "view" role to the pod's default service account.
				err := e2eauth.BindClusterRole(ctx, f.ClientSet.RbacV1(), "view", f.Namespace.Name,
					rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
				framework.ExpectNoError(err)

				err = e2eauth.WaitForAuthorizationUpdate(ctx, f.ClientSet.AuthorizationV1(),
					serviceaccount.MakeUsername(f.Namespace.Name, "default"),
					f.Namespace.Name, "list", schema.GroupResource{Resource: "pods"}, true)
				framework.ExpectNoError(err)

				ginkgo.By("overriding icc with values provided by flags")
				kubectlPath := framework.TestContext.KubectlPath
				// Normalize the kubectl path: we may be given a kubectl.sh wrapper.
				kubectlPathNormalizer := exec.Command("which", kubectlPath)
				if strings.HasSuffix(kubectlPath, "kubectl.sh") {
					kubectlPathNormalizer = exec.Command(kubectlPath, "path")
				}
				kubectlPathNormalized, err := kubectlPathNormalizer.Output()
				framework.ExpectNoError(err)
				kubectlPath = strings.TrimSpace(string(kubectlPathNormalized))

				inClusterHost := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
				inClusterPort := strings.TrimSpace(e2eoutput.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
				inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
				framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
				e2ekubectl.RunKubectlOrDie(ns, "cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")

				// Build a kubeconfig that reuses the injected CA and token but
				// points at the cluster DNS host and the default namespace.
				tmpDir, err := os.MkdirTemp("", "icc-override")
				overrideKubeconfigName := "icc-override.kubeconfig"
				framework.ExpectNoError(err)
				defer func() { os.RemoveAll(tmpDir) }()
				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, overrideKubeconfigName), []byte(`
kind: Config
apiVersion: v1
clusters:
- cluster:
    api-version: v1
    server: https://kubernetes.default.svc:443
    certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
  name: kubeconfig-cluster
contexts:
- context:
    cluster: kubeconfig-cluster
    namespace: default
    user: kubeconfig-user
  name: kubeconfig-context
current-context: kubeconfig-context
users:
- name: kubeconfig-user
  user:
    tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
`), os.FileMode(0755)))
				framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")

				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap with namespace and invalid name"
  namespace: configmap-namespace
`), os.FileMode(0755)))
				framework.ExpectNoError(os.WriteFile(filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), []byte(`
kind: ConfigMap
apiVersion: v1
metadata:
  name: "configmap without namespace and invalid name"
`), os.FileMode(0755)))
				framework.Logf("copying configmap manifests to the %s pod", simplePodName)
				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
				e2ekubectl.RunKubectlOrDie(ns, "cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")

				ginkgo.By("getting pods with in-cluster configs")
				execOutput := e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --v=6 2>&1")
				gomega.Expect(execOutput).To(gomega.MatchRegexp("httpd +1/1 +Running"))
				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))

				ginkgo.By("creating an object containing a namespace with in-cluster config")
				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-with-namespace.yaml --v=6 2>&1")
				gomega.Expect(err).To(gomega.HaveOccurred())
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster configuration"))
				// The manifest's namespace must win over the in-cluster namespace.
				gomega.Expect(err.Error()).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/configmap-namespace/configmaps", inClusterURL)))

				ginkgo.By("creating an object not containing a namespace with in-cluster config")
				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl create -f /tmp/invalid-configmap-without-namespace.yaml --v=6 2>&1")
				gomega.Expect(err).To(gomega.HaveOccurred())
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster configuration"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring(fmt.Sprintf("POST https://%s/api/v1/namespaces/%s/configmaps", inClusterURL, f.Namespace.Name)))

				ginkgo.By("trying to use kubectl with invalid token")
				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
				framework.Logf("got err %v", err)
				gomega.Expect(err).To(gomega.HaveOccurred())
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Using in-cluster configuration"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Response Status: 401 Unauthorized"))

				ginkgo.By("trying to use kubectl with invalid server")
				_, err = e2eoutput.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
				framework.Logf("got err %v", err)
				gomega.Expect(err).To(gomega.HaveOccurred())
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("Unable to connect to the server"))
				gomega.Expect(err.Error()).To(gomega.ContainSubstring("GET http://invalid/api"))

				ginkgo.By("trying to use kubectl with invalid namespace")
				execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --namespace=invalid --v=6 2>&1")
				gomega.Expect(execOutput).To(gomega.ContainSubstring("No resources found"))
				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(execOutput).To(gomega.ContainSubstring("Using in-cluster configuration"))
				gomega.Expect(execOutput).To(gomega.MatchRegexp(fmt.Sprintf("GET http[s]?://[\\[]?%s[\\]]?:%s/api/v1/namespaces/invalid/pods", inClusterHost, inClusterPort)))

				ginkgo.By("trying to use kubectl with kubeconfig")
				execOutput = e2eoutput.RunHostCmdOrDie(ns, simplePodName, "/tmp/kubectl get pods --kubeconfig=/tmp/"+overrideKubeconfigName+" --v=6 2>&1")
				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster namespace"))
				gomega.Expect(execOutput).ToNot(gomega.ContainSubstring("Using in-cluster configuration"))
				gomega.Expect(execOutput).To(gomega.ContainSubstring("GET https://kubernetes.default.svc:443/api/v1/namespaces/default/pods"))
			})
		})

		ginkgo.Describe("Kubectl run", func() {
			ginkgo.It("running a successful command", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "success", "--", "/bin/sh", "-c", "exit 0").Exec()
				framework.ExpectNoError(err)
			})

			ginkgo.It("running a failing command", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
				ee, ok := err.(uexec.ExitError)
				if !ok {
					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
				}
				gomega.Expect(ee.ExitStatus()).To(gomega.Equal(42))
			})

			f.It(f.WithSlow(), "running a failing command without --restart=Never", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
					WithStdinData("abcd1234").
					Exec()
				ee, ok := err.(uexec.ExitError)
				if !ok {
					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
				}
				if !strings.Contains(ee.String(), "timed out") {
					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
				}
			})

			f.It(f.WithSlow(), "running a failing command without --restart=Never, but with --rm", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", podRunningTimeoutArg, "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
					WithStdinData("abcd1234").
					Exec()
				ee, ok := err.(uexec.ExitError)
				if !ok {
					framework.Failf("Got unexpected error type, expected uexec.ExitError, got %T: %v", err, err)
				}
				if !strings.Contains(ee.String(), "timed out") {
					framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
				}
				framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, "failure-3", ns, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
			})

			f.It(f.WithSlow(), "running a failing command with --leave-stdin-open", func(ctx context.Context) {
				_, err := e2ekubectl.NewKubectlCommand(ns, "run", "-i", "--image="+busyboxImage, "--restart=Never", podRunningTimeoutArg, "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
					WithStdinData("abcd1234").
					Exec()
				framework.ExpectNoError(err)
			})
		})

		ginkgo.It("should support inline execution and attach", func(ctx context.Context) {
			waitForStdinContent := func(pod, content string) string {
				var logOutput string
				err := wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
					logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
					return strings.Contains(logOutput, content), nil
				})

				framework.ExpectNoError(err, "waiting for '%v' output", content)
				return logOutput
			}

			ginkgo.By("executing a command with run and attach with stdin")
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
				WithStdinData("value\nabcd1234").
				ExecOrDie(ns)

			runOutput := waitForStdinContent("run-test", "stdin closed")
			gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{}))

			ginkgo.By("executing a command with run and attach without stdin")
			// Without --stdin, the data supplied via WithStdinData should never
			// reach the container, so it must not show up in the logs.
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
				WithStdinData("abcd1234").
				ExecOrDie(ns)

			runOutput = waitForStdinContent("run-test-2", "stdin closed")
			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))

			ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
				WithStdinData("abcd1234\n").
				ExecOrDie(ns)

			runOutput = waitForStdinContent("run-test-3", "abcd1234")
			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))

			g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
			framework.ExpectNoError(err)
			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, runTestPod.Name, ns, time.Minute))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{}))
		})

		ginkgo.It("should support inline execution and attach with websockets or fallback to spdy", func(ctx context.Context) {
			waitForStdinContent := func(pod, content string) string {
				var logOutput string
				err := wait.PollUntilContextTimeout(ctx, 10*time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
					logOutput = e2ekubectl.RunKubectlOrDie(ns, "logs", pod)
					return strings.Contains(logOutput, content), nil
				})
				framework.ExpectNoError(err, "waiting for '%v' output", content)
				return logOutput
			}

			ginkgo.By("executing a command with run and attach with stdin")
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--stdin", "--", "sh", "-c", "echo -n read: && cat && echo 'stdin closed'").
				WithStdinData("value\nabcd1234").
				ExecOrDie(ns)

			runOutput := waitForStdinContent("run-test", "stdin closed")
			gomega.Expect(runOutput).To(gomega.ContainSubstring("read:value"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test", metav1.DeleteOptions{}))

			ginkgo.By("executing a command with run and attach without stdin")
			// Without --stdin, the data supplied via WithStdinData should never
			// reach the container, so it must not show up in the logs.
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-2", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--", "sh", "-c", "cat && echo 'stdin closed'").
				WithStdinData("abcd1234").
				ExecOrDie(ns)

			runOutput = waitForStdinContent("run-test-2", "stdin closed")
			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).To(gomega.ContainSubstring("stdin closed"))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-2", metav1.DeleteOptions{}))

			ginkgo.By("executing a command with run and attach with stdin with open stdin should remain running")
			e2ekubectl.NewKubectlCommand(ns, "run", "run-test-3", "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--attach=true", "--leave-stdin-open=true", "--stdin", "--", "sh", "-c", "cat && echo 'stdin closed'").
				WithStdinData("abcd1234\n").
				ExecOrDie(ns)

			runOutput = waitForStdinContent("run-test-3", "abcd1234")
			gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234"))
			gomega.Expect(runOutput).ToNot(gomega.ContainSubstring("stdin closed"))

			g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
			framework.ExpectNoError(err)
			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, runTestPod.Name, ns, time.Minute))

			framework.ExpectNoError(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{}))
		})

		ginkgo.It("should contain last line of the log", func(ctx context.Context) {
			podName := "run-log-test"

			ginkgo.By("executing a command with run")
			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+busyboxImage, "--restart=OnFailure", podRunningTimeoutArg, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")

			if !e2epod.CheckPodsRunningReadyOrSucceeded(ctx, c, ns, []string{podName}, framework.PodStartTimeout) {
				framework.Failf("Pod for run-log-test was not ready")
			}

			logOutput := e2ekubectl.RunKubectlOrDie(ns, "logs", "-f", "run-log-test")
			gomega.Expect(logOutput).To(gomega.ContainSubstring("EOF"))
		})
	})

	ginkgo.Describe("Kubectl api-versions", func() {
		framework.ConformanceIt("should check if v1 is in available api versions", func(ctx context.Context) {
			ginkgo.By("validating api versions")
			output := e2ekubectl.RunKubectlOrDie(ns, "api-versions")
			if !strings.Contains(output, "v1") {
				framework.Failf("No v1 in kubectl api-versions")
			}
		})
	})

	ginkgo.Describe("Kubectl get componentstatuses", func() {
		ginkgo.It("should get componentstatuses", func(ctx context.Context) {
			ginkgo.By("getting list of componentstatuses")
			output := e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", "-o", "jsonpath={.items[*].metadata.name}")
			components := strings.Split(output, " ")
			ginkgo.By("getting details of componentstatuses")
			for _, component := range components {
				ginkgo.By("getting status of " + component)
				e2ekubectl.RunKubectlOrDie(ns, "get", "componentstatuses", component)
			}
		})
	})

	ginkgo.Describe("Kubectl prune with applyset", func() {
		ginkgo.It("should apply and prune objects", func(ctx context.Context) {
			framework.Logf("applying manifest1")
			manifest1 := `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm1
  namespace: {{ns}}
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm2
  namespace: {{ns}}
`
			manifest1 = strings.ReplaceAll(manifest1, "{{ns}}", ns)
			args := []string{"apply", "--prune", "--applyset=applyset1", "-f", "-"}
			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest1).ExecOrDie(ns)

			framework.Logf("checking which objects exist")
			objects := mustListObjectsInNamespace(ctx, c, ns)
			names := mustGetNames(objects)
			if diff := cmp.Diff([]string{"cm1", "cm2"}, names); diff != "" {
				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
			}

			framework.Logf("applying manifest2")
			manifest2 := `
apiVersion: v1
kind: ConfigMap
metadata:
  name: cm1
  namespace: {{ns}}
`
			manifest2 = strings.ReplaceAll(manifest2, "{{ns}}", ns)

			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest2).ExecOrDie(ns)

			framework.Logf("checking which objects exist")
			objects = mustListObjectsInNamespace(ctx, c, ns)
			names = mustGetNames(objects)
			if diff := cmp.Diff([]string{"cm1"}, names); diff != "" {
				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
			}

			framework.Logf("applying manifest2 (again)")
			e2ekubectl.NewKubectlCommand(ns, args...).AppendEnv([]string{"KUBECTL_APPLYSET=true"}).WithStdinData(manifest2).ExecOrDie(ns)

			framework.Logf("checking which objects exist")
			objects = mustListObjectsInNamespace(ctx, c, ns)
			names = mustGetNames(objects)
			if diff := cmp.Diff([]string{"cm1"}, names); diff != "" {
				framework.Failf("unexpected configmap names (-want +got):\n%s", diff)
			}
		})
	})

	ginkgo.Describe("Kubectl apply", func() {
		ginkgo.It("should apply a new configuration to an existing RC", func(ctx context.Context) {
			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))

			ginkgo.By("creating Agnhost RC")
			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
			ginkgo.By("applying a modified configuration")
			stdin := modifyReplicationControllerConfiguration(controllerJSON)
			e2ekubectl.NewKubectlCommand(ns, "apply", "-f", "-").
				WithStdinReader(stdin).
				ExecOrDie(ns)
			ginkgo.By("checking the result")
			forEachReplicationController(ctx, c, ns, "app", "agnhost", validateReplicationControllerConfiguration)
		})
		ginkgo.It("should reuse port when apply to an existing SVC", func(ctx context.Context) {
			serviceJSON := readTestFileOrDie(agnhostServiceFilename)

			ginkgo.By("creating Agnhost SVC")
			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON), "create", "-f", "-")

			ginkgo.By("getting the original port")
			originalNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")

			ginkgo.By("applying the same configuration")
			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON), "apply", "-f", "-")

			ginkgo.By("getting the port after applying configuration")
			currentNodePort := e2ekubectl.RunKubectlOrDie(ns, "get", "service", "agnhost-primary", "-o", "jsonpath={.spec.ports[0].port}")

			ginkgo.By("checking the result")
			if originalNodePort != currentNodePort {
				framework.Failf("port should remain the same")
			}
		})

		ginkgo.It("apply set/view last-applied", func(ctx context.Context) {
			deployment1Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment1Filename)))
			deployment2Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment2Filename)))
			deployment3Yaml := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))

			ginkgo.By("deployment replicas number is 2")
			e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "-f", "-")

			ginkgo.By("check that the last-applied annotation matches expectations")
			output := e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
			requiredString := "\"replicas\": 2"
			if !strings.Contains(output, requiredString) {
				framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
			}

			ginkgo.By("apply a file that doesn't have replicas")
			e2ekubectl.RunKubectlOrDieInput(ns, deployment2Yaml, "apply", "set-last-applied", "-f", "-")

			ginkgo.By("check that last-applied has been updated and no longer contains replicas")
			output = e2ekubectl.RunKubectlOrDieInput(ns, deployment1Yaml, "apply", "view-last-applied", "-f", "-", "-o", "json")
			requiredString = "\"replicas\": 2"
			if strings.Contains(output, requiredString) {
				framework.Failf("Unexpected %s in kubectl view-last-applied", requiredString)
			}

			ginkgo.By("scale set replicas to 3")
			httpdDeploy := "httpd-deployment"
			debugDiscovery()
			e2ekubectl.RunKubectlOrDie(ns, "scale", "deployment", httpdDeploy, "--replicas=3")

			ginkgo.By("apply a file that doesn't have replicas but has an updated image")
			e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "apply", "-f", "-")

			ginkgo.By("verify replicas is still 3 and the image has been updated")
			output = e2ekubectl.RunKubectlOrDieInput(ns, deployment3Yaml, "get", "-f", "-", "-o", "json")
			requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
			for _, item := range requiredItems {
				if !strings.Contains(output, item) {
					framework.Failf("Missing %s in kubectl apply", item)
				}
			}
		})
	})

	ginkgo.Describe("Kubectl diff", func() {
		framework.ConformanceIt("should check if kubectl diff finds a difference for Deployments", func(ctx context.Context) {
			ginkgo.By("create deployment with httpd image")
			deployment := commonutils.SubstituteImageName(string(readTestFileOrDie(httpdDeployment3Filename)))
			e2ekubectl.RunKubectlOrDieInput(ns, deployment, "create", "-f", "-")

			ginkgo.By("verify diff finds difference between live and declared image")
			deployment = strings.Replace(deployment, httpdImage, busyboxImage, 1)
			if !strings.Contains(deployment, busyboxImage) {
				framework.Failf("Failed replacing image from %s to %s in:\n%s\n", httpdImage, busyboxImage, deployment)
			}
			output, err := e2ekubectl.RunKubectlInput(ns, deployment, "diff", "-f", "-")
			// kubectl diff exits 1 when differences are found; anything else is a failure.
			if exitErr, ok := err.(uexec.ExitError); !ok || exitErr.ExitStatus() != 1 {
				framework.Failf("Expected kubectl diff exit code of 1, but got: %v\n", err)
			}
			requiredItems := []string{httpdImage, busyboxImage}
			for _, item := range requiredItems {
				if !strings.Contains(output, item) {
					framework.Failf("Missing %s in kubectl diff output:\n%s\n%v\n", item, output, err)
				}
			}

			e2ekubectl.RunKubectlOrDieInput(ns, deployment, "delete", "-f", "-")
		})
	})

	ginkgo.Describe("Kubectl server-side dry-run", func() {
		framework.ConformanceIt("should check if kubectl can dry-run update Pods", func(ctx context.Context) {
			ginkgo.By("running the image " + httpdImage)
			podName := "e2e-test-httpd-pod"
			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)

			ginkgo.By("replace the image in the pod with server-side dry-run")
			specImage := fmt.Sprintf(`{"spec":{"containers":[{"name": "%s","image": "%s"}]}}`, podName, busyboxImage)
			e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", podName, "-p", specImage, "--dry-run=server")

			ginkgo.By("verifying the pod " + podName + " still has the image " + httpdImage)
			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting pod %s: %v", podName, err)
			}
			containers := pod.Spec.Containers
			if checkContainersImage(containers, httpdImage) {
				framework.Failf("Pod %s does not have the expected image %s", podName, httpdImage)
			}

			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
		})
	})
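	// definitionMatchesGVK reports whether an OpenAPI definition's
	// x-kubernetes-group-version-kind extension lists the desired GVK.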
	definitionMatchesGVK := func(extensions []*openapi_v2.NamedAny, desiredGVK schema.GroupVersionKind) bool {
		for _, extension := range extensions {
			if extension.GetValue().GetYaml() == "" ||
				extension.GetName() != "x-kubernetes-group-version-kind" {
				continue
			}
			var values []map[string]string
			err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &values)
			if err != nil {
				framework.Logf("%v\n%s", err, string(extension.GetValue().GetYaml()))
				continue
			}
			for _, value := range values {
				if value["group"] != desiredGVK.Group {
					continue
				}
				if value["version"] != desiredGVK.Version {
					continue
				}
				if value["kind"] != desiredGVK.Kind {
					continue
				}
				return true
			}
		}
		return false
	}
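	// schemaForGVK returns the published OpenAPI v2 schema for the given GVK,
	// or nil if no matching definition exists.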
	schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
		d, err := f.ClientSet.Discovery().OpenAPISchema()
		if err != nil {
			framework.Failf("%v", err)
		}
		if d == nil || d.Definitions == nil {
			return nil
		}
		for _, p := range d.Definitions.AdditionalProperties {
			if p == nil || p.Value == nil {
				continue
			}
			if !definitionMatchesGVK(p.Value.VendorExtension, desiredGVK) {
				continue
			}
			return p.Value
		}
		return nil
	}

	ginkgo.Describe("Kubectl validation", func() {
		ginkgo.It("should create/apply a CR with unknown fields for CRD with no validation schema", func(ctx context.Context) {
			ginkgo.By("create CRD with no validation schema")
			crd, err := crd.CreateTestCRD(f)
			if err != nil {
				framework.Failf("failed to create test CRD: %v", err)
			}
			ginkgo.DeferCleanup(crd.CleanUp)

			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
			time.Sleep(10 * time.Second)

			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
			randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
			if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
				framework.Failf("%v", err)
			}
		})

		ginkgo.It("should create/apply a valid CR for CRD with validation schema", func(ctx context.Context) {
			ginkgo.By("prepare CRD with validation schema")
			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
				props := &apiextensionsv1.JSONSchemaProps{}
				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
					framework.Failf("failed to unmarshal schema: %v", err)
				}
				for i := range crd.Spec.Versions {
					crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
				}
			})
			if err != nil {
				framework.Failf("failed to create test CRD: %v", err)
			}
			ginkgo.DeferCleanup(crd.CleanUp)

			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
			time.Sleep(10 * time.Second)

			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
			validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
			if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
				framework.Failf("%v", err)
			}
		})

		ginkgo.It("should create/apply an invalid/valid CR with arbitrary-extra properties for CRD with partially-specified validation schema", func(ctx context.Context) {
			ginkgo.By("prepare CRD with partially-specified validation schema")
			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
				props := &apiextensionsv1.JSONSchemaProps{}
				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
					framework.Failf("failed to unmarshal schema: %v", err)
				}
				// Allow unknown fields at the root of the object, while spec keeps
				// its stricter per-property schema.
				props.XPreserveUnknownFields = pointer.BoolPtr(true)
				for i := range crd.Spec.Versions {
					crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
				}
			})
			if err != nil {
				framework.Failf("failed to create test CRD: %v", err)
			}
			ginkgo.DeferCleanup(crd.CleanUp)

			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
			time.Sleep(10 * time.Second)

			schema := schemaForGVK(schema.GroupVersionKind{Group: crd.Crd.Spec.Group, Version: crd.Crd.Spec.Versions[0].Name, Kind: crd.Crd.Spec.Names.Kind})
			gomega.Expect(schema).ToNot(gomega.BeNil(), "retrieving a schema for the crd")

			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")

			// An unknown field inside spec is rejected, because spec has its own
			// schema without preserve-unknown-fields.
			invalidArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta)
			err = createApplyCustomResource(invalidArbitraryCR, f.Namespace.Name, "test-cr", crd)
			gomega.Expect(err).To(gomega.HaveOccurred(), "creating custom resource")
			if !strings.Contains(err.Error(), `unknown field "spec.extraProperty"`) {
				framework.Failf("incorrect error from createApplyCustomResource: %v", err)
			}

			// An unknown field at the root is accepted, thanks to the root-level
			// x-kubernetes-preserve-unknown-fields.
			validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]},"extraProperty":"arbitrary-value"}`, meta)
			err = createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd)
			framework.ExpectNoError(err, "creating custom resource")
		})

		ginkgo.It("should detect unknown metadata fields in both the root and embedded object of a CR", func(ctx context.Context) {
			ginkgo.By("prepare CRD with x-kubernetes-embedded-resource: true")
			opt := func(crd *apiextensionsv1.CustomResourceDefinition) {
				props := &apiextensionsv1.JSONSchemaProps{}
				if err := yaml.Unmarshal(schemaFooEmbedded, props); err != nil {
					framework.Failf("failed to unmarshal schema: %v", err)
				}
				crd.Spec.Versions = []apiextensionsv1.CustomResourceDefinitionVersion{
					{
						Name:    "v1",
						Served:  true,
						Storage: true,
						Schema: &apiextensionsv1.CustomResourceValidation{
							OpenAPIV3Schema: props,
						},
					},
				}
			}

			group := fmt.Sprintf("%s.example.com", f.BaseName)
			testCRD, err := crd.CreateMultiVersionTestCRD(f, group, opt)
			if err != nil {
				framework.Failf("failed to create test CRD: %v", err)
			}
			ginkgo.DeferCleanup(testCRD.CleanUp)

			ginkgo.By("sleep for 10s to wait for potential crd openapi publishing alpha feature")
			time.Sleep(10 * time.Second)

			ginkgo.By("attempting to create a CR with unknown metadata fields at the root level")
			gvk := schema.GroupVersionKind{Group: testCRD.Crd.Spec.Group, Version: testCRD.Crd.Spec.Versions[0].Name, Kind: testCRD.Crd.Spec.Names.Kind}
			schema := schemaForGVK(gvk)
			gomega.Expect(schema).ToNot(gomega.BeNil(), "retrieving a schema for the crd")
			embeddedCRPattern := `

{%s,
	"spec": {
		"template": {
			"apiVersion": "foo/v1",
			"kind": "Sub",
			"metadata": {
				%s
				"name": "subobject",
				"namespace": "%s"
			}
		}
	}
}`
			meta := unknownFieldMetadataJSON(gvk, "test-cr")
			unknownRootMetaCR := fmt.Sprintf(embeddedCRPattern, meta, "", ns)
			_, err = e2ekubectl.RunKubectlInput(ns, unknownRootMetaCR, "create", "--validate=true", "-f", "-")
			if err == nil {
				framework.Failf("unexpected nil error when creating CR with unknown root metadata field")
			}
			if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
				framework.Failf("error missing root unknown metadata field, got: %v", err)
			}
			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
				framework.Failf("unexpected error, CR's root metadata namespace field unrecognized: %v", err)
			}

			ginkgo.By("attempting to create a CR with unknown metadata fields in the embedded object")
			metaEmbedded := fmt.Sprintf(metaPattern, testCRD.Crd.Spec.Names.Kind, testCRD.Crd.Spec.Group, testCRD.Crd.Spec.Versions[0].Name, "test-cr-embedded")
			unknownEmbeddedMetaCR := fmt.Sprintf(embeddedCRPattern, metaEmbedded, `"unknownMetaEmbedded": "bar",`, ns)
			_, err = e2ekubectl.RunKubectlInput(ns, unknownEmbeddedMetaCR, "create", "--validate=true", "-f", "-")
			if err == nil {
				framework.Failf("unexpected nil error when creating CR with unknown embedded metadata field")
			}
			if !(strings.Contains(err.Error(), `unknown field "unknownMetaEmbedded"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.unknownMetaEmbedded"`)) {
				framework.Failf("error missing embedded unknown metadata field, got: %v", err)
			}
			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "spec.template.metadata.namespace"`) {
				framework.Failf("unexpected error, CR's embedded metadata namespace field unrecognized: %v", err)
			}
		})

		ginkgo.It("should detect unknown metadata fields of a typed object", func(ctx context.Context) {
			ginkgo.By("calling kubectl create deployment")
			invalidMetaDeployment := `
{
	"apiVersion": "apps/v1",
	"kind": "Deployment",
	"metadata": {
		"name": "my-dep",
		"unknownMeta": "foo",
		"labels": {"app": "nginx"}
	},
	"spec": {
		"selector": {
			"matchLabels": {
				"app": "nginx"
			}
		},
		"template": {
			"metadata": {
				"labels": {
					"app": "nginx"
				}
			},
			"spec": {
				"containers": [{
					"name": "nginx",
					"image": "nginx:latest"
				}]
			}
		}
	}
}
`
			_, err := e2ekubectl.RunKubectlInput(ns, invalidMetaDeployment, "create", "-f", "-")
			if err == nil {
				framework.Failf("unexpected nil error when creating deployment with unknown metadata field")
			}
			if !(strings.Contains(err.Error(), `unknown field "unknownMeta"`) || strings.Contains(err.Error(), `unknown field "metadata.unknownMeta"`)) {
				framework.Failf("error missing unknown metadata field, got: %v", err)
			}
			if strings.Contains(err.Error(), `unknown field "namespace"`) || strings.Contains(err.Error(), `unknown field "metadata.namespace"`) {
				framework.Failf("unexpected error, deployment's metadata namespace field unrecognized: %v", err)
			}
		})
	})

	ginkgo.Describe("Kubectl cluster-info", func() {
		framework.ConformanceIt("should check if Kubernetes control plane services is included in cluster-info", func(ctx context.Context) {
			ginkgo.By("validating cluster-info")
			output := e2ekubectl.RunKubectlOrDie(ns, "cluster-info")

			requiredItems := []string{"Kubernetes control plane", "is running at"}
			for _, item := range requiredItems {
				if !strings.Contains(output, item) {
					framework.Failf("Missing %s in kubectl cluster-info", item)
				}
			}
		})
	})

	ginkgo.Describe("Kubectl cluster-info dump", func() {
		ginkgo.It("should check if cluster-info dump succeeds", func(ctx context.Context) {
			ginkgo.By("running cluster-info dump")
			e2ekubectl.RunKubectlOrDie(ns, "cluster-info", "dump")
		})
	})

	ginkgo.Describe("Kubectl describe", func() {
		framework.ConformanceIt("should check if kubectl describe prints relevant information for rc and pods", func(ctx context.Context) {
			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
			serviceJSON := readTestFileOrDie(agnhostServiceFilename)

			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
			e2ekubectl.RunKubectlOrDieInput(ns, string(serviceJSON), "create", "-f", "-")

			ginkgo.By("Waiting for Agnhost primary to start.")
			waitForOrFailWithDebug(ctx, 1)

			// Pod
			forEachPod(ctx, func(pod v1.Pod) {
				output := e2ekubectl.RunKubectlOrDie(ns, "describe", "pod", pod.Name)
				requiredStrings := [][]string{
					{"Name:", "agnhost-primary-"},
					{"Namespace:", ns},
					{"Node:"},
					{"Labels:", "app=agnhost"},
					{"role=primary"},
					{"Annotations:"},
					{"Status:", "Running"},
					{"IP:"},
					{"Controlled By:", "ReplicationController/agnhost-primary"},
					{"Image:", agnhostImage},
					{"State:", "Running"},
					{"QoS Class:", "BestEffort"},
				}
				checkOutput(output, requiredStrings)
			})

			// ReplicationController
			requiredStrings := [][]string{
				{"Name:", "agnhost-primary"},
				{"Namespace:", ns},
				{"Selector:", "app=agnhost,role=primary"},
				{"Labels:", "app=agnhost"},
				{"role=primary"},
				{"Annotations:"},
				{"Replicas:", "1 current", "1 desired"},
				{"Pods Status:", "1 Running", "0 Waiting", "0 Succeeded", "0 Failed"},
				{"Pod Template:"},
				{"Image:", agnhostImage},
				{"Events:"}}
			checkKubectlOutputWithRetry(ns, requiredStrings, "describe", "rc", "agnhost-primary")

			// Service
			output := e2ekubectl.RunKubectlOrDie(ns, "describe", "service", "agnhost-primary")
			requiredStrings = [][]string{
				{"Name:", "agnhost-primary"},
				{"Namespace:", ns},
				{"Labels:", "app=agnhost"},
				{"role=primary"},
				{"Annotations:"},
				{"Selector:", "app=agnhost", "role=primary"},
				{"Type:", "ClusterIP"},
				{"IP:"},
				{"Port:", "<unset>", "6379/TCP"},
				{"Endpoints:"},
				{"Session Affinity:", "None"}}
			checkOutput(output, requiredStrings)

			// Node: any node in the cluster will do.
			nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			node := nodes.Items[0]
			output = e2ekubectl.RunKubectlOrDie(ns, "describe", "node", node.Name)
			requiredStrings = [][]string{
				{"Name:", node.Name},
				{"Labels:"},
				{"Annotations:"},
				{"CreationTimestamp:"},
				{"Conditions:"},
				{"Type", "Status", "LastHeartbeatTime", "LastTransitionTime", "Reason", "Message"},
				{"Addresses:"},
				{"Capacity:"},
				{"Version:"},
				{"Kernel Version:"},
				{"OS Image:"},
				{"Container Runtime Version:"},
				{"Kubelet Version:"},
				{"Kube-Proxy Version:"},
				{"Pods:"}}
			checkOutput(output, requiredStrings)

			// Namespace
			output = e2ekubectl.RunKubectlOrDie(ns, "describe", "namespace", ns)
			requiredStrings = [][]string{
				{"Name:", ns},
				{"Labels:"},
				{"Annotations:"},
				{"Status:", "Active"}}
			checkOutput(output, requiredStrings)
		})

		ginkgo.It("should check if kubectl describe prints relevant information for cronjob", func(ctx context.Context) {
			ginkgo.By("creating a cronjob")
			cronjobYaml := commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-cronjob.yaml.in")))
			e2ekubectl.RunKubectlOrDieInput(ns, cronjobYaml, "create", "-f", "-")

			ginkgo.By("waiting for cronjob to start.")
			err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
				cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
				if err != nil {
					return false, fmt.Errorf("failed listing CronJobs in namespace %s: %w", ns, err)
				}
				return len(cj.Items) > 0, nil
			})
			framework.ExpectNoError(err)

			ginkgo.By("verifying kubectl describe prints")
			output := e2ekubectl.RunKubectlOrDie(ns, "describe", "cronjob", "cronjob-test")
			requiredStrings := [][]string{
				{"Name:", "cronjob-test"},
				{"Namespace:", ns},
				{"Labels:"},
				{"Annotations:"},
				{"Schedule:", "*/1 * * * *"},
				{"Concurrency Policy:", "Allow"},
				{"Suspend:", "False"},
				{"Successful Job History Limit:", "3"},
				{"Failed Job History Limit:", "1"},
				{"Starting Deadline Seconds:", "30s"},
				{"Selector:"},
				{"Parallelism:"},
				{"Completions:"},
			}
			checkOutput(output, requiredStrings)
		})
	})

	ginkgo.Describe("Kubectl expose", func() {
		framework.ConformanceIt("should create services for rc", func(ctx context.Context) {
			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))

			agnhostPort := 6379

			ginkgo.By("creating Agnhost RC")

			framework.Logf("namespace %v", ns)
			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")

			ginkgo.By("Waiting for Agnhost primary to start.")
			waitForOrFailWithDebug(ctx, 1)
			forEachPod(ctx, func(pod v1.Pod) {
				framework.Logf("wait on agnhost-primary startup in %v", ns)
				e2eoutput.LookForStringInLog(ns, pod.Name, "agnhost-primary", "Paused", framework.PodStartTimeout)
			})
			validateService := func(name string, servicePort int, timeout time.Duration) {
				err := wait.Poll(framework.Poll, timeout, func() (bool, error) {
					ep, err := c.CoreV1().Endpoints(ns).Get(ctx, name, metav1.GetOptions{})
					if err != nil {
						// Log the real error before deciding whether to retry.
						framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)

						// Treat not-found, unauthorized and server-timeout
						// errors as transient and retry.
						if apierrors.IsNotFound(err) ||
							apierrors.IsUnauthorized(err) ||
							apierrors.IsServerTimeout(err) {
							err = nil
						}
						return false, err
					}

					uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
					if len(uidToPort) == 0 {
						framework.Logf("No endpoint found, retrying")
						return false, nil
					}
					if len(uidToPort) > 1 {
						framework.Failf("Too many endpoints found")
					}
					for _, port := range uidToPort {
						if port[0] != agnhostPort {
							framework.Failf("Wrong endpoint port: %d", port[0])
						}
					}
					return true, nil
				})
				framework.ExpectNoError(err)

				svc, err := c.CoreV1().Services(ns).Get(ctx, name, metav1.GetOptions{})
				framework.ExpectNoError(err)

				if len(svc.Spec.Ports) != 1 {
					framework.Failf("1 port is expected")
				}
				port := svc.Spec.Ports[0]
				if port.Port != int32(servicePort) {
					framework.Failf("Wrong service port: %d", port.Port)
				}
				if port.TargetPort.IntValue() != agnhostPort {
					framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
				}
			}

			ginkgo.By("exposing RC")
			e2ekubectl.RunKubectlOrDie(ns, "expose", "rc", "agnhost-primary", "--name=rm2", "--port=1234", fmt.Sprintf("--target-port=%d", agnhostPort))
			framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm2", true, framework.Poll, framework.ServiceStartTimeout))
			validateService("rm2", 1234, framework.ServiceStartTimeout)

			ginkgo.By("exposing service")
			e2ekubectl.RunKubectlOrDie(ns, "expose", "service", "rm2", "--name=rm3", "--port=2345", fmt.Sprintf("--target-port=%d", agnhostPort))
			framework.ExpectNoError(e2enetwork.WaitForService(ctx, c, ns, "rm3", true, framework.Poll, framework.ServiceStartTimeout))
			validateService("rm3", 2345, framework.ServiceStartTimeout)
		})
	})

	ginkgo.Describe("Kubectl label", func() {
		var podYaml string
		ginkgo.BeforeEach(func(ctx context.Context) {
			ginkgo.By("creating the pod")
			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, pausePodName, ns, framework.PodStartTimeout))
		})
		ginkgo.AfterEach(func() {
			cleanupKubectlInputs(podYaml, ns, pausePodSelector)
		})

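		// Checks that `kubectl label` can add a label to a pod and remove it
		// again.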
		framework.ConformanceIt("should update the label on a resource", func(ctx context.Context) {
			labelName := "testing-label"
			labelValue := "testing-label-value"

			ginkgo.By("adding the label " + labelName + " with value " + labelValue + " to a pod")
			e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"="+labelValue)
			ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
			output := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
			if !strings.Contains(output, labelValue) {
				framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
			}

			ginkgo.By("removing the label " + labelName + " of a pod")
			e2ekubectl.RunKubectlOrDie(ns, "label", "pods", pausePodName, labelName+"-")
			ginkgo.By("verifying the pod doesn't have the label " + labelName)
			output = e2ekubectl.RunKubectlOrDie(ns, "get", "pod", pausePodName, "-L", labelName)
			if strings.Contains(output, labelValue) {
				framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
			}
		})
	})

	ginkgo.Describe("Kubectl copy", func() {
		var podYaml string
		ginkgo.BeforeEach(func(ctx context.Context) {
			ginkgo.By("creating the pod")
			podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
			e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
			framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, busyboxPodName, ns, framework.PodStartTimeout))
		})
		ginkgo.AfterEach(func() {
			cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
		})

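		// Checks that `kubectl cp` can copy a file out of a running pod to
		// the local filesystem.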
		ginkgo.It("should copy a file from a running Pod", func(ctx context.Context) {
			remoteContents := "foobar\n"
			podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
			tempDestination, err := os.CreateTemp(os.TempDir(), "copy-foobar")
			if err != nil {
				framework.Failf("Failed creating temporary destination file: %v", err)
			}

			ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
			e2ekubectl.RunKubectlOrDie(ns, "cp", podSource, tempDestination.Name())
			ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
			localData, err := io.ReadAll(tempDestination)
			if err != nil {
				framework.Failf("Failed reading temporary local file: %v", err)
			}
			if string(localData) != remoteContents {
				framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
			}
		})
	})

	ginkgo.Describe("Kubectl patch", func() {
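		// Checks that `kubectl patch` can add an annotation to every pod of a
		// replication controller.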
		framework.ConformanceIt("should add annotations for pods in rc", func(ctx context.Context) {
			controllerJSON := commonutils.SubstituteImageName(string(readTestFileOrDie(agnhostControllerFilename)))
			ginkgo.By("creating Agnhost RC")
			e2ekubectl.RunKubectlOrDieInput(ns, controllerJSON, "create", "-f", "-")
			ginkgo.By("Waiting for Agnhost primary to start.")
			waitForOrFailWithDebug(ctx, 1)
			ginkgo.By("patching all pods")
			forEachPod(ctx, func(pod v1.Pod) {
				e2ekubectl.RunKubectlOrDie(ns, "patch", "pod", pod.Name, "-p", `{"metadata":{"annotations":{"x":"y"}}}`)
			})

			ginkgo.By("checking annotations")
			forEachPod(ctx, func(pod v1.Pod) {
				found := false
				for key, val := range pod.Annotations {
					if key == "x" && val == "y" {
						found = true
						break
					}
				}
				if !found {
					framework.Failf("Added annotation not found")
				}
			})
		})
	})

	ginkgo.Describe("Kubectl version", func() {
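		// Checks that `kubectl version` prints both the client and the
		// server version.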
		framework.ConformanceIt("should check if all data is printed", func(ctx context.Context) {
			versionString := e2ekubectl.RunKubectlOrDie(ns, "version")

			requiredItems := []string{"Client Version: ", "Server Version: "}
			for _, item := range requiredItems {
				// Old-style output: version.Info{Major:"1", Minor:"27", ...}
				oldMatched, _ := regexp.MatchString(item+`version.Info\{Major:"\d", Minor:"\d+\+?", GitVersion:"v\d\.\d+\.[\d\w\-\.\+]+", GitCommit:"[0-9a-f]+"`, versionString)
				// New-style output: plain "vX.Y.Z".
				newMatched, _ := regexp.MatchString(item+`v\d\.\d+\.[\d\w\-\.\+]+`, versionString)

				if !oldMatched && !newMatched {
					framework.Failf("Item %s value is not valid in %s\n", item, versionString)
				}
			}
		})
	})

	ginkgo.Describe("Kubectl run pod", func() {
		var podName string

		ginkgo.BeforeEach(func() {
			podName = "e2e-test-httpd-pod"
		})

		ginkgo.AfterEach(func() {
			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
		})

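		// Checks that `kubectl run --restart=Never` creates a bare pod with
		// the requested image and restart policy.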
		framework.ConformanceIt("should create a pod from an image when restart is Never", func(ctx context.Context) {
			ginkgo.By("running the image " + httpdImage)
			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--restart=Never", podRunningTimeoutArg, "--image="+httpdImage)
			ginkgo.By("verifying the pod " + podName + " was created")
			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting pod %s: %v", podName, err)
			}
			containers := pod.Spec.Containers
			if checkContainersImage(containers, httpdImage) {
				framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
			}
			if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
				framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
			}
		})
	})

	ginkgo.Describe("Kubectl replace", func() {
		var podName string

		ginkgo.BeforeEach(func() {
			podName = "e2e-test-httpd-pod"
		})

		ginkgo.AfterEach(func() {
			e2ekubectl.RunKubectlOrDie(ns, "delete", "pods", podName)
		})

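		// Checks that `kubectl replace` can swap the image of a running
		// single-container pod.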
		framework.ConformanceIt("should update a single-container pod's image", func(ctx context.Context) {
			ginkgo.By("running the image " + httpdImage)
			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)

			ginkgo.By("verifying the pod " + podName + " is running")
			label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
			err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
			if err != nil {
				framework.Failf("Failed getting pod %s: %v", podName, err)
			}

			ginkgo.By("verifying the pod " + podName + " was created")
			podJSON := e2ekubectl.RunKubectlOrDie(ns, "get", "pod", podName, "-o", "json")
			if !strings.Contains(podJSON, podName) {
				framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
			}

			ginkgo.By("replace the image in the pod")
			podJSON = strings.Replace(podJSON, httpdImage, busyboxImage, 1)
			e2ekubectl.RunKubectlOrDieInput(ns, podJSON, "replace", "-f", "-")

			ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
			pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting pod %s: %v", podName, err)
			}
			containers := pod.Spec.Containers
			if checkContainersImage(containers, busyboxImage) {
				framework.Failf("Failed creating pod with expected image %s", busyboxImage)
			}
		})
	})

	ginkgo.Describe("Proxy server", func() {
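		// Checks that `kubectl proxy --port 0` binds an OS-assigned port and
		// serves /api/ through it.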
		framework.ConformanceIt("should support proxy with --port 0", func(ctx context.Context) {
			ginkgo.By("starting the proxy server")
			port, cmd, err := startProxyServer(ns)
			if cmd != nil {
				defer framework.TryKill(cmd)
			}
			if err != nil {
				framework.Failf("Failed to start proxy server: %v", err)
			}
			ginkgo.By("curling proxy /api/ output")
			localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
			apiVersions, err := getAPIVersions(localAddr)
			if err != nil {
				framework.Failf("Expected at least one supported apiversion, got error %v", err)
			}
			if len(apiVersions.Versions) < 1 {
				framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
			}
		})

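		// Checks that `kubectl proxy --unix-socket=/path` serves the API over
		// a unix domain socket.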
		framework.ConformanceIt("should support --unix-socket=/path", func(ctx context.Context) {
			ginkgo.By("Starting the proxy")
			tmpdir, err := os.MkdirTemp("", "kubectl-proxy-unix")
			if err != nil {
				framework.Failf("Failed to create temporary directory: %v", err)
			}
			path := filepath.Join(tmpdir, "test")
			defer os.Remove(path)
			defer os.Remove(tmpdir)
			tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
			cmd := tk.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
			stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
			if err != nil {
				framework.Failf("Failed to start kubectl command: %v", err)
			}
			defer stdout.Close()
			defer stderr.Close()
			defer framework.TryKill(cmd)
			buf := make([]byte, 128)
			if _, err = stdout.Read(buf); err != nil {
				framework.Failf("Expected output from kubectl proxy: %v", err)
			}
			ginkgo.By("retrieving proxy /api/ output")
			_, err = curlUnix("http://unused/api", path)
			if err != nil {
				framework.Failf("Failed get of /api at %s: %v", path, err)
			}
		})
	})

	// Runs serially because modifying a node's taints can affect scheduling
	// for every other test on the shared cluster.
	f.Describe("Kubectl taint", framework.WithSerial(), func() {
		ginkgo.It("should update the taint on a node", func(ctx context.Context) {
			testTaint := v1.Taint{
				Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
				Value:  "testing-taint-value",
				Effect: v1.TaintEffectNoSchedule,
			}

			nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)

			ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)

			ginkgo.By("verifying the node has the taint " + testTaint.ToString())
			output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			requiredStrings := [][]string{
				{"Name:", nodeName},
				{"Taints:"},
				{testTaint.ToString()},
			}
			checkOutput(output, requiredStrings)

			ginkgo.By("removing the taint " + testTaint.ToString() + " of a node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+":"+string(testTaint.Effect)+"-")
			ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			if strings.Contains(output, testTaint.Key) {
				framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
			}
		})

		ginkgo.It("should remove all the taints with the same key off a node", func(ctx context.Context) {
			testTaint := v1.Taint{
				Key:    fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
				Value:  "testing-taint-value",
				Effect: v1.TaintEffectNoSchedule,
			}

			nodeName := scheduling.GetNodeThatCanRunPod(ctx, f)

			ginkgo.By("adding the taint " + testTaint.ToString() + " to a node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.ToString())
			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, testTaint)

			ginkgo.By("verifying the node has the taint " + testTaint.ToString())
			output := runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			requiredStrings := [][]string{
				{"Name:", nodeName},
				{"Taints:"},
				{testTaint.ToString()},
			}
			checkOutput(output, requiredStrings)

			newTestTaint := v1.Taint{
				Key:    testTaint.Key,
				Value:  "another-testing-taint-value",
				Effect: v1.TaintEffectPreferNoSchedule,
			}
			ginkgo.By("adding another taint " + newTestTaint.ToString() + " to the node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, newTestTaint.ToString())
			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, newTestTaint)

			ginkgo.By("verifying the node has the taint " + newTestTaint.ToString())
			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			requiredStrings = [][]string{
				{"Name:", nodeName},
				{"Taints:"},
				{newTestTaint.ToString()},
			}
			checkOutput(output, requiredStrings)

			noExecuteTaint := v1.Taint{
				Key:    testTaint.Key,
				Value:  "testing-taint-value-no-execute",
				Effect: v1.TaintEffectNoExecute,
			}
			ginkgo.By("adding NoExecute taint " + noExecuteTaint.ToString() + " to the node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, noExecuteTaint.ToString())
			ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, noExecuteTaint)

			ginkgo.By("verifying the node has the taint " + noExecuteTaint.ToString())
			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			requiredStrings = [][]string{
				{"Name:", nodeName},
				{"Taints:"},
				{noExecuteTaint.ToString()},
			}
			checkOutput(output, requiredStrings)

			ginkgo.By("removing all taints that have the same key " + testTaint.Key + " of the node")
			runKubectlRetryOrDie(ns, "taint", "nodes", nodeName, testTaint.Key+"-")
			ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
			output = runKubectlRetryOrDie(ns, "describe", "node", nodeName)
			if strings.Contains(output, testTaint.Key) {
				framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
			}
		})
	})

	ginkgo.Describe("Kubectl events", func() {
		ginkgo.It("should show event when pod is created", func(ctx context.Context) {
			podName := "e2e-test-httpd-pod"
			ginkgo.By("running the image " + httpdImage)
			e2ekubectl.RunKubectlOrDie(ns, "run", podName, "--image="+httpdImage, podRunningTimeoutArg, "--labels=run="+podName)

			ginkgo.By("verifying the pod " + podName + " is running")
			label := labels.SelectorFromSet(map[string]string{"run": podName})
			err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
			if err != nil {
				framework.Failf("Failed getting pod %s: %v", podName, err)
			}

			ginkgo.By("show started event for this pod")
			events := e2ekubectl.RunKubectlOrDie(ns, "events", "--for=pod/"+podName)

			// Collapse runs of whitespace so the assertion is not sensitive
			// to the column layout of the events table.
			eventsStr := strings.Join(strings.Fields(strings.TrimSpace(events)), " ")
			if !strings.Contains(eventsStr, fmt.Sprintf("Normal Scheduled Pod/%s", podName)) {
				framework.Failf("failed to list expected event")
			}

			ginkgo.By("expect not showing any WARNING message except timeouts")
			events = e2ekubectl.RunKubectlOrDie(ns, "events", "--types=WARNING", "--for=pod/"+podName)
			if events != "" && !strings.Contains(events, "timed out") {
				framework.Failf("unexpected WARNING event fired")
			}
		})
	})

	ginkgo.Describe("Kubectl create quota", func() {
		ginkgo.It("should create a quota without scopes", func(ctx context.Context) {
			quotaName := "million"

			ginkgo.By("calling kubectl quota")
			e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000,services=1000000")

			ginkgo.By("verifying that the quota was created")
			quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting quota %s: %v", quotaName, err)
			}

			if len(quota.Spec.Scopes) != 0 {
				framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
			}
			if len(quota.Spec.Hard) != 2 {
				framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
			}
			r, found := quota.Spec.Hard[v1.ResourcePods]
			if expected := resource.MustParse("1000000"); !found || r.Cmp(expected) != 0 {
				framework.Failf("Expected pods=1000000, got %v", r)
			}
			r, found = quota.Spec.Hard[v1.ResourceServices]
			if expected := resource.MustParse("1000000"); !found || r.Cmp(expected) != 0 {
				framework.Failf("Expected services=1000000, got %v", r)
			}
		})

		ginkgo.It("should create a quota with scopes", func(ctx context.Context) {
			quotaName := "scopes"

			ginkgo.By("calling kubectl quota")
			e2ekubectl.RunKubectlOrDie(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=BestEffort,NotTerminating")

			ginkgo.By("verifying that the quota was created")
			quota, err := c.CoreV1().ResourceQuotas(ns).Get(ctx, quotaName, metav1.GetOptions{})
			if err != nil {
				framework.Failf("Failed getting quota %s: %v", quotaName, err)
			}

			if len(quota.Spec.Scopes) != 2 {
				framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
			}
			scopes := make(map[v1.ResourceQuotaScope]struct{})
			for _, scope := range quota.Spec.Scopes {
				scopes[scope] = struct{}{}
			}
			if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
				framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
			}
			if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
				framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
			}
		})

		ginkgo.It("should reject quota with invalid scopes", func(ctx context.Context) {
			quotaName := "scopes"

			ginkgo.By("calling kubectl quota")
			out, err := e2ekubectl.RunKubectl(ns, "create", "quota", quotaName, "--hard=pods=1000000", "--scopes=Foo")
			if err == nil {
				framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
			}
		})
	})

	ginkgo.Describe("kubectl wait", func() {
		ginkgo.It("should ignore not found error with --for=delete", func(ctx context.Context) {
			ginkgo.By("calling kubectl wait --for=delete")
			e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod/doesnotexist")
			e2ekubectl.RunKubectlOrDie(ns, "wait", "--for=delete", "pod", "--selector=app.kubernetes.io/name=noexist")
		})
	})

	ginkgo.Describe("kubectl subresource flag", func() {
		ginkgo.It("should not be used in a bulk GET", func() {
			ginkgo.By("calling kubectl get nodes --subresource=status")
			out, err := e2ekubectl.RunKubectl("", "get", "nodes", "--subresource=status")
			gomega.Expect(err).To(gomega.HaveOccurred(), fmt.Sprintf("Expected kubectl to fail, but it succeeded: %s", out))
			gomega.Expect(err.Error()).To(gomega.ContainSubstring("subresource cannot be used when bulk resources are specified"))
		})
		ginkgo.It("GET on status subresource of built-in type (node) returns identical info as GET on the built-in type", func(ctx context.Context) {
			ginkgo.By("first listing nodes in the cluster, and using first node of the list")
			nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			gomega.Expect(nodes.Items).ToNot(gomega.BeEmpty())
			node := nodes.Items[0]

			// Compare only fields that cannot change between the two reads,
			// so the test does not flake on status churn.
			ginkgo.By(fmt.Sprintf("calling kubectl get nodes %s", node.Name))
			outBuiltIn := e2ekubectl.RunKubectlOrDie("", "get", "nodes", node.Name,
				"--output=jsonpath='{.metadata.name}{.status.nodeInfo.kubeletVersion}'",
			)
			ginkgo.By(fmt.Sprintf("calling kubectl get nodes %s --subresource=status", node.Name))
			outStatusSubresource := e2ekubectl.RunKubectlOrDie("", "get", "nodes", node.Name,
				"--output=jsonpath='{.metadata.name}{.status.nodeInfo.kubeletVersion}'",
				"--subresource=status",
			)
			gomega.Expect(outBuiltIn).To(gomega.Equal(outStatusSubresource))
		})
	})
})

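// getTestContextHost returns the API server host for the test context,
// preferring the --host flag and falling back to the first server found in
// the kubeconfig; it fails the test if neither is available.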
func getTestContextHost() string {
	if len(framework.TestContext.Host) > 0 {
		return framework.TestContext.Host
	}

	if framework.TestContext.KubeConfig != "" {
		c, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
		if err == nil {
			for _, v := range c.Clusters {
				if v.Server != "" {
					framework.Logf("--host variable was not set, picking up the first server from %s",
						framework.TestContext.KubeConfig)
					return v.Server
				}
			}
		}
	}
	framework.Failf("--host variable must be set to the full URI of the API server for the e2e run.")
	return ""
}
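// checkOutputReturnError scans output line by line and verifies that every
// requirement is satisfied in order: the first item of a requirement must
// appear on some line at or after the previously matched line, and all
// remaining items of that requirement must appear on that same line. For
// example (illustrative values only):
//
//	err := checkOutputReturnError(out, [][]string{
//		{"Name:", "agnhost-primary"},
//		{"Namespace:", ns},
//	})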
func checkOutputReturnError(output string, required [][]string) error {
	outputLines := strings.Split(output, "\n")
	currentLine := 0
	for _, requirement := range required {
		for currentLine < len(outputLines) && !strings.Contains(outputLines[currentLine], requirement[0]) {
			currentLine++
		}
		if currentLine == len(outputLines) {
			return fmt.Errorf("failed to find %s in %s", requirement[0], output)
		}
		for _, item := range requirement[1:] {
			if !strings.Contains(outputLines[currentLine], item) {
				return fmt.Errorf("failed to find %s in %s", item, outputLines[currentLine])
			}
		}
	}
	return nil
}

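// checkOutput is the fatal variant of checkOutputReturnError: it fails the
// test immediately if any requirement is missing from the output.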
func checkOutput(output string, required [][]string) {
	err := checkOutputReturnError(output, required)
	if err != nil {
		framework.Failf("%v", err)
	}
}

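// checkKubectlOutputWithRetry re-runs the given kubectl command for up to a
// minute until its output satisfies the requirements, failing the test with
// the last mismatch if it never does.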
func checkKubectlOutputWithRetry(namespace string, required [][]string, args ...string) {
	var pollErr error
	// The poll's own error is deliberately ignored; pollErr records the last
	// mismatch seen inside the poll function.
	wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		output := e2ekubectl.RunKubectlOrDie(namespace, args...)
		err := checkOutputReturnError(output, required)
		if err != nil {
			pollErr = err
			return false, nil
		}
		pollErr = nil
		return true, nil
	})
	if pollErr != nil {
		framework.Failf("%v", pollErr)
	}
}

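// checkContainersImage returns true when the container list is NOT exactly
// one container running expectImage, i.e. a true result means the check
// failed.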
func checkContainersImage(containers []v1.Container, expectImage string) bool {
	// len() of a nil slice is 0, so this also covers the nil case.
	return len(containers) != 1 || containers[0].Image != expectImage
}

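// getAPIVersions fetches apiEndpoint (typically a /api URL) and decodes the
// response into a metav1.APIVersions object.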
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
	body, err := curl(apiEndpoint)
	if err != nil {
		return nil, fmt.Errorf("failed http.Get of %s: %w", apiEndpoint, err)
	}
	var apiVersions metav1.APIVersions
	if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
		return nil, fmt.Errorf("failed to parse /api output %s: %w", body, err)
	}
	return &apiVersions, nil
}

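// startProxyServer launches `kubectl proxy` on an OS-assigned port and
// returns the port it is listening on together with the running command.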
func startProxyServer(ns string) (int, *exec.Cmd, error) {
	// Port 0 asks the OS to pick an unused port.
	tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, ns)
	cmd := tk.KubectlCmd("proxy", "-p", "0", "--disable-filter")
	stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
	if err != nil {
		return -1, nil, err
	}
	buf := make([]byte, 128)
	var n int
	if n, err = stdout.Read(buf); err != nil {
		return -1, cmd, fmt.Errorf("failed to read from kubectl proxy stdout: %w", err)
	}
	go func() {
		out, _ := io.ReadAll(stdout)
		framework.Logf("kubectl proxy stdout: %s", string(buf[:n])+string(out))
		stdout.Close()
	}()
	go func() {
		out, _ := io.ReadAll(stderr)
		framework.Logf("kubectl proxy stderr: %s", string(out))
		stderr.Close()
	}()
	output := string(buf[:n])
	match := proxyRegexp.FindStringSubmatch(output)
	if len(match) == 2 {
		if port, err := strconv.Atoi(match[1]); err == nil {
			return port, cmd, nil
		}
	}
	return -1, cmd, fmt.Errorf("failed to parse port from proxy stdout: %s", output)
}

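// curlUnix issues an HTTP GET for url over the unix domain socket at path.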
func curlUnix(url string, path string) (string, error) {
	dial := func(ctx context.Context, proto, addr string) (net.Conn, error) {
		// Ignore the requested proto/addr and always dial the unix socket.
		var d net.Dialer
		return d.DialContext(ctx, "unix", path)
	}
	transport := utilnet.SetTransportDefaults(&http.Transport{
		DialContext: dial,
	})
	return curlTransport(url, transport)
}

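// curlTransport issues an HTTP GET for url using the supplied transport and
// returns the response body as a string.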
func curlTransport(url string, transport *http.Transport) (string, error) {
	client := &http.Client{Transport: transport}
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

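// curl issues an HTTP GET for url with the default transport.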
func curl(url string) (string, error) {
	return curlTransport(url, utilnet.SetTransportDefaults(&http.Transport{}))
}

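// validateGuestbookApp waits for the guestbook frontend pods to run, then
// exercises the app end to end: it adds an entry and verifies that the entry
// can be read back.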
func validateGuestbookApp(ctx context.Context, c clientset.Interface, ns string) {
	framework.Logf("Waiting for all frontend pods to be Running.")
	label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
	err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
	framework.ExpectNoError(err)
	framework.Logf("Waiting for frontend to serve content.")
	if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":""}`, guestbookStartupTimeout, ns) {
		framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
	}

	framework.Logf("Trying to add a new entry to the guestbook.")
	if !waitForGuestbookResponse(ctx, c, "set", "TestEntry", `{"message":"Updated"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Cannot add new entry in %v seconds.", guestbookResponseTimeout.Seconds())
	}

	framework.Logf("Verifying that added entry can be retrieved.")
	if !waitForGuestbookResponse(ctx, c, "get", "", `{"data":"TestEntry"}`, guestbookResponseTimeout, ns) {
		framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
	}
}

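// waitForGuestbookResponse polls the guestbook frontend until it returns
// expectedResponse for the given command, or until the timeout expires.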
func waitForGuestbookResponse(ctx context.Context, c clientset.Interface, cmd, arg, expectedResponse string, timeout time.Duration, ns string) bool {
	for start := time.Now(); time.Since(start) < timeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
		res, err := makeRequestToGuestbook(ctx, c, cmd, arg, ns)
		if err == nil && res == expectedResponse {
			return true
		}
		framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
	}
	return false
}

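// makeRequestToGuestbook sends a single command to the guestbook frontend
// through the API server's service proxy and returns the raw response.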
func makeRequestToGuestbook(ctx context.Context, c clientset.Interface, cmd, value string, ns string) (string, error) {
	proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(c, c.CoreV1().RESTClient().Get())
	if errProxy != nil {
		return "", errProxy
	}

	ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
	defer cancel()

	result, err := proxyRequest.Namespace(ns).
		Name("frontend").
		Suffix("/guestbook").
		Param("cmd", cmd).
		Param("key", "messages").
		Param("value", value).
		Do(ctx).
		Raw()
	return string(result), err
}

// updateDemoData models the JSON payload served by update-demo pods at
// data.json.
type updateDemoData struct {
	Image string
}

const applyTestLabel = "kubectl.kubernetes.io/apply-test"

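// readReplicationControllerFromString unmarshals a YAML manifest into a
// ReplicationController, failing the test on a parse error.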
func readReplicationControllerFromString(contents string) *v1.ReplicationController {
	rc := v1.ReplicationController{}
	if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
		framework.Failf("%v", err)
	}

	return &rc
}

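// modifyReplicationControllerConfiguration parses an RC manifest, stamps the
// apply-test label onto its metadata, selector and pod template, and returns
// the modified manifest as JSON.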
func modifyReplicationControllerConfiguration(contents string) io.Reader {
	rc := readReplicationControllerFromString(contents)
	rc.Labels[applyTestLabel] = "ADDED"
	rc.Spec.Selector[applyTestLabel] = "ADDED"
	rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
	data, err := json.Marshal(rc)
	if err != nil {
		framework.Failf("json marshal failed: %s", err)
	}

	return bytes.NewReader(data)
}

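// forEachReplicationController waits for at least one RC matching the
// selector to appear, then invokes fn for every RC found.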
func forEachReplicationController(ctx context.Context, c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
	var rcs *v1.ReplicationControllerList
	var err error
	for t := time.Now(); time.Since(t) < framework.PodListTimeout && ctx.Err() == nil; time.Sleep(framework.Poll) {
		label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
		options := metav1.ListOptions{LabelSelector: label.String()}
		rcs, err = c.CoreV1().ReplicationControllers(ns).List(ctx, options)
		framework.ExpectNoError(err)
		if len(rcs.Items) > 0 {
			break
		}
	}

	if rcs == nil || len(rcs.Items) == 0 {
		framework.Failf("No replication controllers found")
	}

	for _, rc := range rcs.Items {
		fn(rc)
	}
}

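// validateReplicationControllerConfiguration checks that the agnhost-primary
// RC carries both the last-applied-configuration annotation and the
// apply-test label added by modifyReplicationControllerConfiguration.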
func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
	if rc.Name == "agnhost-primary" {
		if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
			framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
		}

		if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
			framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
		}
	}
}

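// getUDData returns a validator that fetches data.json from an update-demo
// pod through the API proxy and checks that the served image field contains
// jpgExpected. For example, getUDData("nautilus.jpg", ns) passes once the
// pod serves {"image": "nautilus.jpg"}.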
func getUDData(jpgExpected string, ns string) func(context.Context, clientset.Interface, string) error {
	return func(ctx context.Context, c clientset.Interface, podID string) error {
		framework.Logf("validating pod %s", podID)

		ctx, cancel := context.WithTimeout(ctx, framework.SingleCallTimeout)
		defer cancel()

		body, err := c.CoreV1().RESTClient().Get().
			Namespace(ns).
			Resource("pods").
			SubResource("proxy").
			Name(podID).
			Suffix("data.json").
			Do(ctx).
			Raw()

		if err != nil {
			if ctx.Err() != nil {
				framework.Failf("Failed to retrieve data from container: %v", err)
			}
			return err
		}
		framework.Logf("got data: %s", body)
		var data updateDemoData
		if err := json.Unmarshal(body, &data); err != nil {
			return err
		}
		framework.Logf("Unmarshalled JSON %v, expecting image %q.", data, jpgExpected)
		if strings.Contains(data.Image, jpgExpected) {
			return nil
		}
		return fmt.Errorf("data served up in container is inaccurate, %v didn't contain %q", data, jpgExpected)
	}
}

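// newBlockingReader returns a reader for s that blocks at EOF until the
// returned closer is closed. It is backed by an OS pipe so that the reader
// carries a real file descriptor, which matters when it is handed to an
// exec'd process such as kubectl.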
func newBlockingReader(s string) (io.Reader, io.Closer, error) {
	r, w, err := os.Pipe()
	if err != nil {
		return nil, nil, err
	}
	w.Write([]byte(s))
	return r, w, nil
}

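// createApplyCustomResource verifies that a custom resource manifest can be
// round-tripped through both `kubectl create` and `kubectl apply` with
// validation enabled, deleting the CR after each step.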
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
	ginkgo.By("successfully create CR")
	if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
		return fmt.Errorf("failed to create CR %s in namespace %s: %w", resource, namespace, err)
	}
	if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
		return fmt.Errorf("failed to delete CR %s: %w", name, err)
	}
	ginkgo.By("successfully apply CR")
	if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
		return fmt.Errorf("failed to apply CR %s in namespace %s: %w", resource, namespace, err)
	}
	if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
		return fmt.Errorf("failed to delete CR %s: %w", name, err)
	}
	return nil
}

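// trimDockerRegistry normalizes an image name by stripping the implicit
// "docker.io/" and "library/" prefixes, e.g.
// "docker.io/library/httpd:2.4.38-alpine" becomes "httpd:2.4.38-alpine".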
func trimDockerRegistry(imagename string) string {
	imagename = strings.Replace(imagename, "docker.io/", "", 1)
	return strings.Replace(imagename, "library/", "", 1)
}

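// validatorFn checks the pod identified by podID. Pass a validatorFn to
// validateController to plug in test-specific validation; return nil when
// the pod is valid and an error describing the problem otherwise.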
type validatorFn func(ctx context.Context, c clientset.Interface, podID string) error

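// validateController is a generic mechanism for checking that an RC's pods
// are up: it waits until the expected number of replicas are running the
// expected image and pass the supplied validator, and fails the test on
// timeout.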
func validateController(ctx context.Context, c clientset.Interface, containerImage string, replicas int, containername string, testname string, validator validatorFn, ns string) {
	containerImage = trimDockerRegistry(containerImage)
	getPodsTemplate := "--template={{range.items}}{{.metadata.name}} {{end}}"

	getContainerStateTemplate := fmt.Sprintf(`--template={{if (exists . "status" "containerStatuses")}}{{range .status.containerStatuses}}{{if (and (eq .name "%s") (exists . "state" "running"))}}true{{end}}{{end}}{{end}}`, containername)

	getImageTemplate := fmt.Sprintf(`--template={{if (exists . "spec" "containers")}}{{range .spec.containers}}{{if eq .name "%s"}}{{.image}}{{end}}{{end}}{{end}}`, containername)

	ginkgo.By(fmt.Sprintf("waiting for all containers in %s pods to come up.", testname))
waitLoop:
	for start := time.Now(); time.Since(start) < framework.PodStartTimeout && ctx.Err() == nil; time.Sleep(5 * time.Second) {
		getPodsOutput := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", "-o", "template", getPodsTemplate, "-l", testname)
		pods := strings.Fields(getPodsOutput)
		if numPods := len(pods); numPods != replicas {
			ginkgo.By(fmt.Sprintf("Replicas for %s: expected=%d actual=%d", testname, replicas, numPods))
			continue
		}
		var runningPods []string
		for _, podID := range pods {
			running := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getContainerStateTemplate)
			if running != "true" {
				framework.Logf("%s is created but not running", podID)
				continue waitLoop
			}

			currentImage := e2ekubectl.RunKubectlOrDie(ns, "get", "pods", podID, "-o", "template", getImageTemplate)
			currentImage = trimDockerRegistry(currentImage)
			if currentImage != containerImage {
				framework.Logf("%s is created but running wrong image; expected: %s, actual: %s", podID, containerImage, currentImage)
				continue waitLoop
			}

			// Run the test-specific validator against this pod.
			if err := validator(ctx, c, podID); err != nil {
				framework.Logf("%s is running right image but validator function failed: %v", podID, err)
				continue waitLoop
			}

			framework.Logf("%s is verified up and running", podID)
			runningPods = append(runningPods, podID)
		}

		if len(runningPods) == replicas {
			return
		}
	}

	framework.Failf("Timed out after %v seconds waiting for %s pods to reach valid state", framework.PodStartTimeout.Seconds(), testname)
}

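// mustListObjectsInNamespace returns the configmaps in the namespace,
// excluding the automatically created kube-root-ca.crt, and fails the test
// on any list error.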
func mustListObjectsInNamespace(ctx context.Context, c clientset.Interface, ns string) []runtime.Object {
	var objects []runtime.Object
	configMaps, err := c.CoreV1().ConfigMaps(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		framework.Failf("error listing configmaps: %v", err)
	}
	for i := range configMaps.Items {
		cm := &configMaps.Items[i]
		if cm.Name == "kube-root-ca.crt" {
			// Every namespace gets this configmap automatically; skip it.
			continue
		}
		objects = append(objects, cm)
	}
	return objects
}
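// mustGetNames extracts the sorted object names from the given list, failing
// the test if an object does not expose standard metadata.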
func mustGetNames(objects []runtime.Object) []string {
	var names []string
	for _, obj := range objects {
		metaAccessor, err := meta.Accessor(obj)
		if err != nil {
			framework.Failf("error getting accessor for %T: %v", obj, err)
		}
		name := metaAccessor.GetName()
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}