...

Source file src/k8s.io/kubernetes/test/e2e/storage/utils/host_exec.go

Documentation: k8s.io/kubernetes/test/e2e/storage/utils

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package utils
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  
    23  	v1 "k8s.io/api/core/v1"
    24  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    25  	"k8s.io/client-go/util/exec"
    26  	"k8s.io/kubernetes/test/e2e/framework"
    27  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    28  )
    29  
    30  // Result holds the execution result of remote execution command.
// Result holds the execution result of remote execution command.
type Result struct {
	Host   string // name of the node the command was executed on
	Cmd    string // the command line that was executed
	Stdout string // captured standard output
	Stderr string // captured standard error
	Code   int    // exit code of the remote command; 0 on success
}
    38  
    39  // LogResult records result log
    40  func LogResult(result Result) {
    41  	remote := result.Host
    42  	framework.Logf("exec %s: command:   %s", remote, result.Cmd)
    43  	framework.Logf("exec %s: stdout:    %q", remote, result.Stdout)
    44  	framework.Logf("exec %s: stderr:    %q", remote, result.Stderr)
    45  	framework.Logf("exec %s: exit code: %d", remote, result.Code)
    46  }
    47  
    48  // HostExec represents interface we require to execute commands on remote host.
// HostExec represents interface we require to execute commands on remote host.
type HostExec interface {
	// Execute runs cmd on the given node and returns stdout, stderr and
	// the exit code in Result; a non-zero exit code is not reported as error.
	Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error)
	// IssueCommandWithResult runs cmd on the given node and returns its
	// stdout; execution problems and non-zero exits are returned as error.
	IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error)
	// IssueCommand works like IssueCommandWithResult but discards stdout.
	IssueCommand(ctx context.Context, cmd string, node *v1.Node) error
	// Cleanup deletes any pods created for command execution.
	Cleanup(ctx context.Context)
}
    55  
    56  // hostExecutor implements HostExec
// hostExecutor implements HostExec
type hostExecutor struct {
	*framework.Framework
	// nodeExecPods caches one hostexec pod per node, keyed by node name,
	// so repeated commands on the same node reuse the same pod.
	nodeExecPods map[string]*v1.Pod
}
    61  
    62  // NewHostExec returns a HostExec
    63  func NewHostExec(framework *framework.Framework) HostExec {
    64  	return &hostExecutor{
    65  		Framework:    framework,
    66  		nodeExecPods: make(map[string]*v1.Pod),
    67  	}
    68  }
    69  
    70  // launchNodeExecPod launches a hostexec pod for local PV and waits
    71  // until it's Running.
    72  func (h *hostExecutor) launchNodeExecPod(ctx context.Context, node string) *v1.Pod {
    73  	f := h.Framework
    74  	cs := f.ClientSet
    75  	ns := f.Namespace
    76  
    77  	hostExecPod := e2epod.NewExecPodSpec(ns.Name, "", true)
    78  	hostExecPod.GenerateName = fmt.Sprintf("hostexec-%s-", node)
    79  
    80  	if framework.TestContext.NodeE2E {
    81  		// E2E node tests do not run a scheduler, so set the node name directly
    82  		hostExecPod.Spec.NodeName = node
    83  	} else {
    84  		// Use NodeAffinity instead of NodeName so that pods will not
    85  		// be immediately Failed by kubelet if it's out of space. Instead
    86  		// Pods will be pending in the scheduler until there is space freed
    87  		// up.
    88  		e2epod.SetNodeAffinity(&hostExecPod.Spec, node)
    89  
    90  	}
    91  	hostExecPod.Spec.Volumes = []v1.Volume{
    92  		{
    93  			// Required to enter into host mount namespace via nsenter.
    94  			Name: "rootfs",
    95  			VolumeSource: v1.VolumeSource{
    96  				HostPath: &v1.HostPathVolumeSource{
    97  					Path: "/",
    98  				},
    99  			},
   100  		},
   101  	}
   102  	hostExecPod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
   103  		{
   104  			Name:      "rootfs",
   105  			MountPath: "/rootfs",
   106  			ReadOnly:  true,
   107  		},
   108  	}
   109  	hostExecPod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{
   110  		Privileged: func(privileged bool) *bool {
   111  			return &privileged
   112  		}(true),
   113  	}
   114  	pod, err := cs.CoreV1().Pods(ns.Name).Create(ctx, hostExecPod, metav1.CreateOptions{})
   115  	framework.ExpectNoError(err)
   116  	err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
   117  	framework.ExpectNoError(err)
   118  	return pod
   119  }
   120  
   121  // Execute executes the command on the given node. If there is no error
   122  // performing the remote command execution, the stdout, stderr and exit code
   123  // are returned.
   124  // This works like ssh.SSH(...) utility.
   125  func (h *hostExecutor) Execute(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
   126  	result, err := h.exec(ctx, cmd, node)
   127  	if codeExitErr, ok := err.(exec.CodeExitError); ok {
   128  		// extract the exit code of remote command and silence the command
   129  		// non-zero exit code error
   130  		result.Code = codeExitErr.ExitStatus()
   131  		err = nil
   132  	}
   133  	return result, err
   134  }
   135  
   136  func (h *hostExecutor) exec(ctx context.Context, cmd string, node *v1.Node) (Result, error) {
   137  	result := Result{
   138  		Host: node.Name,
   139  		Cmd:  cmd,
   140  	}
   141  	pod, ok := h.nodeExecPods[node.Name]
   142  	if !ok {
   143  		pod = h.launchNodeExecPod(ctx, node.Name)
   144  		if pod == nil {
   145  			return result, fmt.Errorf("failed to create hostexec pod for node %q", node)
   146  		}
   147  		h.nodeExecPods[node.Name] = pod
   148  	}
   149  	args := []string{
   150  		"nsenter",
   151  		"--mount=/rootfs/proc/1/ns/mnt",
   152  		"--",
   153  		"sh",
   154  		"-c",
   155  		cmd,
   156  	}
   157  	containerName := pod.Spec.Containers[0].Name
   158  	var err error
   159  	result.Stdout, result.Stderr, err = e2epod.ExecWithOptions(h.Framework, e2epod.ExecOptions{
   160  		Command:            args,
   161  		Namespace:          pod.Namespace,
   162  		PodName:            pod.Name,
   163  		ContainerName:      containerName,
   164  		Stdin:              nil,
   165  		CaptureStdout:      true,
   166  		CaptureStderr:      true,
   167  		PreserveWhitespace: true,
   168  	})
   169  	return result, err
   170  }
   171  
   172  // IssueCommandWithResult issues command on the given node and returns stdout as
   173  // result. It returns error if there are some issues executing the command or
   174  // the command exits non-zero.
   175  func (h *hostExecutor) IssueCommandWithResult(ctx context.Context, cmd string, node *v1.Node) (string, error) {
   176  	result, err := h.exec(ctx, cmd, node)
   177  	if err != nil {
   178  		LogResult(result)
   179  	}
   180  	return result.Stdout, err
   181  }
   182  
   183  // IssueCommand works like IssueCommandWithResult, but discards result.
   184  func (h *hostExecutor) IssueCommand(ctx context.Context, cmd string, node *v1.Node) error {
   185  	_, err := h.IssueCommandWithResult(ctx, cmd, node)
   186  	return err
   187  }
   188  
   189  // Cleanup cleanup resources it created during test.
   190  // Note that in most cases it is not necessary to call this because we create
   191  // pods under test namespace which will be destroyed in teardown phase.
   192  func (h *hostExecutor) Cleanup(ctx context.Context) {
   193  	for _, pod := range h.nodeExecPods {
   194  		e2epod.DeletePodOrFail(ctx, h.Framework.ClientSet, pod.Namespace, pod.Name)
   195  	}
   196  	h.nodeExecPods = make(map[string]*v1.Pod)
   197  }
   198  

View as plain text