...

Source file src/k8s.io/kubernetes/test/e2e/storage/testsuites/disruptive.go

Documentation: k8s.io/kubernetes/test/e2e/storage/testsuites

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package testsuites
    18  
    19  import (
    20  	"context"
    21  
    22  	"github.com/onsi/ginkgo/v2"
    23  	v1 "k8s.io/api/core/v1"
    24  	"k8s.io/apimachinery/pkg/util/errors"
    25  	clientset "k8s.io/client-go/kubernetes"
    26  	"k8s.io/kubernetes/test/e2e/feature"
    27  	"k8s.io/kubernetes/test/e2e/framework"
    28  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    29  	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
    30  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    31  	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
    32  	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
    33  	admissionapi "k8s.io/pod-security-admission/api"
    34  )
    35  
// disruptiveTestSuite implements the storageframework.TestSuite interface for
// tests that disrupt the kubelet (restart, pod deletion while kubelet is down)
// while volumes are in use.
type disruptiveTestSuite struct {
	// tsInfo holds the suite name, tags, and the test patterns to run.
	tsInfo storageframework.TestSuiteInfo
}
    39  
// InitCustomDisruptiveTestSuite returns a disruptiveTestSuite that implements the
// storageframework.TestSuite interface, using the caller-supplied test patterns.
func InitCustomDisruptiveTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &disruptiveTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name: "disruptive",
			// The suite restarts the kubelet via SSH, which is only exercised on Linux nodes.
			TestTags:     []interface{}{framework.WithDisruptive(), framework.WithLabel("LinuxOnly")},
			TestPatterns: patterns,
		},
	}
}
    51  
// InitDisruptiveTestSuite returns a disruptiveTestSuite that implements the
// storageframework.TestSuite interface, using the suite's default test patterns.
func InitDisruptiveTestSuite() storageframework.TestSuite {
	testPatterns := []storageframework.TestPattern{
		// FSVolMode is already covered in subpath testsuite
		storageframework.DefaultFsInlineVolume,
		storageframework.FsVolModePreprovisionedPV,
		storageframework.FsVolModeDynamicPV,
		storageframework.BlockVolModePreprovisionedPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomDisruptiveTestSuite(testPatterns)
}
    65  
// GetTestSuiteInfo returns the suite's TestSuiteInfo (name, tags, and patterns).
func (s *disruptiveTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return s.tsInfo
}
    69  
// SkipUnsupportedTests skips the suite for driver/pattern combinations it cannot run:
// drivers without block support when the pattern uses block volume mode, and
// environments without an SSH key (the suite restarts the kubelet over SSH).
func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	// NOTE(review): presumably skips patterns whose volume type the driver does not
	// support, checked against the PreprovisionedPV volume type map — confirm in
	// skipVolTypePatterns' definition.
	skipVolTypePatterns(pattern, driver, storageframework.NewVolTypeMap(storageframework.PreprovisionedPV))
	if pattern.VolMode == v1.PersistentVolumeBlock && !driver.GetDriverInfo().Capabilities[storageframework.CapBlock] {
		e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", driver.GetDriverInfo().Name, pattern.VolMode)
	}
	e2eskipper.SkipUnlessSSHKeyPresent()
}
    77  
    78  func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
    79  	type local struct {
    80  		config *storageframework.PerTestConfig
    81  
    82  		cs clientset.Interface
    83  		ns *v1.Namespace
    84  
    85  		// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
    86  		resource *storageframework.VolumeResource
    87  		pod      *v1.Pod
    88  	}
    89  	var l local
    90  
    91  	// Beware that it also registers an AfterEach which renders f unusable. Any code using
    92  	// f must run inside an It or Context callback.
    93  	f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver))
    94  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
    95  
    96  	init := func(ctx context.Context, accessModes []v1.PersistentVolumeAccessMode) {
    97  		l = local{}
    98  		l.ns = f.Namespace
    99  		l.cs = f.ClientSet
   100  
   101  		// Now do the more expensive test initialization.
   102  		l.config = driver.PrepareTest(ctx, f)
   103  
   104  		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
   105  		if accessModes == nil {
   106  			l.resource = storageframework.CreateVolumeResource(
   107  				ctx,
   108  				driver,
   109  				l.config,
   110  				pattern,
   111  				testVolumeSizeRange)
   112  		} else {
   113  			l.resource = storageframework.CreateVolumeResourceWithAccessModes(
   114  				ctx,
   115  				driver,
   116  				l.config,
   117  				pattern,
   118  				testVolumeSizeRange,
   119  				accessModes)
   120  		}
   121  	}
   122  
   123  	cleanup := func(ctx context.Context) {
   124  		var errs []error
   125  		if l.pod != nil {
   126  			ginkgo.By("Deleting pod")
   127  			err := e2epod.DeletePodWithWait(ctx, f.ClientSet, l.pod)
   128  			errs = append(errs, err)
   129  			l.pod = nil
   130  		}
   131  
   132  		if l.resource != nil {
   133  			err := l.resource.CleanupResource(ctx)
   134  			errs = append(errs, err)
   135  			l.resource = nil
   136  		}
   137  
   138  		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
   139  	}
   140  
   141  	type singlePodTestBody func(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, mountPath string)
   142  	type singlePodTest struct {
   143  		testItStmt   string
   144  		runTestFile  singlePodTestBody
   145  		runTestBlock singlePodTestBody
   146  	}
   147  	singlePodTests := []singlePodTest{
   148  		{
   149  			testItStmt:   "Should test that pv written before kubelet restart is readable after restart.",
   150  			runTestFile:  storageutils.TestKubeletRestartsAndRestoresMount,
   151  			runTestBlock: storageutils.TestKubeletRestartsAndRestoresMap,
   152  		},
   153  		{
   154  			testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down cleans up when the kubelet returns.",
   155  			// File test is covered by subpath testsuite
   156  			runTestBlock: storageutils.TestVolumeUnmapsFromDeletedPod,
   157  		},
   158  		{
   159  			testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down cleans up when the kubelet returns.",
   160  			// File test is covered by subpath testsuite
   161  			runTestBlock: storageutils.TestVolumeUnmapsFromForceDeletedPod,
   162  		},
   163  	}
   164  
   165  	for _, test := range singlePodTests {
   166  		func(t singlePodTest) {
   167  			if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) ||
   168  				(pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) {
   169  				ginkgo.It(t.testItStmt, func(ctx context.Context) {
   170  					init(ctx, nil)
   171  					ginkgo.DeferCleanup(cleanup)
   172  
   173  					var err error
   174  					var pvcs []*v1.PersistentVolumeClaim
   175  					var inlineSources []*v1.VolumeSource
   176  					if pattern.VolType == storageframework.InlineVolume {
   177  						inlineSources = append(inlineSources, l.resource.VolSource)
   178  					} else {
   179  						pvcs = append(pvcs, l.resource.Pvc)
   180  					}
   181  					ginkgo.By("Creating a pod with pvc")
   182  					podConfig := e2epod.Config{
   183  						NS:                  l.ns.Name,
   184  						PVCs:                pvcs,
   185  						InlineVolumeSources: inlineSources,
   186  						SeLinuxLabel:        e2epv.SELinuxLabel,
   187  						NodeSelection:       l.config.ClientNodeSelection,
   188  						ImageID:             e2epod.GetDefaultTestImageID(),
   189  					}
   190  					l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, l.cs, &podConfig, f.Timeouts.PodStart)
   191  					framework.ExpectNoError(err, "While creating pods for kubelet restart test")
   192  
   193  					if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
   194  						t.runTestBlock(ctx, l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1)
   195  					}
   196  					if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
   197  						t.runTestFile(ctx, l.cs, l.config.Framework, l.pod, e2epod.VolumeMountPath1)
   198  					}
   199  				})
   200  			}
   201  		}(test)
   202  	}
   203  	type multiplePodTestBody func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod)
   204  	type multiplePodTest struct {
   205  		testItStmt            string
   206  		changeSELinuxContexts bool
   207  		runTestFile           multiplePodTestBody
   208  	}
   209  	multiplePodTests := []multiplePodTest{
   210  		{
   211  			testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns",
   212  			runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
   213  				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1)
   214  			},
   215  		},
   216  		{
   217  			testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns",
   218  			runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
   219  				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1)
   220  			},
   221  		},
   222  		{
   223  			testItStmt:            "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns",
   224  			changeSELinuxContexts: true,
   225  			runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
   226  				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, false, false, pod2, e2epod.VolumeMountPath1)
   227  			},
   228  		},
   229  		{
   230  			testItStmt:            "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns",
   231  			changeSELinuxContexts: true,
   232  			runTestFile: func(ctx context.Context, c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
   233  				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, pod1, true, false, pod2, e2epod.VolumeMountPath1)
   234  			},
   235  		},
   236  	}
   237  
   238  	for _, test := range multiplePodTests {
   239  		func(t multiplePodTest) {
   240  			if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
   241  				f.It(t.testItStmt, feature.SELinux, func(ctx context.Context) {
   242  					init(ctx, []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod})
   243  					ginkgo.DeferCleanup(cleanup)
   244  
   245  					var err error
   246  					var pvcs []*v1.PersistentVolumeClaim
   247  					var inlineSources []*v1.VolumeSource
   248  					if pattern.VolType == storageframework.InlineVolume {
   249  						inlineSources = append(inlineSources, l.resource.VolSource)
   250  					} else {
   251  						pvcs = append(pvcs, l.resource.Pvc)
   252  					}
   253  					ginkgo.By("Creating a pod with pvc")
   254  					podConfig := e2epod.Config{
   255  						NS:                  l.ns.Name,
   256  						PVCs:                pvcs,
   257  						InlineVolumeSources: inlineSources,
   258  						SeLinuxLabel:        e2epv.SELinuxLabel,
   259  						NodeSelection:       l.config.ClientNodeSelection,
   260  						ImageID:             e2epod.GetDefaultTestImageID(),
   261  					}
   262  					l.pod, err = e2epod.CreateSecPodWithNodeSelection(ctx, l.cs, &podConfig, f.Timeouts.PodStart)
   263  					framework.ExpectNoError(err, "While creating pods for kubelet restart test")
   264  					if t.changeSELinuxContexts {
   265  						// Different than e2epv.SELinuxLabel
   266  						podConfig.SeLinuxLabel = &v1.SELinuxOptions{Level: "s0:c98,c99"}
   267  					}
   268  					pod2, err := e2epod.MakeSecPod(&podConfig)
   269  					// Instantly schedule the second pod on the same node as the first one.
   270  					pod2.Spec.NodeName = l.pod.Spec.NodeName
   271  					framework.ExpectNoError(err, "While creating second pod for kubelet restart test")
   272  					if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
   273  						t.runTestFile(ctx, l.cs, l.config.Framework, l.pod, pod2)
   274  					}
   275  				})
   276  			}
   277  		}(test)
   278  	}
   279  }
   280  

View as plain text