
Source file src/k8s.io/kubernetes/test/e2e/storage/testsuites/readwriteoncepod.go

Documentation: k8s.io/kubernetes/test/e2e/storage/testsuites

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	errors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/apimachinery/pkg/util/uuid"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	admissionapi "k8s.io/pod-security-admission/api"
)

type readWriteOncePodTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

var _ storageframework.TestSuite = &readWriteOncePodTestSuite{}

type readWriteOncePodTest struct {
	config *storageframework.PerTestConfig

	cs            clientset.Interface
	volume        *storageframework.VolumeResource
	pods          []*v1.Pod
	priorityClass *schedulingv1.PriorityClass

	migrationCheck *migrationOpCheck
}
// InitCustomReadWriteOncePodTestSuite returns a test suite for the ReadWriteOncePod
// PersistentVolume access mode feature, using the supplied test patterns.
func InitCustomReadWriteOncePodTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &readWriteOncePodTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "read-write-once-pod",
			TestPatterns: patterns,
			TestTags:     []interface{}{framework.WithLabel("MinimumKubeletVersion:1.27")},
		},
	}
}

// InitReadWriteOncePodTestSuite returns a test suite for the ReadWriteOncePod PersistentVolume access mode feature.
func InitReadWriteOncePodTestSuite() storageframework.TestSuite {
	// Only covers one test pattern since ReadWriteOncePod enforcement is
	// handled through Kubernetes and does not differ across volume types.
	patterns := []storageframework.TestPattern{storageframework.DefaultFsDynamicPV}
	return InitCustomReadWriteOncePodTestSuite(patterns)
}
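
// The function below is an illustrative sketch and is not part of the upstream
// file: it shows how a driver-specific package could reuse
// InitCustomReadWriteOncePodTestSuite to run the suite under additional test
// patterns, even though the default suite above intentionally covers only one.
// BlockVolModeDynamicPV is assumed to be one of the predefined patterns in the
// storageframework package.
func initReadWriteOncePodTestSuiteExample() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DefaultFsDynamicPV,
		storageframework.BlockVolModeDynamicPV,
	}
	return InitCustomReadWriteOncePodTestSuite(patterns)
}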

// GetTestSuiteInfo returns the TestSuiteInfo for this test suite.
func (t *readWriteOncePodTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

// SkipUnsupportedTests skips the tests for drivers that do not report the ReadWriteOncePod capability.
func (t *readWriteOncePodTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	driverInfo := driver.GetDriverInfo()
	if !driverInfo.Capabilities[storageframework.CapReadWriteOncePod] {
		e2eskipper.Skipf("Driver %q doesn't support ReadWriteOncePod - skipping", driverInfo.Name)
	}
}

// DefineTests defines the ReadWriteOncePod tests for the given driver and test pattern.
func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	var (
		driverInfo = driver.GetDriverInfo()
		l          readWriteOncePodTest
	)

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("read-write-once-pod", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	init := func(ctx context.Context) {
		l = readWriteOncePodTest{}
		l.config = driver.PrepareTest(ctx, f)
		l.cs = f.ClientSet
		l.pods = []*v1.Pod{}
		l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), driverInfo.InTreePluginName)
	}

	cleanup := func(ctx context.Context) {
		var errs []error
		for _, pod := range l.pods {
			framework.Logf("Deleting pod %v", pod.Name)
			err := e2epod.DeletePodWithWait(ctx, l.cs, pod)
			errs = append(errs, err)
		}

		framework.Logf("Deleting volume %s", l.volume.Pvc.GetName())
		err := l.volume.CleanupResource(ctx)
		errs = append(errs, err)

		if l.priorityClass != nil {
			framework.Logf("Deleting PriorityClass %v", l.priorityClass.Name)
			err := l.cs.SchedulingV1().PriorityClasses().Delete(ctx, l.priorityClass.Name, metav1.DeleteOptions{})
			errs = append(errs, err)
		}

		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
		l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
	}

	ginkgo.BeforeEach(func(ctx context.Context) {
		init(ctx)
		ginkgo.DeferCleanup(cleanup)
	})

	ginkgo.It("should preempt lower priority pods using ReadWriteOncePod volumes", func(ctx context.Context) {
		// Create the ReadWriteOncePod PVC.
		accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
		l.volume = storageframework.CreateVolumeResourceWithAccessModes(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes)

		l.priorityClass = &schedulingv1.PriorityClass{
			ObjectMeta: metav1.ObjectMeta{Name: "e2e-test-read-write-once-pod-" + string(uuid.NewUUID())},
			Value:      int32(1000),
		}
		_, err := l.cs.SchedulingV1().PriorityClasses().Create(ctx, l.priorityClass, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create priority class")

		podConfig := e2epod.Config{
			NS:           f.Namespace.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.volume.Pvc},
			SeLinuxLabel: e2epv.SELinuxLabel,
		}

		// Create the first pod, which will take ownership of the ReadWriteOncePod PVC.
		pod1, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod1")
		_, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 running status")
		l.pods = append(l.pods, pod1)

		// Create the second pod, which will preempt the first pod because it's using the
		// ReadWriteOncePod PVC and has higher priority.
		pod2, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod2")
		pod2.Spec.PriorityClassName = l.priorityClass.Name
		_, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(ctx, pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod2")
		l.pods = append(l.pods, pod2)

		// Wait for the first pod to be preempted and the second pod to start.
		err = e2epod.WaitForPodNotFoundInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 to be preempted")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 running status")

		// Recreate the first pod, which will fail to schedule because the second pod
		// is using the ReadWriteOncePod PVC and has higher priority.
		_, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod1")
		err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace)
		framework.ExpectNoError(err, "failed to wait for pod1 unschedulable status")

		// Delete the second pod with higher priority and observe the first pod can now start.
		err = e2epod.DeletePodWithWait(ctx, l.cs, pod2)
		framework.ExpectNoError(err, "failed to delete pod2")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 running status")
	})
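
	// Illustrative note, not part of the upstream test: behind
	// CreateVolumeResourceWithAccessModes, the ReadWriteOncePod claim created in
	// the test above (and again in the test below) is an ordinary PVC whose
	// accessModes list contains the single ReadWriteOncePod entry, roughly as
	// follows (the name is a placeholder; storage class and resource requests
	// are omitted and would be required in practice):
	//
	//	pvc := &v1.PersistentVolumeClaim{
	//		ObjectMeta: metav1.ObjectMeta{GenerateName: "read-write-once-pod-"},
	//		Spec: v1.PersistentVolumeClaimSpec{
	//			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
	//		},
	//	}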

	ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func(ctx context.Context) {
		// Create the ReadWriteOncePod PVC.
		accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
		l.volume = storageframework.CreateVolumeResourceWithAccessModes(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes)

		podConfig := e2epod.Config{
			NS:           f.Namespace.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.volume.Pvc},
			SeLinuxLabel: e2epv.SELinuxLabel,
		}

		// Create the first pod, which will take ownership of the ReadWriteOncePod PVC.
		pod1, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod1")
		_, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 running status")
		l.pods = append(l.pods, pod1)

		// Get the node name for the first pod now that it's running.
		pod1, err = l.cs.CoreV1().Pods(pod1.Namespace).Get(ctx, pod1.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "failed to get pod1")
		nodeName := pod1.Spec.NodeName

		// Create the second pod on the same node as the first pod.
		pod2, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod2")
		// Set the node name to that of the first pod.
		// Node name is set to bypass scheduling, which would enforce the access mode otherwise.
		pod2.Spec.NodeName = nodeName
		_, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(ctx, pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod2")
		l.pods = append(l.pods, pod2)

		// Wait for the FailedMount event to be generated for the second pod.
		eventSelector := fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      pod2.Name,
			"involvedObject.namespace": pod2.Namespace,
			"reason":                   events.FailedMountVolume,
		}.AsSelector().String()
		msg := "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
		err = e2eevents.WaitTimeoutForEvent(ctx, l.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for FailedMount event for pod2")

		// Wait for the second pod to fail because it is stuck at container creating.
		reason := "ContainerCreating"
		err = e2epod.WaitForPodContainerToFail(ctx, l.cs, pod2.Namespace, pod2.Name, 0, reason, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 container to fail")

		// Delete the first pod and observe the second pod can now start.
		err = e2epod.DeletePodWithWait(ctx, l.cs, pod1)
		framework.ExpectNoError(err, "failed to delete pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 running status")
	})
}
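
A minimal sketch of how a driver package might wire this suite into its e2e run, modeled on the suite lists used elsewhere under test/e2e/storage. DefineTestSuites and the example identifiers below are assumptions about the surrounding framework rather than part of readwriteoncepod.go.

package storage

import (
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// exampleSuiteInits lists the suites a hypothetical driver wants to exercise;
// the ReadWriteOncePod suite is included alongside any others the driver covers.
var exampleSuiteInits = []func() storageframework.TestSuite{
	testsuites.InitReadWriteOncePodTestSuite,
}

// defineExampleDriverTests registers every listed suite against the given driver.
func defineExampleDriverTests(driver storageframework.TestDriver) {
	storageframework.DefineTestSuites(driver, exampleSuiteInits)
}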
