...

Source file src/k8s.io/kubernetes/pkg/volume/csi/csi_mounter_test.go

Documentation: k8s.io/kubernetes/pkg/volume/csi

     1  /*
     2  Copyright 2017 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package csi
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"math/rand"
    23  	"os"
    24  	"path/filepath"
    25  	"reflect"
    26  	goruntime "runtime"
    27  	"testing"
    28  	"time"
    29  
    30  	"github.com/google/go-cmp/cmp"
    31  	"github.com/stretchr/testify/assert"
    32  	authenticationv1 "k8s.io/api/authentication/v1"
    33  	corev1 "k8s.io/api/core/v1"
    34  	storage "k8s.io/api/storage/v1"
    35  	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    36  	"k8s.io/apimachinery/pkg/runtime"
    37  	"k8s.io/apimachinery/pkg/types"
    38  	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    39  	utilfeature "k8s.io/apiserver/pkg/util/feature"
    40  	fakeclient "k8s.io/client-go/kubernetes/fake"
    41  	clitesting "k8s.io/client-go/testing"
    42  	featuregatetesting "k8s.io/component-base/featuregate/testing"
    43  	pkgauthenticationv1 "k8s.io/kubernetes/pkg/apis/authentication/v1"
    44  	pkgcorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
    45  	pkgstoragev1 "k8s.io/kubernetes/pkg/apis/storage/v1"
    46  	"k8s.io/kubernetes/pkg/features"
    47  	"k8s.io/kubernetes/pkg/volume"
    48  	fakecsi "k8s.io/kubernetes/pkg/volume/csi/fake"
    49  	"k8s.io/kubernetes/pkg/volume/util"
    50  	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
    51  )
    52  
// Shared fixture values used throughout the CSI mounter tests below.
var (
	testDriver  = "test-driver"          // CSI driver name registered with the fake plugin
	testVol     = "vol-123"              // volume handle handed to the fake CSI driver
	testns      = "test-ns"              // namespace of the test pod
	testPod     = "test-pod"             // name of the test pod
	testPodUID  = types.UID("test-pod")  // pod UID (deliberately reuses the pod name)
	testAccount = "test-service-account" // service account of the test pod
)
    61  
    62  func prepareVolumeInfoFile(mountPath string, plug *csiPlugin, specVolumeName, volumeID, driverName, lifecycleMode, seLinuxMountContext string) error {
    63  	nodeName := string(plug.host.GetNodeName())
    64  	volData := map[string]string{
    65  		volDataKey.specVolID:           specVolumeName,
    66  		volDataKey.volHandle:           volumeID,
    67  		volDataKey.driverName:          driverName,
    68  		volDataKey.nodeName:            nodeName,
    69  		volDataKey.attachmentID:        getAttachmentName(volumeID, driverName, nodeName),
    70  		volDataKey.volumeLifecycleMode: lifecycleMode,
    71  		volDataKey.seLinuxMountContext: seLinuxMountContext,
    72  	}
    73  	if err := os.MkdirAll(mountPath, 0755); err != nil {
    74  		return fmt.Errorf("failed to create dir for volume info file: %s", err)
    75  	}
    76  	if err := saveVolumeData(mountPath, volDataFileName, volData); err != nil {
    77  		return fmt.Errorf("failed to save volume info file: %s", err)
    78  	}
    79  	return nil
    80  }
    81  
    82  func TestMounterGetPath(t *testing.T) {
    83  	plug, tmpDir := newTestPlugin(t, nil)
    84  	defer os.RemoveAll(tmpDir)
    85  
    86  	// TODO (vladimirvivien) specName with slashes will not work
    87  	testCases := []struct {
    88  		name           string
    89  		specVolumeName string
    90  		path           string
    91  	}{
    92  		{
    93  			name:           "simple specName",
    94  			specVolumeName: "spec-0",
    95  			path:           filepath.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "spec-0", "/mount")),
    96  		},
    97  		{
    98  			name:           "specName with dots",
    99  			specVolumeName: "test.spec.1",
   100  			path:           filepath.Join(tmpDir, fmt.Sprintf("pods/%s/volumes/kubernetes.io~csi/%s/%s", testPodUID, "test.spec.1", "/mount")),
   101  		},
   102  	}
   103  	for _, tc := range testCases {
   104  		t.Logf("test case: %s", tc.name)
   105  		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
   106  		pv := makeTestPV(tc.specVolumeName, 10, testDriver, testVol)
   107  		spec := volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly)
   108  		mounter, err := plug.NewMounter(
   109  			spec,
   110  			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
   111  			volume.VolumeOptions{},
   112  		)
   113  		if err != nil {
   114  			t.Fatalf("Failed to make a new Mounter: %v", err)
   115  		}
   116  		csiMounter := mounter.(*csiMountMgr)
   117  
   118  		mountPath := csiMounter.GetPath()
   119  
   120  		if tc.path != mountPath {
   121  			t.Errorf("expecting path %s, got %s", tc.path, mountPath)
   122  		}
   123  	}
   124  }
   125  
// TestMounterSetUp exercises csiMountMgr.SetUp against a fake CSI driver
// client and verifies, per case:
//   - the volumeContext sent on NodePublishVolume (pod info keys are added
//     only when the registered CSIDriver object requests pod info on mount),
//   - the mount options (an SELinux "context=..." option is appended only
//     when the feature gate is enabled and the driver supports it),
//   - that the volume data file is written with the expected keys.
func TestMounterSetUp(t *testing.T) {
	tests := []struct {
		name                     string
		driver                   string            // CSI driver name to look up in the fake clientset
		volumeContext            map[string]string // pre-existing PV volume attributes, if any
		seLinuxLabel             string            // pod SELinux label passed via MounterArgs
		enableSELinuxFeatureGate bool
		expectedSELinuxContext   string // mount option expected to be appended, if any
		expectedVolumeContext    map[string]string
	}{
		{
			name:                  "no pod info",
			driver:                "no-info",
			volumeContext:         nil,
			expectedVolumeContext: nil,
		},
		{
			name:                  "no CSIDriver -> no pod info",
			driver:                "unknown-driver",
			volumeContext:         nil,
			expectedVolumeContext: nil,
		},
		{
			name:                  "CSIDriver with PodInfoRequiredOnMount=nil -> no pod info",
			driver:                "nil",
			volumeContext:         nil,
			expectedVolumeContext: nil,
		},
		{
			name:                  "no pod info -> keep existing volumeContext",
			driver:                "no-info",
			volumeContext:         map[string]string{"foo": "bar"},
			expectedVolumeContext: map[string]string{"foo": "bar"},
		},
		{
			name:                  "add pod info",
			driver:                "info",
			volumeContext:         nil,
			expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns", "csi.storage.k8s.io/ephemeral": "false"},
		},
		{
			name:                  "add pod info -> keep existing volumeContext",
			driver:                "info",
			volumeContext:         map[string]string{"foo": "bar"},
			expectedVolumeContext: map[string]string{"foo": "bar", "csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns", "csi.storage.k8s.io/ephemeral": "false"},
		},
		{
			name:                  "CSIInlineVolume pod info",
			driver:                "info",
			volumeContext:         nil,
			expectedVolumeContext: map[string]string{"csi.storage.k8s.io/pod.uid": "test-pod", "csi.storage.k8s.io/serviceAccount.name": "test-service-account", "csi.storage.k8s.io/pod.name": "test-pod", "csi.storage.k8s.io/pod.namespace": "test-ns", "csi.storage.k8s.io/ephemeral": "false"},
		},
		{
			name:                     "should include SELinux mount options, if feature-gate is enabled and driver supports it",
			driver:                   "supports_selinux",
			volumeContext:            nil,
			seLinuxLabel:             "s0,c0",
			expectedSELinuxContext:   "context=\"s0,c0\"",
			enableSELinuxFeatureGate: true,
			expectedVolumeContext:    nil,
		},
		{
			name:                     "should not include selinux mount options, if feature gate is enabled but driver does not support it",
			driver:                   "no_selinux",
			seLinuxLabel:             "s0,c0",
			volumeContext:            nil,
			enableSELinuxFeatureGate: true,
			expectedVolumeContext:    nil,
		},
		{
			name:                     "should not include selinux mount option, if feature gate is enabled but CSIDriver does not exist",
			driver:                   "not_found_selinux",
			seLinuxLabel:             "s0,c0",
			volumeContext:            nil,
			enableSELinuxFeatureGate: true,
			expectedVolumeContext:    nil,
		},
		{
			name:                     "should not include selinux mount options, if feature gate is enabled, driver supports it, but Pod does not have it",
			driver:                   "supports_selinux",
			seLinuxLabel:             "",
			expectedSELinuxContext:   "", // especially make sure the volume plugin does not use -o context="", that is an invalid value
			volumeContext:            nil,
			enableSELinuxFeatureGate: true,
			expectedVolumeContext:    nil,
		},
	}

	noPodMountInfo := false
	currentPodInfoMount := true
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// Flip the feature gate per test case; restored when the subtest ends.
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, test.enableSELinuxFeatureGate)()

			modes := []storage.VolumeLifecycleMode{
				storage.VolumeLifecyclePersistent,
			}
			// "unknown-driver" and "not_found_selinux" are deliberately NOT
			// registered here so their CSIDriver lookups fail, matching the
			// "no CSIDriver" / "does not exist" test cases above.
			// NOTE(review): supports_selinux and no_selinux are constructed
			// with identical arguments; presumably getTestCSIDriver (or code
			// outside this view) sets SELinuxMount differently — confirm.
			fakeClient := fakeclient.NewSimpleClientset(
				getTestCSIDriver("no-info", &noPodMountInfo, nil, modes),
				getTestCSIDriver("info", &currentPodInfoMount, nil, modes),
				getTestCSIDriver("nil", nil, nil, modes),
				getTestCSIDriver("supports_selinux", &noPodMountInfo, nil, modes),
				getTestCSIDriver("no_selinux", &noPodMountInfo, nil, modes),
			)
			plug, tmpDir := newTestPlugin(t, fakeClient)
			defer os.RemoveAll(tmpDir)

			registerFakePlugin(test.driver, "endpoint", []string{"1.0.0"}, t)
			pv := makeTestPV("test-pv", 10, test.driver, testVol)
			pv.Spec.CSI.VolumeAttributes = test.volumeContext
			pv.Spec.MountOptions = []string{"foo=bar", "baz=qux"}
			pvName := pv.GetName()

			mounter, err := plug.NewMounter(
				volume.NewSpecFromPersistentVolume(pv, pv.Spec.PersistentVolumeSource.CSI.ReadOnly),
				&corev1.Pod{
					ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
					Spec: corev1.PodSpec{
						ServiceAccountName: testAccount,
					},
				},
				volume.VolumeOptions{},
			)
			if err != nil {
				t.Fatalf("failed to make a new Mounter: %v", err)
			}

			if mounter == nil {
				t.Fatal("failed to create CSI mounter")
			}

			// Swap in the fake CSI driver client so NodePublishVolume calls
			// can be inspected later.
			csiMounter := mounter.(*csiMountMgr)
			csiMounter.csiClient = setupClient(t, true)

			attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))

			// SetUp expects a VolumeAttachment to exist for this volume/node.
			attachment := &storage.VolumeAttachment{
				ObjectMeta: meta.ObjectMeta{
					Name: attachID,
				},
				Spec: storage.VolumeAttachmentSpec{
					NodeName: "test-node",
					Attacher: CSIPluginName,
					Source: storage.VolumeAttachmentSource{
						PersistentVolumeName: &pvName,
					},
				},
				Status: storage.VolumeAttachmentStatus{
					Attached:    false,
					AttachError: nil,
					DetachError: nil,
				},
			}
			_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, meta.CreateOptions{})
			if err != nil {
				t.Fatalf("failed to setup VolumeAttachment: %v", err)
			}

			// Mounter.SetUp()
			var mounterArgs volume.MounterArgs
			fsGroup := int64(2000)
			mounterArgs.FsGroup = &fsGroup

			if test.seLinuxLabel != "" {
				mounterArgs.SELinuxLabel = test.seLinuxLabel
			}

			// Expected NodePublish mount flags: PV mount options plus, when
			// applicable, the SELinux context option.
			expectedMountOptions := pv.Spec.MountOptions

			if test.expectedSELinuxContext != "" {
				expectedMountOptions = append(expectedMountOptions, test.expectedSELinuxContext)
			}

			if err := csiMounter.SetUp(mounterArgs); err != nil {
				t.Fatalf("mounter.Setup failed: %v", err)
			}
			//Test the default value of file system type is not overridden
			if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != 0 {
				t.Errorf("default value of file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
			}

			mountPath := csiMounter.GetPath()
			if _, err := os.Stat(mountPath); err != nil {
				if os.IsNotExist(err) {
					t.Errorf("SetUp() failed, volume path not created: %s", mountPath)
				} else {
					t.Errorf("SetUp() failed: %v", err)
				}
			}

			// ensure call went all the way
			pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
			vol, ok := pubs[csiMounter.volumeID]
			if !ok {
				t.Error("csi server may not have received NodePublishVolume call")
			}
			if vol.Path != csiMounter.GetPath() {
				t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
			}
			if !reflect.DeepEqual(vol.MountFlags, expectedMountOptions) {
				t.Errorf("csi server expected mount options %v, got %v", expectedMountOptions, vol.MountFlags)
			}
			if !reflect.DeepEqual(vol.VolumeContext, test.expectedVolumeContext) {
				t.Errorf("csi server expected volumeContext %+v, got %+v", test.expectedVolumeContext, vol.VolumeContext)
			}

			// ensure data file is created
			dataDir := filepath.Dir(mounter.GetPath())
			dataFile := filepath.Join(dataDir, volDataFileName)
			if _, err := os.Stat(dataFile); err != nil {
				if os.IsNotExist(err) {
					t.Errorf("data file not created %s", dataFile)
				} else {
					t.Fatal(err)
				}
			}
			// Verify the persisted volume data round-trips and matches the mounter state.
			data, err := loadVolumeData(dataDir, volDataFileName)
			if err != nil {
				t.Fatal(err)
			}
			if data[volDataKey.specVolID] != csiMounter.spec.Name() {
				t.Error("volume data file unexpected specVolID:", data[volDataKey.specVolID])
			}
			if data[volDataKey.volHandle] != csiMounter.volumeID {
				t.Error("volume data file unexpected volHandle:", data[volDataKey.volHandle])
			}
			if data[volDataKey.driverName] != string(csiMounter.driverName) {
				t.Error("volume data file unexpected driverName:", data[volDataKey.driverName])
			}
			if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
				t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
			}
			if data[volDataKey.volumeLifecycleMode] != string(csiMounter.volumeLifecycleMode) {
				t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
			}

		})
	}
}
   365  
// TestMounterSetUpSimple covers the basic NewMounter + SetUp flow for
// ephemeral and persistent specs, plus two failure paths: a nil spec (which
// fails at NewMounter) and an unknown CSI driver with the client cleared
// (which fails SetUp with a transient error, so the operation can be retried).
func TestMounterSetUpSimple(t *testing.T) {
	fakeClient := fakeclient.NewSimpleClientset()
	plug, tmpDir := newTestPlugin(t, fakeClient)
	transientError := volumetypes.NewTransientOperationFailure("")
	defer os.RemoveAll(tmpDir)

	testCases := []struct {
		name                 string
		podUID               types.UID
		mode                 storage.VolumeLifecycleMode // lifecycle mode the mounter should report
		fsType               string
		options              []string
		spec                 func(string, []string) *volume.Spec // builds the spec under test
		newMounterShouldFail bool
		setupShouldFail      bool
		unsetClient          bool  // clear the CSI clients before SetUp to force a client error
		exitError            error // expected error type (compared by reflect.TypeOf)
	}{
		{
			name:            "setup with ephemeral source",
			podUID:          types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			mode:            storage.VolumeLifecycleEphemeral,
			fsType:          "ext4",
			setupShouldFail: true,
			spec: func(fsType string, options []string) *volume.Spec {
				volSrc := makeTestVol("pv1", testDriver)
				volSrc.CSI.FSType = &fsType
				return volume.NewSpecFromVolume(volSrc)
			},
		},
		{
			name:   "setup with persistent source",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			mode:   storage.VolumeLifecyclePersistent,
			fsType: "zfs",
			spec: func(fsType string, options []string) *volume.Spec {
				pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
				pvSrc.Spec.CSI.FSType = fsType
				pvSrc.Spec.MountOptions = options
				return volume.NewSpecFromPersistentVolume(pvSrc, false)
			},
		},
		{
			name:   "setup with persistent source without unspecified fstype and options",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			mode:   storage.VolumeLifecyclePersistent,
			spec: func(fsType string, options []string) *volume.Spec {
				return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
			},
		},
		{
			name:                 "setup with missing spec",
			newMounterShouldFail: true,
			spec:                 func(fsType string, options []string) *volume.Spec { return nil },
		},
		{
			name:   "setup with unknown CSI driver",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			mode:   storage.VolumeLifecyclePersistent,
			fsType: "zfs",
			spec: func(fsType string, options []string) *volume.Spec {
				pvSrc := makeTestPV("pv1", 20, "unknown-driver", "vol1")
				pvSrc.Spec.CSI.FSType = fsType
				pvSrc.Spec.MountOptions = options
				return volume.NewSpecFromPersistentVolume(pvSrc, false)
			},
			setupShouldFail: true,
			unsetClient:     true,
			exitError:       transientError,
		},
	}

	for _, tc := range testCases {
		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
		t.Run(tc.name, func(t *testing.T) {
			mounter, err := plug.NewMounter(
				tc.spec(tc.fsType, tc.options),
				&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
				volume.VolumeOptions{},
			)
			// NewMounter failure is terminal for the "missing spec" case.
			if tc.newMounterShouldFail && err != nil {
				t.Log(err)
				return
			}
			if !tc.newMounterShouldFail && err != nil {
				t.Fatal("unexpected error:", err)
			}
			if mounter == nil {
				t.Fatal("failed to create CSI mounter")
			}

			csiMounter := mounter.(*csiMountMgr)
			csiMounter.csiClient = setupClient(t, true)

			if csiMounter.volumeLifecycleMode != tc.mode {
				t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
			}

			// SetUp expects a VolumeAttachment to exist for this volume/node.
			attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
			attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
			_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, meta.CreateOptions{})
			if err != nil {
				t.Fatalf("failed to setup VolumeAttachment: %v", err)
			}

			if tc.unsetClient {
				// Clear out the clients
				csiMounter.csiClient = nil
				csiMounter.csiClientGetter.csiClient = nil
				t.Log("driver name is ", csiMounter.csiClientGetter.driverName)
			}

			// Mounter.SetUp()
			err = csiMounter.SetUp(volume.MounterArgs{})
			if tc.setupShouldFail {
				if err != nil {
					// Error type (not value) is compared, so transient
					// failures can be distinguished from final ones.
					if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) {
						t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(tc.exitError), reflect.TypeOf(err), err)
					}
					t.Log(err)
					return
				} else {
					t.Error("test should fail, but no error occurred")
				}
			} else if err != nil {
				t.Fatal("unexpected error:", err)
			}

			// ensure call went all the way
			pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
			vol, ok := pubs[csiMounter.volumeID]
			if !ok {
				t.Error("csi server may not have received NodePublishVolume call")
			}
			if vol.VolumeHandle != csiMounter.volumeID {
				t.Error("volumeHandle not sent to CSI driver properly")
			}

			devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
			if err != nil {
				t.Fatal(err)
			}
			if vol.DeviceMountPath != devicePath {
				t.Errorf("DeviceMountPath not sent properly to CSI driver: %s, %s", vol.DeviceMountPath, devicePath)
			}

			if !reflect.DeepEqual(vol.MountFlags, csiMounter.spec.PersistentVolume.Spec.MountOptions) {
				t.Errorf("unexpected mount flags passed to driver: %+v", vol.MountFlags)
			}

			if vol.FSType != tc.fsType {
				t.Error("unexpected FSType sent to driver:", vol.FSType)
			}

			if vol.Path != csiMounter.GetPath() {
				t.Error("csi server may not have received NodePublishVolume call")
			}

			// ensure data file is created
			dataDir := filepath.Dir(mounter.GetPath())
			dataFile := filepath.Join(dataDir, volDataFileName)
			if _, err := os.Stat(dataFile); err != nil {
				if os.IsNotExist(err) {
					t.Errorf("data file not created %s", dataFile)
				} else {
					t.Fatal(err)
				}
			}
			// Verify the persisted volume data matches the mounter state.
			data, err := loadVolumeData(dataDir, volDataFileName)
			if err != nil {
				t.Fatal(err)
			}
			if data[volDataKey.specVolID] != csiMounter.spec.Name() {
				t.Error("volume data file unexpected specVolID:", data[volDataKey.specVolID])
			}
			if data[volDataKey.volHandle] != csiMounter.volumeID {
				t.Error("volume data file unexpected volHandle:", data[volDataKey.volHandle])
			}
			if data[volDataKey.driverName] != string(csiMounter.driverName) {
				t.Error("volume data file unexpected driverName:", data[volDataKey.driverName])
			}
			if data[volDataKey.nodeName] != string(csiMounter.plugin.host.GetNodeName()) {
				t.Error("volume data file unexpected nodeName:", data[volDataKey.nodeName])
			}
			if data[volDataKey.volumeLifecycleMode] != string(tc.mode) {
				t.Error("volume data file unexpected volumeLifecycleMode:", data[volDataKey.volumeLifecycleMode])
			}
		})
	}
}
   556  
// TestMounterSetupWithStatusTracking verifies that SetUp surfaces the right
// error class for operation-status tracking: a TransientOperationFailure for
// a missing attachment or missing secrets (safe to retry from scratch), and
// an UncertainProgressError for a NodePublish timeout (the mount may or may
// not have happened). Error classes are compared by type, not value.
func TestMounterSetupWithStatusTracking(t *testing.T) {
	fakeClient := fakeclient.NewSimpleClientset()
	plug, tmpDir := newTestPlugin(t, fakeClient)
	defer os.RemoveAll(tmpDir)
	nonFinalError := volumetypes.NewUncertainProgressError("non-final-error")
	transientError := volumetypes.NewTransientOperationFailure("transient-error")

	testCases := []struct {
		name             string
		podUID           types.UID
		spec             func(string, []string) *volume.Spec // builds the spec under test
		shouldFail       bool
		exitError        error // expected error type from SetUp, nil for success
		createAttachment bool  // whether to pre-create the VolumeAttachment
	}{
		{
			name:   "setup with correct persistent volume source should result in finish exit status",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			spec: func(fsType string, options []string) *volume.Spec {
				pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
				pvSrc.Spec.CSI.FSType = fsType
				pvSrc.Spec.MountOptions = options
				return volume.NewSpecFromPersistentVolume(pvSrc, false)
			},
			createAttachment: true,
		},
		{
			name:   "setup with missing attachment should result in nochange",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			spec: func(fsType string, options []string) *volume.Spec {
				return volume.NewSpecFromPersistentVolume(makeTestPV("pv3", 20, testDriver, "vol4"), false)
			},
			exitError:        transientError,
			createAttachment: false,
			shouldFail:       true,
		},
		{
			// The fake driver recognizes this special volume ID and times out
			// the NodePublish call.
			name:   "setup with timeout errors on NodePublish",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			spec: func(fsType string, options []string) *volume.Spec {
				return volume.NewSpecFromPersistentVolume(makeTestPV("pv4", 20, testDriver, fakecsi.NodePublishTimeOut_VolumeID), false)
			},
			createAttachment: true,
			exitError:        nonFinalError,
			shouldFail:       true,
		},
		{
			// The referenced secret is never created in the fake clientset.
			name:   "setup with missing secrets should result in nochange exit",
			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
			spec: func(fsType string, options []string) *volume.Spec {
				pv := makeTestPV("pv5", 20, testDriver, "vol6")
				pv.Spec.PersistentVolumeSource.CSI.NodePublishSecretRef = &corev1.SecretReference{
					Name:      "foo",
					Namespace: "default",
				}
				return volume.NewSpecFromPersistentVolume(pv, false)
			},
			exitError:        transientError,
			createAttachment: true,
			shouldFail:       true,
		},
	}

	for _, tc := range testCases {
		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
		t.Run(tc.name, func(t *testing.T) {
			mounter, err := plug.NewMounter(
				tc.spec("ext4", []string{}),
				&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
				volume.VolumeOptions{},
			)
			if err != nil {
				t.Fatalf("failed to create CSI mounter: %v", err)
			}

			// Swap in the fake CSI driver client.
			csiMounter := mounter.(*csiMountMgr)
			csiMounter.csiClient = setupClient(t, true)

			if csiMounter.volumeLifecycleMode != storage.VolumeLifecyclePersistent {
				t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
			}

			if tc.createAttachment {
				attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
				attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
				_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, meta.CreateOptions{})
				if err != nil {
					t.Fatalf("failed to setup VolumeAttachment: %v", err)
				}
			}
			err = csiMounter.SetUp(volume.MounterArgs{})

			// Compare error *types* so the error class (transient vs
			// uncertain progress) is what is asserted, not the message.
			if tc.exitError != nil && reflect.TypeOf(tc.exitError) != reflect.TypeOf(err) {
				t.Fatalf("expected exitError: %+v got: %+v", tc.exitError, err)
			}

			if tc.shouldFail && err == nil {
				t.Fatalf("expected failure but Setup succeeded")
			}

			if !tc.shouldFail && err != nil {
				t.Fatalf("expected success got mounter.Setup failed with: %v", err)
			}
		})
	}
}
   663  
   664  func TestMounterSetUpWithInline(t *testing.T) {
   665  	testCases := []struct {
   666  		name       string
   667  		podUID     types.UID
   668  		mode       storage.VolumeLifecycleMode
   669  		fsType     string
   670  		options    []string
   671  		spec       func(string, []string) *volume.Spec
   672  		shouldFail bool
   673  	}{
   674  		{
   675  			name:   "setup with vol source",
   676  			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
   677  			mode:   storage.VolumeLifecycleEphemeral,
   678  			fsType: "ext4",
   679  			spec: func(fsType string, options []string) *volume.Spec {
   680  				volSrc := makeTestVol("pv1", testDriver)
   681  				volSrc.CSI.FSType = &fsType
   682  				return volume.NewSpecFromVolume(volSrc)
   683  			},
   684  		},
   685  		{
   686  			name:   "setup with persistent source",
   687  			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
   688  			mode:   storage.VolumeLifecyclePersistent,
   689  			fsType: "zfs",
   690  			spec: func(fsType string, options []string) *volume.Spec {
   691  				pvSrc := makeTestPV("pv1", 20, testDriver, "vol1")
   692  				pvSrc.Spec.CSI.FSType = fsType
   693  				pvSrc.Spec.MountOptions = options
   694  				return volume.NewSpecFromPersistentVolume(pvSrc, false)
   695  			},
   696  		},
   697  		{
   698  			name:   "setup with persistent source without unspecified fstype and options",
   699  			podUID: types.UID(fmt.Sprintf("%08X", rand.Uint64())),
   700  			mode:   storage.VolumeLifecyclePersistent,
   701  			spec: func(fsType string, options []string) *volume.Spec {
   702  				return volume.NewSpecFromPersistentVolume(makeTestPV("pv1", 20, testDriver, "vol2"), false)
   703  			},
   704  		},
   705  		{
   706  			name:       "setup with missing spec",
   707  			shouldFail: true,
   708  			spec:       func(fsType string, options []string) *volume.Spec { return nil },
   709  		},
   710  	}
   711  
   712  	for _, tc := range testCases {
   713  		// The fake driver currently supports all modes.
   714  		volumeLifecycleModes := []storage.VolumeLifecycleMode{
   715  			storage.VolumeLifecycleEphemeral,
   716  			storage.VolumeLifecyclePersistent,
   717  		}
   718  		driver := getTestCSIDriver(testDriver, nil, nil, volumeLifecycleModes)
   719  		fakeClient := fakeclient.NewSimpleClientset(driver)
   720  		plug, tmpDir := newTestPlugin(t, fakeClient)
   721  		defer os.RemoveAll(tmpDir)
   722  		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
   723  		t.Run(tc.name, func(t *testing.T) {
   724  			mounter, err := plug.NewMounter(
   725  				tc.spec(tc.fsType, tc.options),
   726  				&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
   727  				volume.VolumeOptions{},
   728  			)
   729  			if tc.shouldFail && err != nil {
   730  				t.Log(err)
   731  				return
   732  			}
   733  			if !tc.shouldFail && err != nil {
   734  				t.Fatal("unexpected error:", err)
   735  			}
   736  			if mounter == nil {
   737  				t.Fatal("failed to create CSI mounter")
   738  			}
   739  
   740  			csiMounter := mounter.(*csiMountMgr)
   741  			csiMounter.csiClient = setupClient(t, true)
   742  
   743  			if csiMounter.volumeLifecycleMode != tc.mode {
   744  				t.Fatal("unexpected volume mode: ", csiMounter.volumeLifecycleMode)
   745  			}
   746  
   747  			if csiMounter.volumeLifecycleMode == storage.VolumeLifecycleEphemeral && csiMounter.volumeID != makeVolumeHandle(string(tc.podUID), csiMounter.specVolumeID) {
   748  				t.Fatal("unexpected generated volumeHandle:", csiMounter.volumeID)
   749  			}
   750  
   751  			if csiMounter.volumeLifecycleMode == storage.VolumeLifecyclePersistent {
   752  				attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
   753  				attachment := makeTestAttachment(attachID, "test-node", csiMounter.spec.Name())
   754  				_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, meta.CreateOptions{})
   755  				if err != nil {
   756  					t.Fatalf("failed to setup VolumeAttachment: %v", err)
   757  				}
   758  			}
   759  
   760  			// Mounter.SetUp()
   761  			if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
   762  				t.Fatalf("mounter.Setup failed: %v", err)
   763  			}
   764  
   765  			// ensure call went all the way
   766  			pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
   767  			vol, ok := pubs[csiMounter.volumeID]
   768  			if !ok {
   769  				t.Error("csi server may not have received NodePublishVolume call")
   770  			}
   771  			if vol.VolumeHandle != csiMounter.volumeID {
   772  				t.Error("volumeHandle not sent to CSI driver properly")
   773  			}
   774  
   775  			// validate stagingTargetPath
   776  			if tc.mode == storage.VolumeLifecycleEphemeral && vol.DeviceMountPath != "" {
   777  				t.Errorf("unexpected devicePathTarget sent to driver: %s", vol.DeviceMountPath)
   778  			}
   779  			if tc.mode == storage.VolumeLifecyclePersistent {
   780  				devicePath, err := makeDeviceMountPath(plug, csiMounter.spec)
   781  				if err != nil {
   782  					t.Fatal(err)
   783  				}
   784  				if vol.DeviceMountPath != devicePath {
   785  					t.Errorf("DeviceMountPath not sent properly to CSI driver: %s, %s", vol.DeviceMountPath, devicePath)
   786  				}
   787  
   788  				if !reflect.DeepEqual(vol.MountFlags, csiMounter.spec.PersistentVolume.Spec.MountOptions) {
   789  					t.Errorf("unexpected mount flags passed to driver: %+v", vol.MountFlags)
   790  				}
   791  			}
   792  
   793  			if vol.FSType != tc.fsType {
   794  				t.Error("unexpected FSType sent to driver:", vol.FSType)
   795  			}
   796  
   797  			if vol.Path != csiMounter.GetPath() {
   798  				t.Error("csi server may not have received NodePublishVolume call")
   799  			}
   800  		})
   801  	}
   802  }
   803  
   804  func TestMounterSetUpWithFSGroup(t *testing.T) {
   805  	fakeClient := fakeclient.NewSimpleClientset()
   806  	plug, tmpDir := newTestPlugin(t, fakeClient)
   807  	defer os.RemoveAll(tmpDir)
   808  
   809  	testCases := []struct {
   810  		name                           string
   811  		accessModes                    []corev1.PersistentVolumeAccessMode
   812  		readOnly                       bool
   813  		fsType                         string
   814  		setFsGroup                     bool
   815  		fsGroup                        int64
   816  		driverFSGroupPolicy            bool
   817  		supportMode                    storage.FSGroupPolicy
   818  		driverSupportsVolumeMountGroup bool
   819  		expectedFSGroupInNodePublish   string
   820  	}{
   821  		{
   822  			name: "default fstype, with no fsgroup (should not apply fsgroup)",
   823  			accessModes: []corev1.PersistentVolumeAccessMode{
   824  				corev1.ReadWriteOnce,
   825  			},
   826  			readOnly: false,
   827  			fsType:   "",
   828  		},
   829  		{
   830  			name: "default fstype  with fsgroup (should not apply fsgroup)",
   831  			accessModes: []corev1.PersistentVolumeAccessMode{
   832  				corev1.ReadWriteOnce,
   833  			},
   834  			readOnly:   false,
   835  			fsType:     "",
   836  			setFsGroup: true,
   837  			fsGroup:    3000,
   838  		},
   839  		{
   840  			name: "fstype, fsgroup, RWM, ROM provided (should not apply fsgroup)",
   841  			accessModes: []corev1.PersistentVolumeAccessMode{
   842  				corev1.ReadWriteMany,
   843  				corev1.ReadOnlyMany,
   844  			},
   845  			fsType:     "ext4",
   846  			setFsGroup: true,
   847  			fsGroup:    3000,
   848  		},
   849  		{
   850  			name: "fstype, fsgroup, RWO, but readOnly (should not apply fsgroup)",
   851  			accessModes: []corev1.PersistentVolumeAccessMode{
   852  				corev1.ReadWriteOnce,
   853  			},
   854  			readOnly:   true,
   855  			fsType:     "ext4",
   856  			setFsGroup: true,
   857  			fsGroup:    3000,
   858  		},
   859  		{
   860  			name: "fstype, fsgroup, RWO provided (should apply fsgroup)",
   861  			accessModes: []corev1.PersistentVolumeAccessMode{
   862  				corev1.ReadWriteOnce,
   863  			},
   864  			fsType:     "ext4",
   865  			setFsGroup: true,
   866  			fsGroup:    3000,
   867  		},
   868  		{
   869  			name: "fstype, fsgroup, RWO provided, FSGroupPolicy ReadWriteOnceWithFSType (should apply fsgroup)",
   870  			accessModes: []corev1.PersistentVolumeAccessMode{
   871  				corev1.ReadWriteOnce,
   872  			},
   873  			fsType:              "ext4",
   874  			setFsGroup:          true,
   875  			fsGroup:             3000,
   876  			driverFSGroupPolicy: true,
   877  			supportMode:         storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
   878  		},
   879  		{
   880  			name: "default fstype with no fsgroup, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
   881  			accessModes: []corev1.PersistentVolumeAccessMode{
   882  				corev1.ReadWriteOnce,
   883  			},
   884  			readOnly:            false,
   885  			fsType:              "",
   886  			driverFSGroupPolicy: true,
   887  			supportMode:         storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
   888  		},
   889  		{
   890  			name: "default fstype with fsgroup, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
   891  			accessModes: []corev1.PersistentVolumeAccessMode{
   892  				corev1.ReadWriteOnce,
   893  			},
   894  			readOnly:            false,
   895  			fsType:              "",
   896  			setFsGroup:          true,
   897  			fsGroup:             3000,
   898  			driverFSGroupPolicy: true,
   899  			supportMode:         storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
   900  		},
   901  		{
   902  			name: "fstype, fsgroup, RWO provided, readonly, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
   903  			accessModes: []corev1.PersistentVolumeAccessMode{
   904  				corev1.ReadWriteOnce,
   905  			},
   906  			readOnly:            true,
   907  			fsType:              "ext4",
   908  			setFsGroup:          true,
   909  			fsGroup:             3000,
   910  			driverFSGroupPolicy: true,
   911  			supportMode:         storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
   912  		},
   913  		{
   914  			name: "fstype, fsgroup, RWX provided, FSGroupPolicy ReadWriteOnceWithFSType (should not apply fsgroup)",
   915  			accessModes: []corev1.PersistentVolumeAccessMode{
   916  				corev1.ReadWriteMany,
   917  			},
   918  			readOnly:            false,
   919  			fsType:              "ext4",
   920  			setFsGroup:          true,
   921  			fsGroup:             3000,
   922  			driverFSGroupPolicy: true,
   923  			supportMode:         storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
   924  		},
   925  		{
   926  			name: "fstype, fsgroup, RWO provided, FSGroupPolicy None (should not apply fsgroup)",
   927  			accessModes: []corev1.PersistentVolumeAccessMode{
   928  				corev1.ReadWriteOnce,
   929  			},
   930  			fsType:              "ext4",
   931  			setFsGroup:          true,
   932  			fsGroup:             3000,
   933  			driverFSGroupPolicy: true,
   934  			supportMode:         storage.NoneFSGroupPolicy,
   935  		},
   936  		{
   937  			name: "fstype, fsgroup, RWO provided, readOnly, FSGroupPolicy File (should apply fsgroup)",
   938  			accessModes: []corev1.PersistentVolumeAccessMode{
   939  				corev1.ReadWriteOnce,
   940  			},
   941  			readOnly:            true,
   942  			fsType:              "ext4",
   943  			setFsGroup:          true,
   944  			fsGroup:             3000,
   945  			driverFSGroupPolicy: true,
   946  			supportMode:         storage.FileFSGroupPolicy,
   947  		},
   948  		{
   949  			name:                           "fsgroup provided, driver supports volume mount group; expect fsgroup to be passed to NodePublishVolume",
   950  			fsType:                         "ext4",
   951  			setFsGroup:                     true,
   952  			fsGroup:                        3000,
   953  			driverSupportsVolumeMountGroup: true,
   954  			expectedFSGroupInNodePublish:   "3000",
   955  		},
   956  		{
   957  			name:                           "fsgroup not provided, driver supports volume mount group; expect fsgroup not to be passed to NodePublishVolume",
   958  			fsType:                         "ext4",
   959  			setFsGroup:                     false,
   960  			driverSupportsVolumeMountGroup: true,
   961  			expectedFSGroupInNodePublish:   "",
   962  		},
   963  		{
   964  			name:                           "fsgroup provided, driver does not support volume mount group; expect fsgroup not to be passed to NodePublishVolume",
   965  			fsType:                         "ext4",
   966  			setFsGroup:                     true,
   967  			fsGroup:                        3000,
   968  			driverSupportsVolumeMountGroup: false,
   969  			expectedFSGroupInNodePublish:   "",
   970  		},
   971  	}
   972  
   973  	for i, tc := range testCases {
   974  		t.Logf("Running test %s", tc.name)
   975  
   976  		volName := fmt.Sprintf("test-vol-%d", i)
   977  		registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
   978  		pv := makeTestPV("test-pv", 10, testDriver, volName)
   979  		pv.Spec.AccessModes = tc.accessModes
   980  		pvName := pv.GetName()
   981  
   982  		spec := volume.NewSpecFromPersistentVolume(pv, tc.readOnly)
   983  
   984  		if tc.fsType != "" {
   985  			spec.PersistentVolume.Spec.CSI.FSType = tc.fsType
   986  		}
   987  
   988  		mounter, err := plug.NewMounter(
   989  			spec,
   990  			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns}},
   991  			volume.VolumeOptions{},
   992  		)
   993  		if err != nil {
   994  			t.Fatalf("Failed to make a new Mounter: %v", err)
   995  		}
   996  
   997  		if mounter == nil {
   998  			t.Fatal("failed to create CSI mounter")
   999  		}
  1000  
  1001  		csiMounter := mounter.(*csiMountMgr)
  1002  		csiMounter.csiClient = setupClientWithVolumeMountGroup(t, true /* stageUnstageSet */, tc.driverSupportsVolumeMountGroup)
  1003  
  1004  		attachID := getAttachmentName(csiMounter.volumeID, string(csiMounter.driverName), string(plug.host.GetNodeName()))
  1005  		attachment := makeTestAttachment(attachID, "test-node", pvName)
  1006  
  1007  		_, err = csiMounter.k8s.StorageV1().VolumeAttachments().Create(context.TODO(), attachment, meta.CreateOptions{})
  1008  		if err != nil {
  1009  			t.Errorf("failed to setup VolumeAttachment: %v", err)
  1010  			continue
  1011  		}
  1012  
  1013  		// Mounter.SetUp()
  1014  		var mounterArgs volume.MounterArgs
  1015  		var fsGroupPtr *int64
  1016  		if tc.setFsGroup {
  1017  			fsGroup := tc.fsGroup
  1018  			fsGroupPtr = &fsGroup
  1019  		}
  1020  		mounterArgs.FsGroup = fsGroupPtr
  1021  		if err := csiMounter.SetUp(mounterArgs); err != nil {
  1022  			t.Fatalf("mounter.Setup failed: %v", err)
  1023  		}
  1024  
  1025  		//Test the default value of file system type is not overridden
  1026  		if len(csiMounter.spec.PersistentVolume.Spec.CSI.FSType) != len(tc.fsType) {
  1027  			t.Errorf("file system type was overridden by type %s", csiMounter.spec.PersistentVolume.Spec.CSI.FSType)
  1028  		}
  1029  
  1030  		// ensure call went all the way
  1031  		pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
  1032  		if pubs[csiMounter.volumeID].Path != csiMounter.GetPath() {
  1033  			t.Error("csi server may not have received NodePublishVolume call")
  1034  		}
  1035  		if pubs[csiMounter.volumeID].VolumeMountGroup != tc.expectedFSGroupInNodePublish {
  1036  			t.Errorf("expected VolumeMountGroup parameter in NodePublishVolumeRequest to be %q, got: %q", tc.expectedFSGroupInNodePublish, pubs[csiMounter.volumeID].VolumeMountGroup)
  1037  		}
  1038  	}
  1039  }
  1040  
  1041  func TestUnmounterTeardown(t *testing.T) {
  1042  	plug, tmpDir := newTestPlugin(t, nil)
  1043  	defer os.RemoveAll(tmpDir)
  1044  	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
  1045  	pv := makeTestPV("test-pv", 10, testDriver, testVol)
  1046  
  1047  	// save the data file prior to unmount
  1048  	targetDir := getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host)
  1049  	dir := filepath.Join(targetDir, "mount")
  1050  	if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
  1051  		t.Errorf("failed to create dir [%s]: %v", dir, err)
  1052  	}
  1053  
  1054  	// do a fake local mount
  1055  	diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
  1056  	device := "/fake/device"
  1057  	if goruntime.GOOS == "windows" {
  1058  		// We need disk numbers on Windows.
  1059  		device = "1"
  1060  	}
  1061  	if err := diskMounter.FormatAndMount(device, dir, "testfs", nil); err != nil {
  1062  		t.Errorf("failed to mount dir [%s]: %v", dir, err)
  1063  	}
  1064  
  1065  	if err := saveVolumeData(
  1066  		targetDir,
  1067  		volDataFileName,
  1068  		map[string]string{
  1069  			volDataKey.specVolID:  pv.ObjectMeta.Name,
  1070  			volDataKey.driverName: testDriver,
  1071  			volDataKey.volHandle:  testVol,
  1072  		},
  1073  	); err != nil {
  1074  		t.Fatalf("failed to save volume data: %v", err)
  1075  	}
  1076  
  1077  	unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
  1078  	if err != nil {
  1079  		t.Fatalf("failed to make a new Unmounter: %v", err)
  1080  	}
  1081  
  1082  	csiUnmounter := unmounter.(*csiMountMgr)
  1083  	csiUnmounter.csiClient = setupClient(t, true)
  1084  	err = csiUnmounter.TearDownAt(dir)
  1085  	if err != nil {
  1086  		t.Fatal(err)
  1087  	}
  1088  
  1089  	// ensure csi client call
  1090  	pubs := csiUnmounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
  1091  	if _, ok := pubs[csiUnmounter.volumeID]; ok {
  1092  		t.Error("csi server may not have received NodeUnpublishVolume call")
  1093  	}
  1094  
  1095  }
  1096  
  1097  func TestUnmounterTeardownNoClientError(t *testing.T) {
  1098  	transientError := volumetypes.NewTransientOperationFailure("")
  1099  	plug, tmpDir := newTestPlugin(t, nil)
  1100  	defer os.RemoveAll(tmpDir)
  1101  	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
  1102  	pv := makeTestPV("test-pv", 10, testDriver, testVol)
  1103  
  1104  	// save the data file prior to unmount
  1105  	targetDir := getTargetPath(testPodUID, pv.ObjectMeta.Name, plug.host)
  1106  	dir := filepath.Join(targetDir, "mount")
  1107  	if err := os.MkdirAll(dir, 0755); err != nil && !os.IsNotExist(err) {
  1108  		t.Errorf("failed to create dir [%s]: %v", dir, err)
  1109  	}
  1110  
  1111  	// do a fake local mount
  1112  	diskMounter := util.NewSafeFormatAndMountFromHost(plug.GetPluginName(), plug.host)
  1113  	device := "/fake/device"
  1114  	if goruntime.GOOS == "windows" {
  1115  		// We need disk numbers on Windows.
  1116  		device = "1"
  1117  	}
  1118  	if err := diskMounter.FormatAndMount(device, dir, "testfs", nil); err != nil {
  1119  		t.Errorf("failed to mount dir [%s]: %v", dir, err)
  1120  	}
  1121  
  1122  	if err := saveVolumeData(
  1123  		targetDir,
  1124  		volDataFileName,
  1125  		map[string]string{
  1126  			volDataKey.specVolID:  pv.ObjectMeta.Name,
  1127  			volDataKey.driverName: testDriver,
  1128  			volDataKey.volHandle:  testVol,
  1129  		},
  1130  	); err != nil {
  1131  		t.Fatalf("failed to save volume data: %v", err)
  1132  	}
  1133  
  1134  	unmounter, err := plug.NewUnmounter(pv.ObjectMeta.Name, testPodUID)
  1135  	if err != nil {
  1136  		t.Fatalf("failed to make a new Unmounter: %v", err)
  1137  	}
  1138  
  1139  	csiUnmounter := unmounter.(*csiMountMgr)
  1140  
  1141  	// Clear out the cached client
  1142  	// The lookup to generate a new client will fail when it tries to query a driver with an unknown name
  1143  	csiUnmounter.csiClientGetter.csiClient = nil
  1144  	// Note that registerFakePlugin above will create a driver with a name of "test-driver"
  1145  	csiUnmounter.csiClientGetter.driverName = "unknown-driver"
  1146  
  1147  	err = csiUnmounter.TearDownAt(dir)
  1148  	if err == nil {
  1149  		t.Errorf("test should fail, but no error occurred")
  1150  	} else if reflect.TypeOf(transientError) != reflect.TypeOf(err) {
  1151  		t.Fatalf("expected exitError type: %v got: %v (%v)", reflect.TypeOf(transientError), reflect.TypeOf(err), err)
  1152  	}
  1153  }
  1154  
  1155  func TestIsCorruptedDir(t *testing.T) {
  1156  	existingMountPath, err := os.MkdirTemp(os.TempDir(), "blobfuse-csi-mount-test")
  1157  	if err != nil {
  1158  		t.Fatalf("failed to create tmp dir: %v", err)
  1159  	}
  1160  	defer os.RemoveAll(existingMountPath)
  1161  
  1162  	tests := []struct {
  1163  		desc           string
  1164  		dir            string
  1165  		expectedResult bool
  1166  	}{
  1167  		{
  1168  			desc:           "NotExist dir",
  1169  			dir:            "/tmp/NotExist",
  1170  			expectedResult: false,
  1171  		},
  1172  		{
  1173  			desc:           "Existing dir",
  1174  			dir:            existingMountPath,
  1175  			expectedResult: false,
  1176  		},
  1177  	}
  1178  
  1179  	for i, test := range tests {
  1180  		isCorruptedDir := isCorruptedDir(test.dir)
  1181  		assert.Equal(t, test.expectedResult, isCorruptedDir, "TestCase[%d]: %s", i, test.desc)
  1182  	}
  1183  }
  1184  
// TestPodServiceAccountTokenAttrs verifies that, when the CSIDriver object
// declares TokenRequests, SetUp forwards the issued service account tokens
// to the driver through the "csi.storage.k8s.io/serviceAccount.tokens"
// entry of the NodePublishVolume volume context.
func TestPodServiceAccountTokenAttrs(t *testing.T) {
	// Register defaulting funcs so scheme.Default below can fill in API
	// defaults on the CSIDriver and TokenRequest objects.
	scheme := runtime.NewScheme()
	utilruntime.Must(pkgauthenticationv1.RegisterDefaults(scheme))
	utilruntime.Must(pkgstoragev1.RegisterDefaults(scheme))
	utilruntime.Must(pkgcorev1.RegisterDefaults(scheme))

	gcp := "gcp"

	tests := []struct {
		desc              string
		driver            *storage.CSIDriver
		volumeContext     map[string]string
		wantVolumeContext map[string]string
	}{
		{
			desc: "csi driver has no ServiceAccountToken",
			driver: &storage.CSIDriver{
				ObjectMeta: meta.ObjectMeta{
					Name: testDriver,
				},
				Spec: storage.CSIDriverSpec{},
			},
			wantVolumeContext: nil,
		},
		{
			desc: "one token with empty string as audience",
			driver: &storage.CSIDriver{
				ObjectMeta: meta.ObjectMeta{
					Name: testDriver,
				},
				Spec: storage.CSIDriverSpec{
					TokenRequests: []storage.TokenRequest{
						{
							Audience: "",
						},
					},
				},
			},
			wantVolumeContext: map[string]string{"csi.storage.k8s.io/serviceAccount.tokens": `{"":{"token":"test-ns:test-service-account:3600:[api]","expirationTimestamp":"1970-01-01T00:00:01Z"}}`},
		},
		{
			desc: "one token with non-empty string as audience",
			driver: &storage.CSIDriver{
				ObjectMeta: meta.ObjectMeta{
					Name: testDriver,
				},
				Spec: storage.CSIDriverSpec{
					TokenRequests: []storage.TokenRequest{
						{
							Audience: gcp,
						},
					},
				},
			},
			wantVolumeContext: map[string]string{"csi.storage.k8s.io/serviceAccount.tokens": `{"gcp":{"token":"test-ns:test-service-account:3600:[gcp]","expirationTimestamp":"1970-01-01T00:00:01Z"}}`},
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)
			client := fakeclient.NewSimpleClientset()
			if test.driver != nil {
				test.driver.Spec.VolumeLifecycleModes = []storage.VolumeLifecycleMode{
					storage.VolumeLifecycleEphemeral,
					storage.VolumeLifecyclePersistent,
				}
				scheme.Default(test.driver)
				client = fakeclient.NewSimpleClientset(test.driver)
			}
			// Fake the TokenRequest subresource: return a deterministic token
			// encoding namespace, service account, expiration and audiences so
			// the expected volume context can be asserted exactly.
			client.PrependReactor("create", "serviceaccounts", clitesting.ReactionFunc(func(action clitesting.Action) (bool, runtime.Object, error) {
				tr := action.(clitesting.CreateAction).GetObject().(*authenticationv1.TokenRequest)
				scheme.Default(tr)
				if len(tr.Spec.Audiences) == 0 {
					tr.Spec.Audiences = []string{"api"}
				}
				tr.Status.Token = fmt.Sprintf("%v:%v:%d:%v", action.GetNamespace(), testAccount, *tr.Spec.ExpirationSeconds, tr.Spec.Audiences)
				tr.Status.ExpirationTimestamp = meta.NewTime(time.Unix(1, 1))
				return true, tr, nil
			}))
			plug, tmpDir := newTestPlugin(t, client)
			defer os.RemoveAll(tmpDir)
			mounter, err := plug.NewMounter(
				volume.NewSpecFromVolume(makeTestVol("test", testDriver)),
				&corev1.Pod{
					ObjectMeta: meta.ObjectMeta{UID: testPodUID, Namespace: testns, Name: testPod},
					Spec: corev1.PodSpec{
						ServiceAccountName: testAccount,
					},
				},
				volume.VolumeOptions{},
			)
			if err != nil {
				t.Fatalf("Failed to create a csi mounter, err: %v", err)
			}

			csiMounter := mounter.(*csiMountMgr)
			csiMounter.csiClient = setupClient(t, false)
			if err := csiMounter.SetUp(volume.MounterArgs{}); err != nil {
				t.Fatalf("mounter.Setup failed: %v", err)
			}

			// Verify the NodePublishVolume call reached the fake driver and
			// carried the expected volume context.
			pubs := csiMounter.csiClient.(*fakeCsiDriverClient).nodeClient.GetNodePublishedVolumes()
			vol, ok := pubs[csiMounter.volumeID]
			if !ok {
				t.Error("csi server may not have received NodePublishVolume call")
			}
			if vol.Path != csiMounter.GetPath() {
				t.Errorf("csi server expected path %s, got %s", csiMounter.GetPath(), vol.Path)
			}
			if diff := cmp.Diff(test.wantVolumeContext, vol.VolumeContext); diff != "" {
				t.Errorf("podServiceAccountTokenAttrs() = diff (-want +got):\n%s", diff)
			}
		})
	}
}
  1301  
// Test_csiMountMgr_supportsFSGroup exercises csiMountMgr.supportsFSGroup,
// which decides whether kubelet should apply the pod's fsGroup to the volume
// based on the fsType, the fsGroup pointer, and the driver's FSGroupPolicy.
func Test_csiMountMgr_supportsFSGroup(t *testing.T) {
	// fields mirrors csiMountMgr so each case populates only what it needs.
	type fields struct {
		plugin              *csiPlugin
		driverName          csiDriverName
		volumeLifecycleMode storage.VolumeLifecycleMode
		volumeID            string
		specVolumeID        string
		readOnly            bool
		supportsSELinux     bool
		spec                *volume.Spec
		pod                 *corev1.Pod
		podUID              types.UID
		publishContext      map[string]string
		kubeVolHost         volume.KubeletVolumeHost
		MetricsProvider     volume.MetricsProvider
	}
	// args are the inputs passed to supportsFSGroup.
	type args struct {
		fsType       string
		fsGroup      *int64
		driverPolicy storage.FSGroupPolicy
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "empty all",
			args: args{},
			want: false,
		},
		{
			name: "driverPolicy is FileFSGroupPolicy",
			args: args{
				fsGroup:      new(int64),
				driverPolicy: storage.FileFSGroupPolicy,
			},
			want: true,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy",
			args: args{
				fsGroup:      new(int64),
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			want: false,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy with empty Spec",
			args: args{
				fsGroup:      new(int64),
				fsType:       "ext4",
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			fields: fields{
				spec: &volume.Spec{},
			},
			want: false,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy with empty PersistentVolume",
			args: args{
				fsGroup:      new(int64),
				fsType:       "ext4",
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			fields: fields{
				spec: volume.NewSpecFromPersistentVolume(&corev1.PersistentVolume{}, true),
			},
			want: false,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy with empty AccessModes",
			args: args{
				fsGroup:      new(int64),
				fsType:       "ext4",
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			fields: fields{
				spec: volume.NewSpecFromPersistentVolume(&corev1.PersistentVolume{
					Spec: corev1.PersistentVolumeSpec{
						AccessModes: []corev1.PersistentVolumeAccessMode{},
					},
				}, true),
			},
			want: false,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy with ReadWriteOnce AccessModes",
			args: args{
				fsGroup:      new(int64),
				fsType:       "ext4",
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			fields: fields{
				spec: volume.NewSpecFromPersistentVolume(&corev1.PersistentVolume{
					Spec: corev1.PersistentVolumeSpec{
						AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
					},
				}, true),
			},
			want: true,
		},
		{
			name: "driverPolicy is ReadWriteOnceWithFSTypeFSGroupPolicy with CSI inline volume",
			args: args{
				fsGroup:      new(int64),
				fsType:       "ext4",
				driverPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
			},
			fields: fields{
				spec: volume.NewSpecFromVolume(&corev1.Volume{
					VolumeSource: corev1.VolumeSource{
						CSI: &corev1.CSIVolumeSource{
							Driver: testDriver,
						},
					},
				}),
			},
			want: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Build a csiMountMgr directly from the case's fields; no plugin
			// setup is needed because supportsFSGroup only inspects the spec.
			c := &csiMountMgr{
				plugin:              tt.fields.plugin,
				driverName:          tt.fields.driverName,
				volumeLifecycleMode: tt.fields.volumeLifecycleMode,
				volumeID:            tt.fields.volumeID,
				specVolumeID:        tt.fields.specVolumeID,
				readOnly:            tt.fields.readOnly,
				needSELinuxRelabel:  tt.fields.supportsSELinux,
				spec:                tt.fields.spec,
				pod:                 tt.fields.pod,
				podUID:              tt.fields.podUID,
				publishContext:      tt.fields.publishContext,
				kubeVolHost:         tt.fields.kubeVolHost,
				MetricsProvider:     tt.fields.MetricsProvider,
			}
			if got := c.supportsFSGroup(tt.args.fsType, tt.args.fsGroup, tt.args.driverPolicy); got != tt.want {
				t.Errorf("supportsFSGroup() = %v, want %v", got, tt.want)
			}
		})
	}
}
  1449  
  1450  func TestMounterGetFSGroupPolicy(t *testing.T) {
  1451  	defaultPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
  1452  	testCases := []struct {
  1453  		name                  string
  1454  		defined               bool
  1455  		expectedFSGroupPolicy storage.FSGroupPolicy
  1456  	}{
  1457  		{
  1458  			name:                  "no FSGroupPolicy defined, expect default",
  1459  			defined:               false,
  1460  			expectedFSGroupPolicy: storage.ReadWriteOnceWithFSTypeFSGroupPolicy,
  1461  		},
  1462  		{
  1463  			name:                  "File FSGroupPolicy defined, expect File",
  1464  			defined:               true,
  1465  			expectedFSGroupPolicy: storage.FileFSGroupPolicy,
  1466  		},
  1467  		{
  1468  			name:                  "None FSGroupPolicy defined, expected None",
  1469  			defined:               true,
  1470  			expectedFSGroupPolicy: storage.NoneFSGroupPolicy,
  1471  		},
  1472  	}
  1473  	for _, tc := range testCases {
  1474  		t.Logf("testing: %s", tc.name)
  1475  		// Define the driver and set the FSGroupPolicy
  1476  		driver := getTestCSIDriver(testDriver, nil, nil, nil)
  1477  		if tc.defined {
  1478  			driver.Spec.FSGroupPolicy = &tc.expectedFSGroupPolicy
  1479  		} else {
  1480  			driver.Spec.FSGroupPolicy = &defaultPolicy
  1481  		}
  1482  
  1483  		// Create the client and register the resources
  1484  		fakeClient := fakeclient.NewSimpleClientset(driver)
  1485  		plug, tmpDir := newTestPlugin(t, fakeClient)
  1486  		defer os.RemoveAll(tmpDir)
  1487  		registerFakePlugin(testDriver, "endpoint", []string{"1.3.0"}, t)
  1488  
  1489  		mounter, err := plug.NewMounter(
  1490  			volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
  1491  			&corev1.Pod{ObjectMeta: meta.ObjectMeta{UID: "1", Namespace: testns}},
  1492  			volume.VolumeOptions{},
  1493  		)
  1494  		if err != nil {
  1495  			t.Fatalf("Error creating a new mounter: %s", err)
  1496  		}
  1497  
  1498  		csiMounter := mounter.(*csiMountMgr)
  1499  
  1500  		// Check to see if we can obtain the CSIDriver, along with examining its FSGroupPolicy
  1501  		fsGroup, err := csiMounter.getFSGroupPolicy()
  1502  		if err != nil {
  1503  			t.Fatalf("Error attempting to obtain FSGroupPolicy: %v", err)
  1504  		}
  1505  		if fsGroup != *driver.Spec.FSGroupPolicy {
  1506  			t.Fatalf("FSGroupPolicy doesn't match expected value: %v, %v", fsGroup, tc.expectedFSGroupPolicy)
  1507  		}
  1508  	}
  1509  }
  1510  

View as plain text