package nfs

import (
	"os"
	"path/filepath"
	"testing"

	"k8s.io/mount-utils"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/kubernetes/pkg/volume"
	volumetest "k8s.io/kubernetes/pkg/volume/testing"
)

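// TestCanSupport verifies that the NFS plugin is registered as
// "kubernetes.io/nfs" and that it supports NFS volume and persistent
// volume sources, but rejects an empty volume source.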
func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("nfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
	if err != nil {
		t.Fatal("Can't find the plugin by name")
	}
	if plug.GetPluginName() != "kubernetes.io/nfs" {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}

	if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{}}}}) {
		t.Errorf("Expected true")
	}
	if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{}}}}}) {
		t.Errorf("Expected true")
	}
	if plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}) {
		t.Errorf("Expected false")
	}
}

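// TestGetAccessModes verifies that the NFS persistent volume plugin
// reports the ReadWriteOnce, ReadOnlyMany, and ReadWriteMany access modes.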
func TestGetAccessModes(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("nfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))

	plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/nfs")
	if err != nil {
		t.Fatal("Can't find the plugin by name")
	}
	if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteMany) {
		t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
	}
}

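// TestRecycler verifies that a recyclable plugin can be found for an
// NFS persistent volume spec.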
func TestRecycler(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("nfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins([]volume.VolumePlugin{&nfsPlugin{nil, volume.VolumeConfig{}}}, nil, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))

	spec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{NFS: &v1.NFSVolumeSource{Path: "/foo"}}}}}
	_, pluginErr := plugMgr.FindRecyclablePluginBySpec(spec)
	if pluginErr != nil {
		t.Errorf("Can't find the plugin by spec: %v", pluginErr)
	}
}

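// doTestPlugin exercises a full mount/unmount cycle for the given spec
// against a fake mounter, checking the generated volume path, the mounted
// device string, and the recorded mount and unmount actions.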
func doTestPlugin(t *testing.T, spec *volume.Spec, expectedDevice string) {
	tmpDir, err := utiltesting.MkTmpdir("nfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil, volumetest.NewFakeVolumeHost(t, tmpDir, nil, nil))
	plug, err := plugMgr.FindPluginByName("kubernetes.io/nfs")
	if err != nil {
		t.Fatal("Can't find the plugin by name")
	}
	fake := mount.NewFakeMounter(nil)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake)
	if err != nil {
		t.Errorf("Failed to make a new Mounter: %v", err)
	}
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}
	volumePath := mounter.GetPath()
	expectedPath := filepath.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~nfs/vol1")
	if volumePath != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
	}
	if err := mounter.SetUp(volume.MounterArgs{}); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
		if os.IsNotExist(err) {
			t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
		} else {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
	if mounter.(*nfsMounter).readOnly {
		t.Errorf("The volume source should not be read-only, but it is")
	}
	mntDevs, err := fake.List()
	if err != nil {
		t.Errorf("fakeMounter.List() failed: %v", err)
	}
	if len(mntDevs) != 1 {
		t.Errorf("unexpected number of mounted devices. expected: %v, got %v", 1, len(mntDevs))
	} else {
		if mntDevs[0].Type != "nfs" {
			t.Errorf("unexpected type of mounted devices. expected: %v, got %v", "nfs", mntDevs[0].Type)
		}
		if mntDevs[0].Device != expectedDevice {
			t.Errorf("unexpected nfs device, expected %q, got: %q", expectedDevice, mntDevs[0].Device)
		}
	}
	log := fake.GetLog()
	if len(log) != 1 {
		t.Errorf("Mount was not called exactly one time. It was called %d times.", len(log))
	} else {
		if log[0].Action != mount.FakeActionMount {
			t.Errorf("Unexpected mounter action: %#v", log[0])
		}
	}
	fake.ResetLog()

	unmounter, err := plug.(*nfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fake)
	if err != nil {
		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
	if unmounter == nil {
		t.Fatalf("Got a nil Unmounter")
	}
	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
		t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
	} else if !os.IsNotExist(err) {
		t.Errorf("TearDown() failed: %v", err)
	}
	log = fake.GetLog()
	if len(log) != 1 {
		t.Errorf("Unmount was not called exactly one time. It was called %d times.", len(log))
	} else {
		if log[0].Action != mount.FakeActionUnmount {
			t.Errorf("Unexpected unmounter action: %#v", log[0])
		}
	}

	fake.ResetLog()
}

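// TestPluginVolume runs the mount/unmount cycle for an NFS volume source
// with a hostname server.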
func TestPluginVolume(t *testing.T) {
	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false}},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol), "localhost:/somepath")
}

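// TestIPV6VolumeSource verifies that an IPv6 server address is wrapped in
// brackets in the mounted device string.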
func TestIPV6VolumeSource(t *testing.T) {
	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: "0:0:0:0:0:0:0:1", Path: "/somepath", ReadOnly: false}},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol), "[0:0:0:0:0:0:0:1]:/somepath")
}

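// TestIPV4VolumeSource verifies that an IPv4 server address is used as-is
// in the mounted device string.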
func TestIPV4VolumeSource(t *testing.T) {
	vol := &v1.Volume{
		Name:         "vol1",
		VolumeSource: v1.VolumeSource{NFS: &v1.NFSVolumeSource{Server: "127.0.0.1", Path: "/somepath", ReadOnly: false}},
	}
	doTestPlugin(t, volume.NewSpecFromVolume(vol), "127.0.0.1:/somepath")
}

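// TestPluginPersistentVolume runs the mount/unmount cycle for an NFS
// persistent volume source.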
func TestPluginPersistentVolume(t *testing.T) {
	vol := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "vol1",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{Server: "localhost", Path: "/somepath", ReadOnly: false},
			},
		},
	}

	doTestPlugin(t, volume.NewSpecFromPersistentVolume(vol, false), "localhost:/somepath")
}

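// TestPersistentClaimReadOnlyFlag verifies that a mounter created from a
// read-only persistent volume spec reports ReadOnly in its attributes.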
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("nfs_test")
	if err != nil {
		t.Fatalf("error creating temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pvA",
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				NFS: &v1.NFSVolumeSource{},
			},
			ClaimRef: &v1.ObjectReference{
				Name: "claimA",
			},
		},
	}

	claim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "claimA",
			Namespace: "nsA",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "pvA",
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase: v1.ClaimBound,
		},
	}

	client := fake.NewSimpleClientset(pv, claim)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), nil, volumetest.NewFakeVolumeHost(t, tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(nfsPluginName)

	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
	if mounter == nil {
		t.Fatalf("Got a nil Mounter")
	}

	if !mounter.GetAttributes().ReadOnly {
		t.Errorf("Expected true for mounter.GetAttributes().ReadOnly")
	}
}