/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
17 package deployment
18
19 import (
20 "fmt"
21 "testing"
22
23 apps "k8s.io/api/apps/v1"
24 v1 "k8s.io/api/core/v1"
25 "k8s.io/apimachinery/pkg/runtime"
26 "k8s.io/apimachinery/pkg/types"
27 "k8s.io/client-go/informers"
28 "k8s.io/client-go/kubernetes/fake"
29 "k8s.io/client-go/tools/record"
30 "k8s.io/klog/v2/ktesting"
31 "k8s.io/kubernetes/pkg/controller"
32 )
33
34 func TestScaleDownOldReplicaSets(t *testing.T) {
35 tests := []struct {
36 oldRSSizes []int32
37 d *apps.Deployment
38 }{
39 {
40 oldRSSizes: []int32{3},
41 d: newDeployment("foo", 3, nil, nil, nil, map[string]string{"foo": "bar"}),
42 },
43 }
44
45 for i := range tests {
46 t.Logf("running scenario %d", i)
47 test := tests[i]
48
49 var oldRSs []*apps.ReplicaSet
50 var expected []runtime.Object
51
52 for n, size := range test.oldRSSizes {
53 rs := newReplicaSet(test.d, fmt.Sprintf("%s-%d", test.d.Name, n), size)
54 oldRSs = append(oldRSs, rs)
55
56 rsCopy := rs.DeepCopy()
57
58 zero := int32(0)
59 rsCopy.Spec.Replicas = &zero
60 expected = append(expected, rsCopy)
61
62 if *(oldRSs[n].Spec.Replicas) == *(expected[n].(*apps.ReplicaSet).Spec.Replicas) {
63 t.Errorf("broken test - original and expected RS have the same size")
64 }
65 }
66
67 kc := fake.NewSimpleClientset(expected...)
68 informers := informers.NewSharedInformerFactory(kc, controller.NoResyncPeriodFunc())
69 _, ctx := ktesting.NewTestContext(t)
70 c, err := NewDeploymentController(ctx, informers.Apps().V1().Deployments(), informers.Apps().V1().ReplicaSets(), informers.Core().V1().Pods(), kc)
71 if err != nil {
72 t.Fatalf("error creating Deployment controller: %v", err)
73 }
74 c.eventRecorder = &record.FakeRecorder{}
75
76 c.scaleDownOldReplicaSetsForRecreate(ctx, oldRSs, test.d)
77 for j := range oldRSs {
78 rs := oldRSs[j]
79
80 if *rs.Spec.Replicas != 0 {
81 t.Errorf("rs %q has non-zero replicas", rs.Name)
82 }
83 }
84 }
85 }
86
87 func TestOldPodsRunning(t *testing.T) {
88 tests := []struct {
89 name string
90
91 newRS *apps.ReplicaSet
92 oldRSs []*apps.ReplicaSet
93 podMap map[types.UID][]*v1.Pod
94
95 hasOldPodsRunning bool
96 }{
97 {
98 name: "no old RSs",
99 hasOldPodsRunning: false,
100 },
101 {
102 name: "old RSs with running pods",
103 oldRSs: []*apps.ReplicaSet{rsWithUID("some-uid"), rsWithUID("other-uid")},
104 podMap: podMapWithUIDs([]string{"some-uid", "other-uid"}),
105 hasOldPodsRunning: true,
106 },
107 {
108 name: "old RSs without pods but with non-zero status replicas",
109 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 1, nil)},
110 hasOldPodsRunning: true,
111 },
112 {
113 name: "old RSs without pods or non-zero status replicas",
114 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
115 hasOldPodsRunning: false,
116 },
117 {
118 name: "old RSs with zero status replicas but pods in terminal state are present",
119 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
120 podMap: map[types.UID][]*v1.Pod{
121 "uid-1": {
122 {
123 Status: v1.PodStatus{
124 Phase: v1.PodFailed,
125 },
126 },
127 {
128 Status: v1.PodStatus{
129 Phase: v1.PodSucceeded,
130 },
131 },
132 },
133 },
134 hasOldPodsRunning: false,
135 },
136 {
137 name: "old RSs with zero status replicas but pod in unknown phase present",
138 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
139 podMap: map[types.UID][]*v1.Pod{
140 "uid-1": {
141 {
142 Status: v1.PodStatus{
143 Phase: v1.PodUnknown,
144 },
145 },
146 },
147 },
148 hasOldPodsRunning: true,
149 },
150 {
151 name: "old RSs with zero status replicas with pending pod present",
152 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
153 podMap: map[types.UID][]*v1.Pod{
154 "uid-1": {
155 {
156 Status: v1.PodStatus{
157 Phase: v1.PodPending,
158 },
159 },
160 },
161 },
162 hasOldPodsRunning: true,
163 },
164 {
165 name: "old RSs with zero status replicas with running pod present",
166 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
167 podMap: map[types.UID][]*v1.Pod{
168 "uid-1": {
169 {
170 Status: v1.PodStatus{
171 Phase: v1.PodRunning,
172 },
173 },
174 },
175 },
176 hasOldPodsRunning: true,
177 },
178 {
179 name: "old RSs with zero status replicas but pods in terminal state and pending are present",
180 oldRSs: []*apps.ReplicaSet{newRSWithStatus("rs-1", 0, 0, nil)},
181 podMap: map[types.UID][]*v1.Pod{
182 "uid-1": {
183 {
184 Status: v1.PodStatus{
185 Phase: v1.PodFailed,
186 },
187 },
188 {
189 Status: v1.PodStatus{
190 Phase: v1.PodSucceeded,
191 },
192 },
193 },
194 "uid-2": {},
195 "uid-3": {
196 {
197 Status: v1.PodStatus{
198 Phase: v1.PodPending,
199 },
200 },
201 },
202 },
203 hasOldPodsRunning: true,
204 },
205 }
206
207 for _, test := range tests {
208 t.Run(test.name, func(t *testing.T) {
209 if expected, got := test.hasOldPodsRunning, oldPodsRunning(test.newRS, test.oldRSs, test.podMap); expected != got {
210 t.Errorf("%s: expected %t, got %t", test.name, expected, got)
211 }
212 })
213 }
214 }
215
216 func rsWithUID(uid string) *apps.ReplicaSet {
217 d := newDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"})
218 rs := newReplicaSet(d, fmt.Sprintf("foo-%s", uid), 0)
219 rs.UID = types.UID(uid)
220 return rs
221 }
222
223 func podMapWithUIDs(uids []string) map[types.UID][]*v1.Pod {
224 podMap := make(map[types.UID][]*v1.Pod)
225 for _, uid := range uids {
226 podMap[types.UID(uid)] = []*v1.Pod{
227 { },
228 { },
229 }
230 }
231 return podMap
232 }
233