package garbagecollector

import (
	"context"
	goerrors "errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/discovery"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/controller-manager/controller"
	"k8s.io/controller-manager/pkg/informerfactory"
	"k8s.io/klog/v2"
	c "k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/garbagecollector/metrics"
)

// ResourceResyncTime defines the resync period of the garbage collector's informers.
const ResourceResyncTime time.Duration = 0

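// GarbageCollector runs reflectors to watch for changes of managed API
// objects, funnels the results to a single-threaded dependencyGraphBuilder,
// which builds a graph caching the dependencies among objects. Triggered by
// the graph changes, the dependencyGraphBuilder enqueues objects that can
// potentially be garbage-collected to the attemptToDelete queue, and enqueues
// objects whose dependents need to be orphaned to the attemptToOrphan queue.
// The GarbageCollector has workers who consume these two queues and send
// requests to the API server to delete/update the objects accordingly.
// Note that having the dependencyGraphBuilder notify the garbage collector
// ensures that the garbage collector operates with a graph that is at least
// as up to date as the notification is sent.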
type GarbageCollector struct {
	restMapper     meta.ResettableRESTMapper
	metadataClient metadata.Interface
	// garbage collector attempts to delete the items in attemptToDelete queue when the time is ripe.
	attemptToDelete workqueue.RateLimitingInterface
	// garbage collector attempts to orphan the dependents of the items in the attemptToOrphan queue, then deletes the items.
	attemptToOrphan        workqueue.RateLimitingInterface
	dependencyGraphBuilder *GraphBuilder
	// GC caches the owners that do not exist according to the API server.
	absentOwnerCache *ReferenceCache

	kubeClient       clientset.Interface
	eventBroadcaster record.EventBroadcaster

	workerLock sync.RWMutex
}

var _ controller.Interface = (*GarbageCollector)(nil)
var _ controller.Debuggable = (*GarbageCollector)(nil)

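// NewGarbageCollector creates a new GarbageCollector by building a dependency
// graph builder from the given informers and composing the collector from it.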
func NewGarbageCollector(
	ctx context.Context,
	kubeClient clientset.Interface,
	metadataClient metadata.Interface,
	mapper meta.ResettableRESTMapper,
	ignoredResources map[schema.GroupResource]struct{},
	sharedInformers informerfactory.InformerFactory,
	informersStarted <-chan struct{},
) (*GarbageCollector, error) {
	graphBuilder := NewDependencyGraphBuilder(ctx, metadataClient, mapper, ignoredResources, sharedInformers, informersStarted)
	return NewComposedGarbageCollector(ctx, kubeClient, metadataClient, mapper, graphBuilder)
}

// NewComposedGarbageCollector creates a GarbageCollector from an existing graph builder,
// sharing its work queues, absent-owner cache, and event broadcaster.
func NewComposedGarbageCollector(
	ctx context.Context,
	kubeClient clientset.Interface,
	metadataClient metadata.Interface,
	mapper meta.ResettableRESTMapper,
	graphBuilder *GraphBuilder,
) (*GarbageCollector, error) {
	attemptToDelete, attemptToOrphan, absentOwnerCache := graphBuilder.GetGraphResources()

	gc := &GarbageCollector{
		metadataClient:         metadataClient,
		restMapper:             mapper,
		attemptToDelete:        attemptToDelete,
		attemptToOrphan:        attemptToOrphan,
		absentOwnerCache:       absentOwnerCache,
		kubeClient:             kubeClient,
		eventBroadcaster:       graphBuilder.eventBroadcaster,
		dependencyGraphBuilder: graphBuilder,
	}

	metrics.Register()

	return gc, nil
}
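// resyncMonitors starts or stops resource monitors as needed to ensure that all
// (and only) those resources present in the map are monitored.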
func (gc *GarbageCollector) resyncMonitors(logger klog.Logger, deletableResources map[schema.GroupVersionResource]struct{}) error {
	if err := gc.dependencyGraphBuilder.syncMonitors(logger, deletableResources); err != nil {
		return err
	}
	gc.dependencyGraphBuilder.startMonitors(logger)
	return nil
}

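// Run starts garbage collector workers.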
func (gc *GarbageCollector) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	defer gc.attemptToDelete.ShutDown()
	defer gc.attemptToOrphan.ShutDown()
	defer gc.dependencyGraphBuilder.graphChanges.ShutDown()

	// Start events processing pipeline.
	gc.eventBroadcaster.StartStructuredLogging(3)
	gc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: gc.kubeClient.CoreV1().Events("")})
	defer gc.eventBroadcaster.Shutdown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting controller", "controller", "garbagecollector")
	defer logger.Info("Shutting down controller", "controller", "garbagecollector")

	go gc.dependencyGraphBuilder.Run(ctx)

	if !cache.WaitForNamedCacheSync("garbage collector", ctx.Done(), func() bool {
		return gc.dependencyGraphBuilder.IsSynced(logger)
	}) {
		return
	}

	logger.Info("All resource monitors have synced. Proceeding to collect garbage")

	// gc workers
	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, gc.runAttemptToDeleteWorker, 1*time.Second)
		go wait.Until(func() { gc.runAttemptToOrphanWorker(logger) }, 1*time.Second, ctx.Done())
	}

	<-ctx.Done()
}
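// Sync periodically resyncs the garbage collector when new resources are
// observed from discovery. When new resources are detected, it pauses the GC
// workers (by taking workerLock), resets gc.restMapper, and resyncs the
// monitors before the workers are allowed to resume.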
func (gc *GarbageCollector) Sync(ctx context.Context, discoveryClient discovery.ServerResourcesInterface, period time.Duration) {
	oldResources := make(map[schema.GroupVersionResource]struct{})
	wait.UntilWithContext(ctx, func(ctx context.Context) {
		logger := klog.FromContext(ctx)

		// Get the current resource list from discovery.
		newResources, err := GetDeletableResources(logger, discoveryClient)

		if len(newResources) == 0 {
			logger.V(2).Info("no resources reported by discovery, skipping garbage collector sync")
			metrics.GarbageCollectorResourcesSyncError.Inc()
			return
		}
		if groupLookupFailures, isLookupFailure := discovery.GroupDiscoveryFailedErrorGroups(err); isLookupFailure {
			// In partial discovery cases, preserve existing synced informers for resources in the failed groups.
			for k, v := range oldResources {
				if _, failed := groupLookupFailures[k.GroupVersion()]; failed && gc.dependencyGraphBuilder.IsResourceSynced(k) {
					newResources[k] = v
				}
			}
		}

		// Decide whether discovery has reported a change.
		if reflect.DeepEqual(oldResources, newResources) {
			logger.V(5).Info("no resource updates from discovery, skipping garbage collector sync")
			return
		}

		// Ensure workers are paused to avoid processing events before informers
		// have resynced.
		gc.workerLock.Lock()
		defer gc.workerLock.Unlock()

		// Once we get here, we should not unpause workers until we've successfully synced.
		attempt := 0
		wait.PollImmediateUntilWithContext(ctx, 100*time.Millisecond, func(ctx context.Context) (bool, error) {
			attempt++

			// On a reattempt, check if available resources have changed.
			if attempt > 1 {
				newResources, err = GetDeletableResources(logger, discoveryClient)

				if len(newResources) == 0 {
					logger.V(2).Info("no resources reported by discovery", "attempt", attempt)
					metrics.GarbageCollectorResourcesSyncError.Inc()
					return false, nil
				}
				if groupLookupFailures, isLookupFailure := discovery.GroupDiscoveryFailedErrorGroups(err); isLookupFailure {
					// In partial discovery cases, preserve existing synced informers for resources in the failed groups.
					for k, v := range oldResources {
						if _, failed := groupLookupFailures[k.GroupVersion()]; failed && gc.dependencyGraphBuilder.IsResourceSynced(k) {
							newResources[k] = v
						}
					}
				}
			}

			logger.V(2).Info(
				"syncing garbage collector with updated resources from discovery",
				"attempt", attempt,
				"diff", printDiff(oldResources, newResources),
			)

			// Resetting the REST mapper will also invalidate the underlying discovery
			// client. This is a leaky abstraction and assumes behavior about the discovery
			// client, but we have no other way of telling the deferred discovery client to
			// invalidate its cache.
			gc.restMapper.Reset()
			logger.V(4).Info("reset restmapper")

			// Perform the monitor resync and wait for controllers to report cache sync.
			if err := gc.resyncMonitors(logger, newResources); err != nil {
				utilruntime.HandleError(fmt.Errorf("failed to sync resource monitors (attempt %d): %v", attempt, err))
				metrics.GarbageCollectorResourcesSyncError.Inc()
				return false, nil
			}
			logger.V(4).Info("resynced monitors")

			// Wait for caches to fill for a while (our sync period) before attempting to
			// rediscover resources and retry syncing. This protects us from deadlocks where
			// available resources changed and one of our informer caches will never fill.
			if !cache.WaitForNamedCacheSync("garbage collector", waitForStopOrTimeout(ctx.Done(), period), func() bool {
				return gc.dependencyGraphBuilder.IsSynced(logger)
			}) {
				utilruntime.HandleError(fmt.Errorf("timed out waiting for dependency graph builder sync during GC sync (attempt %d)", attempt))
				metrics.GarbageCollectorResourcesSyncError.Inc()
				return false, nil
			}

			// success, break out of the loop
			return true, nil
		})

		// Finally, keep track of our new resource monitor state.
		// Only do this after all preceding steps have succeeded, so a failed attempt
		// is retried on the next sync.
		oldResources = newResources
		logger.V(2).Info("synced garbage collector")
	}, period)
}
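// printDiff returns a human-readable summary of which resources were added and removed.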
func printDiff(oldResources, newResources map[schema.GroupVersionResource]struct{}) string {
	removed := sets.NewString()
	for oldResource := range oldResources {
		if _, ok := newResources[oldResource]; !ok {
			removed.Insert(fmt.Sprintf("%+v", oldResource))
		}
	}
	added := sets.NewString()
	for newResource := range newResources {
		if _, ok := oldResources[newResource]; !ok {
			added.Insert(fmt.Sprintf("%+v", newResource))
		}
	}
	return fmt.Sprintf("added: %v, removed: %v", added.List(), removed.List())
}
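// waitForStopOrTimeout returns a stop channel that closes when the provided stop channel
// closes or when the specified timeout is reached, whichever happens first.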
func waitForStopOrTimeout(stopCh <-chan struct{}, timeout time.Duration) <-chan struct{} {
	stopChWithTimeout := make(chan struct{})
	go func() {
		select {
		case <-stopCh:
		case <-time.After(timeout):
		}
		close(stopChWithTimeout)
	}()
	return stopChWithTimeout
}

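// IsSynced returns true if the dependency graph builder is synced.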
func (gc *GarbageCollector) IsSynced(logger klog.Logger) bool {
	return gc.dependencyGraphBuilder.IsSynced(logger)
}

func (gc *GarbageCollector) runAttemptToDeleteWorker(ctx context.Context) {
	for gc.processAttemptToDeleteWorker(ctx) {
	}
}

var enqueuedVirtualDeleteEventErr = goerrors.New("enqueued virtual delete event")

var namespacedOwnerOfClusterScopedObjectErr = goerrors.New("cluster-scoped objects cannot refer to namespaced owners")

func (gc *GarbageCollector) processAttemptToDeleteWorker(ctx context.Context) bool {
	item, quit := gc.attemptToDelete.Get()
	gc.workerLock.RLock()
	defer gc.workerLock.RUnlock()
	if quit {
		return false
	}
	defer gc.attemptToDelete.Done(item)

	action := gc.attemptToDeleteWorker(ctx, item)
	switch action {
	case forgetItem:
		gc.attemptToDelete.Forget(item)
	case requeueItem:
		gc.attemptToDelete.AddRateLimited(item)
	}

	return true
}

type workQueueItemAction int

const (
	requeueItem = iota
	forgetItem
)

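// attemptToDeleteWorker processes a single node from the attemptToDelete queue and
// reports whether the item should be forgotten or requeued with rate limiting.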
func (gc *GarbageCollector) attemptToDeleteWorker(ctx context.Context, item interface{}) workQueueItemAction {
	n, ok := item.(*node)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
		return forgetItem
	}

	logger := klog.FromContext(ctx)

	if !n.isObserved() {
		nodeFromGraph, existsInGraph := gc.dependencyGraphBuilder.uidToNode.Read(n.identity.UID)
		if !existsInGraph {
			// this can happen if attemptToDelete loops on a requeued virtual node because attemptToDeleteItem returned an error,
			// and in the meantime a deletion of the real object associated with that uid was observed
			logger.V(5).Info("item no longer in the graph, skipping attemptToDeleteItem", "item", n.identity)
			return forgetItem
		}
		if nodeFromGraph.isObserved() {
			// this can happen if attemptToDelete loops on a requeued virtual node because attemptToDeleteItem returned an error,
			// and in the meantime the real object associated with that uid was observed
			logger.V(5).Info("item no longer virtual in the graph, skipping attemptToDeleteItem on virtual node", "item", n.identity)
			return forgetItem
		}
	}

	err := gc.attemptToDeleteItem(ctx, n)
	if err == enqueuedVirtualDeleteEventErr {
		// a virtual event was produced and will be handled by processGraphChanges, no need to requeue this node
		return forgetItem
	} else if err == namespacedOwnerOfClusterScopedObjectErr {
		// a cluster-scoped object referring to a namespaced owner is an error that will not resolve on retry, no need to requeue this node
		return forgetItem
	} else if err != nil {
		if _, ok := err.(*restMappingError); ok {
			// There are at least two ways this can happen:
			// 1. The reference is to an object of a custom type that has not been recognized yet
			//    (and hence its deletable resources have not been mapped)
			// 2. The reference is to an invalid group/version
			// We cannot distinguish these cases here, so log at low verbosity and requeue.
			logger.V(5).Error(err, "error syncing item", "item", n.identity)
		} else {
			utilruntime.HandleError(fmt.Errorf("error syncing item %s: %v", n, err))
		}
		// retry if garbage collection of an object failed
		return requeueItem
	} else if !n.isObserved() {
		// requeue if item hasn't been observed via an informer event yet
		logger.V(5).Info("item hasn't been observed via informer yet", "item", n.identity)
		return requeueItem
	}

	return forgetItem
}
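// isDangling checks whether a reference points to an object that no longer exists.
// If isDangling looks up the referenced object at the API server, it also returns its latest state.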
func (gc *GarbageCollector) isDangling(ctx context.Context, reference metav1.OwnerReference, item *node) (
	dangling bool, owner *metav1.PartialObjectMetadata, err error) {

	logger := klog.FromContext(ctx)
	// check for a recorded absent cluster-scoped parent
	absentOwnerCacheKey := objectReference{OwnerReference: ownerReferenceCoordinates(reference)}
	if gc.absentOwnerCache.Has(absentOwnerCacheKey) {
		logger.V(5).Info("according to the absentOwnerCache, item's owner does not exist",
			"item", item.identity,
			"owner", reference,
		)
		return true, nil, nil
	}

	// check for a recorded absent namespaced parent
	absentOwnerCacheKey.Namespace = item.identity.Namespace
	if gc.absentOwnerCache.Has(absentOwnerCacheKey) {
		logger.V(5).Info("according to the absentOwnerCache, item's owner does not exist in namespace",
			"item", item.identity,
			"owner", reference,
		)
		return true, nil, nil
	}

	// TODO: we need to verify the reference resource is supported by the
	// system. If it's not a valid resource, the garbage collector should i)
	// ignore the reference when deciding if the object should be deleted, and
	// ii) should update the object to remove such references. This is to
	// prevent objects having references to an old resource from being
	// deleted during a cluster upgrade.
	resource, namespaced, err := gc.apiResource(reference.APIVersion, reference.Kind)
	if err != nil {
		return false, nil, err
	}
	if !namespaced {
		absentOwnerCacheKey.Namespace = ""
	}

	if len(item.identity.Namespace) == 0 && namespaced {
		// item is a cluster-scoped object referring to a namespace-scoped owner, which is not valid;
		// return a marker error rather than retrying the lookup forever.
		logger.V(2).Info("item is cluster-scoped, but refers to a namespaced owner",
			"item", item.identity,
			"owner", reference,
		)
		return false, nil, namespacedOwnerOfClusterScopedObjectErr
	}

	// TODO: It's only necessary to talk to the API server if the owner node
	// doesn't exist in the graph yet.
	owner, err = gc.metadataClient.Resource(resource).Namespace(resourceDefaultNamespace(namespaced, item.identity.Namespace)).Get(ctx, reference.Name, metav1.GetOptions{})
	switch {
	case errors.IsNotFound(err):
		gc.absentOwnerCache.Add(absentOwnerCacheKey)
		logger.V(5).Info("item's owner is not found",
			"item", item.identity,
			"owner", reference,
		)
		return true, nil, nil
	case err != nil:
		return false, nil, err
	}

	if owner.GetUID() != reference.UID {
		logger.V(5).Info("item's owner is not found, UID mismatch",
			"item", item.identity,
			"owner", reference,
		)
		gc.absentOwnerCache.Add(absentOwnerCacheKey)
		return true, nil, nil
	}
	return false, owner, nil
}
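// classifyReferences checks the deletion state of the owners in the given
// ownerReferences and sorts them into three categories:
// solid: the owner exists, and is not "waitingForDependentsDeletion";
// dangling: the owner does not exist;
// waitingForDependentsDeletion: the owner exists, its deletionTimestamp is
// non-nil, and it has the FinalizerDeletingDependents finalizer.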
func (gc *GarbageCollector) classifyReferences(ctx context.Context, item *node, latestReferences []metav1.OwnerReference) (
	solid, dangling, waitingForDependentsDeletion []metav1.OwnerReference, err error) {
	for _, reference := range latestReferences {
		isDangling, owner, err := gc.isDangling(ctx, reference, item)
		if err != nil {
			return nil, nil, nil, err
		}
		if isDangling {
			dangling = append(dangling, reference)
			continue
		}

		ownerAccessor, err := meta.Accessor(owner)
		if err != nil {
			return nil, nil, nil, err
		}
		if ownerAccessor.GetDeletionTimestamp() != nil && hasDeleteDependentsFinalizer(ownerAccessor) {
			waitingForDependentsDeletion = append(waitingForDependentsDeletion, reference)
		} else {
			solid = append(solid, reference)
		}
	}
	return solid, dangling, waitingForDependentsDeletion, nil
}

func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
	var ret []types.UID
	for _, ref := range refs {
		ret = append(ret, ref.UID)
	}
	return ret
}
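// attemptToDeleteItem looks up the live API object associated with the node,
// and issues a delete IFF the uid matches, the item is not blocked on deleting
// dependents, and all owner references are dangling.
//
// If the API get request returns a NotFound error, or the retrieved item's uid
// does not match, a virtual delete event for the node is enqueued and
// enqueuedVirtualDeleteEventErr is returned.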
func (gc *GarbageCollector) attemptToDeleteItem(ctx context.Context, item *node) error {
	logger := klog.FromContext(ctx)

	logger.V(2).Info("Processing item",
		"item", item.identity,
		"virtual", !item.isObserved(),
	)

	// "being deleted" is a one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
	if item.isBeingDeleted() && !item.isDeletingDependents() {
		logger.V(5).Info("processing item returned at once, because its DeletionTimestamp is non-nil",
			"item", item.identity,
		)
		return nil
	}

	// TODO: It's only necessary to talk to the API server if this is a "virtual" node.
	// The local graph could lag behind the real status, but in practice the difference is small.
	latest, err := gc.getObject(item.identity)
	switch {
	case errors.IsNotFound(err):
		// the GraphBuilder can add "virtual" nodes for owners that don't exist yet, so we need to
		// enqueue a virtual Delete event to remove the virtual node from GraphBuilder.uidToNode.
		logger.V(5).Info("item not found, generating a virtual delete event",
			"item", item.identity,
		)
		gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
		return enqueuedVirtualDeleteEventErr
	case err != nil:
		return err
	}

	if latest.GetUID() != item.identity.UID {
		logger.V(5).Info("UID doesn't match, item not found, generating a virtual delete event",
			"item", item.identity,
		)
		gc.dependencyGraphBuilder.enqueueVirtualDeleteEvent(item.identity)
		return enqueuedVirtualDeleteEventErr
	}

	// the item is already being deleted with the foreground policy; process its
	// dependents instead of classifying its owners.
	if item.isDeletingDependents() {
		return gc.processDeletingDependentsItem(logger, item)
	}

	// compute if we should delete the item
	ownerReferences := latest.GetOwnerReferences()
	if len(ownerReferences) == 0 {
		logger.V(2).Info("item doesn't have an owner, continue on next item",
			"item", item.identity,
		)
		return nil
	}

	solid, dangling, waitingForDependentsDeletion, err := gc.classifyReferences(ctx, item, ownerReferences)
	if err != nil {
		return err
	}
	logger.V(5).Info("classify item's references",
		"item", item.identity,
		"solid", solid,
		"dangling", dangling,
		"waitingForDependentsDeletion", waitingForDependentsDeletion,
	)

	switch {
	case len(solid) != 0:
		logger.V(2).Info("item has at least one existing owner, will not garbage collect",
			"item", item.identity,
			"owner", solid,
		)
		if len(dangling) == 0 && len(waitingForDependentsDeletion) == 0 {
			return nil
		}
		logger.V(2).Info("remove dangling references and waiting references for item",
			"item", item.identity,
			"dangling", dangling,
			"waitingForDependentsDeletion", waitingForDependentsDeletion,
		)
		// waitingForDependentsDeletion needs to be deleted from the
		// ownerReferences, otherwise the referenced objects will be stuck with
		// the FinalizerDeletingDependents and never get deleted.
		ownerUIDs := append(ownerRefsToUIDs(dangling), ownerRefsToUIDs(waitingForDependentsDeletion)...)
		p, err := c.GenerateDeleteOwnerRefStrategicMergeBytes(item.identity.UID, ownerUIDs)
		if err != nil {
			return err
		}
		_, err = gc.patch(item, p, func(n *node) ([]byte, error) {
			return gc.deleteOwnerRefJSONMergePatch(n, ownerUIDs...)
		})
		return err
	case len(waitingForDependentsDeletion) != 0 && item.dependentsLength() != 0:
		deps := item.getDependents()
		for _, dep := range deps {
			if dep.isDeletingDependents() {
				// this cycle detection has false positives; multiple workers run
				// attemptToDeleteItem in parallel, so the detection can fail in a
				// race condition. A more rigorous detection can be applied if this
				// turns out to be a problem.
				logger.V(2).Info("processing item, some of its owners and its dependent have FinalizerDeletingDependents, to prevent potential cycle, its ownerReferences are going to be modified to be non-blocking, then the item is going to be deleted with Foreground",
					"item", item.identity,
					"dependent", dep.identity,
				)
				patch, err := item.unblockOwnerReferencesStrategicMergePatch()
				if err != nil {
					return err
				}
				if _, err := gc.patch(item, patch, gc.unblockOwnerReferencesJSONMergePatch); err != nil {
					return err
				}
				break
			}
		}
		logger.V(2).Info("at least one owner of item has FinalizerDeletingDependents, and the item itself has dependents, so it is going to be deleted in Foreground",
			"item", item.identity,
		)
		// the deletion event will be observed by the graphBuilder, so the item
		// will be processed again in processDeletingDependentsItem. If it
		// doesn't have dependents, the function will remove the
		// FinalizerDeletingDependents from the item, resulting in the final
		// deletion of the item.
		policy := metav1.DeletePropagationForeground
		return gc.deleteObject(item.identity, &policy)
	default:
		// item doesn't have any solid owner, so it needs to be garbage
		// collected. Also, none of item's owners is waiting for the deletion of
		// its dependents, so set propagationPolicy based on existing finalizers.
		var policy metav1.DeletionPropagation
		switch {
		case hasOrphanFinalizer(latest):
			// if an existing orphan finalizer is already on the object, honor it.
			policy = metav1.DeletePropagationOrphan
		case hasDeleteDependentsFinalizer(latest):
			// if an existing foreground finalizer is already on the object, honor it.
			policy = metav1.DeletePropagationForeground
		default:
			// otherwise, default to background.
			policy = metav1.DeletePropagationBackground
		}
		logger.V(2).Info("Deleting item",
			"item", item.identity,
			"propagationPolicy", policy,
		)
		return gc.deleteObject(item.identity, &policy)
	}
}
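// processDeletingDependentsItem handles a node that is being deleted with the foreground
// policy. If the node has no blocking dependents left, the FinalizerDeleteDependents
// finalizer is removed; otherwise the blocking dependents that are not themselves deleting
// dependents are added to the attemptToDelete queue.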
func (gc *GarbageCollector) processDeletingDependentsItem(logger klog.Logger, item *node) error {
	blockingDependents := item.blockingDependents()
	if len(blockingDependents) == 0 {
		logger.V(2).Info("remove DeleteDependents finalizer for item", "item", item.identity)
		return gc.removeFinalizer(logger, item, metav1.FinalizerDeleteDependents)
	}
	for _, dep := range blockingDependents {
		if !dep.isDeletingDependents() {
			logger.V(2).Info("adding dependent to attemptToDelete, because its owner is deletingDependents",
				"item", item.identity,
				"dependent", dep.identity,
			)
			gc.attemptToDelete.Add(dep)
		}
	}
	return nil
}
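// orphanDependents removes the given owner from the OwnerReferences of each dependent,
// patching all dependents in parallel and aggregating any errors.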
func (gc *GarbageCollector) orphanDependents(logger klog.Logger, owner objectReference, dependents []*node) error {
	errCh := make(chan error, len(dependents))
	wg := sync.WaitGroup{}
	wg.Add(len(dependents))
	for i := range dependents {
		go func(dependent *node) {
			defer wg.Done()
			// the dependent.identity.UID is used as a precondition
			p, err := c.GenerateDeleteOwnerRefStrategicMergeBytes(dependent.identity.UID, []types.UID{owner.UID})
			if err != nil {
				errCh <- fmt.Errorf("orphaning %s failed, %v", dependent.identity, err)
				return
			}
			_, err = gc.patch(dependent, p, func(n *node) ([]byte, error) {
				return gc.deleteOwnerRefJSONMergePatch(n, owner.UID)
			})
			// note that if the target ownerReference doesn't exist in the
			// dependent, strategic merge patch will NOT return an error.
			if err != nil && !errors.IsNotFound(err) {
				errCh <- fmt.Errorf("orphaning %s failed, %v", dependent.identity, err)
			}
		}(dependents[i])
	}
	wg.Wait()
	close(errCh)

	var errorsSlice []error
	for e := range errCh {
		errorsSlice = append(errorsSlice, e)
	}

	if len(errorsSlice) != 0 {
		return fmt.Errorf("failed to orphan dependents of owner %s, got errors: %s", owner, utilerrors.NewAggregate(errorsSlice).Error())
	}
	logger.V(5).Info("successfully updated all dependents", "owner", owner)
	return nil
}

func (gc *GarbageCollector) runAttemptToOrphanWorker(logger klog.Logger) {
	for gc.processAttemptToOrphanWorker(logger) {
	}
}

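// processAttemptToOrphanWorker dequeues a node from the attemptToOrphan queue, then finds
// its dependents based on the graph maintained by the GC, removes the owner from the
// OwnerReferences of its dependents, and finally updates the owner to remove the "Orphan"
// finalizer. The node is requeued with rate limiting if any of these steps fail.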
func (gc *GarbageCollector) processAttemptToOrphanWorker(logger klog.Logger) bool {
	item, quit := gc.attemptToOrphan.Get()
	gc.workerLock.RLock()
	defer gc.workerLock.RUnlock()
	if quit {
		return false
	}
	defer gc.attemptToOrphan.Done(item)

	action := gc.attemptToOrphanWorker(logger, item)
	switch action {
	case forgetItem:
		gc.attemptToOrphan.Forget(item)
	case requeueItem:
		gc.attemptToOrphan.AddRateLimited(item)
	}

	return true
}

func (gc *GarbageCollector) attemptToOrphanWorker(logger klog.Logger, item interface{}) workQueueItemAction {
	owner, ok := item.(*node)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("expect *node, got %#v", item))
		return forgetItem
	}

	owner.dependentsLock.RLock()
	dependents := make([]*node, 0, len(owner.dependents))
	for dependent := range owner.dependents {
		dependents = append(dependents, dependent)
	}
	owner.dependentsLock.RUnlock()

	err := gc.orphanDependents(logger, owner.identity, dependents)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("orphanDependents for %s failed with %v", owner.identity, err))
		return requeueItem
	}

	err = gc.removeFinalizer(logger, owner, metav1.FinalizerOrphanDependents)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("removeOrphanFinalizer for %s failed with %v", owner.identity, err))
		return requeueItem
	}
	return forgetItem
}

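// GraphHasUID returns whether the GraphBuilder has a particular UID stored in its
// uidToNode graph. It is intended for debugging and tests.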
func (gc *GarbageCollector) GraphHasUID(u types.UID) bool {
	_, ok := gc.dependencyGraphBuilder.uidToNode.Read(u)
	return ok
}

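// GetDeletableResources returns all resources from discoveryClient that the
// garbage collector should recognize and work with. More specifically, all
// preferred resources which support the 'delete', 'list', and 'watch' verbs.
//
// If an error was encountered fetching resources from the server,
// it is included as well, along with any resources that were discovered.
//
// All discovery errors are considered temporary. Upon encountering any error,
// GetDeletableResources will log and return any discovered resources it was
// able to process (which may be none).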
func GetDeletableResources(logger klog.Logger, discoveryClient discovery.ServerResourcesInterface) (map[schema.GroupVersionResource]struct{}, error) {
	preferredResources, lookupErr := discoveryClient.ServerPreferredResources()
	if lookupErr != nil {
		if groupLookupFailures, isLookupFailure := discovery.GroupDiscoveryFailedErrorGroups(lookupErr); isLookupFailure {
			logger.Info("failed to discover some groups", "groups", groupLookupFailures)
		} else {
			logger.Info("failed to discover preferred resources", "error", lookupErr)
		}
	}
	if preferredResources == nil {
		return map[schema.GroupVersionResource]struct{}{}, lookupErr
	}

	// Only consider resources that support all the verbs the garbage collector needs.
	deletableResources := discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"delete", "list", "watch"}}, preferredResources)
	deletableGroupVersionResources := map[schema.GroupVersionResource]struct{}{}
	for _, rl := range deletableResources {
		gv, err := schema.ParseGroupVersion(rl.GroupVersion)
		if err != nil {
			logger.Info("ignoring invalid discovered resource", "groupversion", rl.GroupVersion, "error", err)
			continue
		}
		for i := range rl.APIResources {
			deletableGroupVersionResources[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{}
		}
	}

	return deletableGroupVersionResources, lookupErr
}

// Name returns the name of the garbage collector controller.
func (gc *GarbageCollector) Name() string {
	return "garbagecollector"
}

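// GetDependencyGraphBuilder returns the graph builder that maintains the
// dependency graph consumed by the garbage collector workers.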
func (gc *GarbageCollector) GetDependencyGraphBuilder() *GraphBuilder {
	return gc.dependencyGraphBuilder
}