package clusterctl

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"time"

	containerAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/container/v1beta1"
	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	k8sRuntime "k8s.io/apimachinery/pkg/runtime"
	kuberecorder "k8s.io/client-go/tools/record"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"

	"edge-infra.dev/pkg/edge/api/totp"
	gkeClusterApi "edge-infra.dev/pkg/edge/apis/gkecluster/v1alpha1"
	bs "edge-infra.dev/pkg/edge/bootstrapping"
	"edge-infra.dev/pkg/edge/constants"
	clusterConstants "edge-infra.dev/pkg/edge/constants/api/cluster"
	"edge-infra.dev/pkg/edge/constants/api/fleet"
	"edge-infra.dev/pkg/edge/k8objectsutils"
	"edge-infra.dev/pkg/edge/registration"
	ipranger "edge-infra.dev/pkg/f8n/ipranger/server"
	"edge-infra.dev/pkg/k8s/konfigkonnector/apis/meta"
	"edge-infra.dev/pkg/k8s/meta/status"
	"edge-infra.dev/pkg/k8s/runtime/conditions"
	"edge-infra.dev/pkg/k8s/runtime/controller/metrics"
	"edge-infra.dev/pkg/k8s/runtime/controller/reconcile"
	"edge-infra.dev/pkg/k8s/runtime/controller/reconcile/recerr"
	"edge-infra.dev/pkg/k8s/runtime/inventory"
	"edge-infra.dev/pkg/k8s/runtime/patch"
	"edge-infra.dev/pkg/k8s/runtime/sap"
	unstructuredutil "edge-infra.dev/pkg/k8s/unstructured"
	ff "edge-infra.dev/pkg/lib/featureflag"
	"edge-infra.dev/pkg/lib/gcp/iam"
	"edge-infra.dev/pkg/lib/uuid"
)

const (
	ErrCreatingContainerCluster  = "failed to create ContainerCluster"
	ErrGettingContainerCluster   = "failed to get ContainerCluster"
	ErrCreatingContainerNodePool = "failed to create ContainerNodePool"
	ErrGettingContainerNodePool  = "failed to get ContainerNodePool"
	ErrGKEClusterStatusUpdate    = "unable to update GKECluster status"
	ErrContainerClusterNotReady  = "ContainerCluster not yet ready"
	ErrContainerNodePoolNotReady = "ContainerNodePool not yet ready"
	ErrEdgeBootstrapAPIFailed    = "edge-bootstrap api failed"
	ErrInvalidGKECluster         = "invalid GKECluster spec"
	ErrCreatingConfigConnector   = "failed to create config connector resource"
)

var (
	GKEClusterLabel = fmt.Sprintf("%s.%s", strings.ToLower(gkeClusterApi.Kind), gkeClusterApi.GKEClusterGVK.Group)
)

type EdgeClusterConfig struct {
	Location    string
	MachineType string
	NumNode     int
	MinNodes    int
	MaxNodes    int
	Autoscale   bool
}

var gkeClientCache = struct {
	sync.Mutex
	c map[string]client.Client // map[clusterEdgeID]Client
}{
	c: make(map[string]client.Client),
}

var clusterConfigs = map[string]EdgeClusterConfig{
	fleet.Cluster: {
		Location:    "us-east1-c",
		MachineType: "e2-highmem-4",
		NumNode:     2,
		MinNodes:    1,
		MaxNodes:    3,
		Autoscale:   true,
	},
	fleet.Store: {
		Location:    "us-east1-c",
		MachineType: "e2-highmem-4",
		NumNode:     2,
		MinNodes:    1,
		MaxNodes:    3,
		Autoscale:   true,
	},
	fleet.CouchDB: {
		Location:    "us-east1-c",
		MachineType: "n2d-highcpu-8",
		NumNode:     2,
		MinNodes:    1,
		MaxNodes:    3,
		Autoscale:   true,
	},
	fleet.BasicStore: {
		Location:    "us-east1-c",
		MachineType: "e2-standard-4",
		NumNode:     2,
		MinNodes:    1,
		MaxNodes:    3,
		Autoscale:   true,
	},
}

// gkeClusterConditions is the reconcile summarization configuration for how
// various conditions should be taken into account when the final condition is
// summarized
var gkeClusterConditions = reconcile.Conditions{
	Target: status.ReadyCondition,
	Owned: []string{
		status.ReadyCondition,
		status.ReconcilingCondition,
		status.StalledCondition,
	},
	Summarize: []string{
		status.StalledCondition,
	},
	NegativePolarity: []string{
		status.ReconcilingCondition,
		status.StalledCondition,
	},
}
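// ContainerClusterClientFunc builds a client.Client for a provisioned
// ContainerCluster. The reconciler calls it via its CreateClient field when it
// needs to talk to the newly created GKE cluster, so the construction of that
// client can be substituted.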
type ContainerClusterClientFunc func(cluster containerAPI.ContainerCluster, o client.Options) (client.Client, error)

// GKEClusterReconciler reconciles a GKECluster object
type GKEClusterReconciler struct {
	client.Client
	kuberecorder.EventRecorder
	manager           ctrl.Manager
	Log               logr.Logger
	Metrics           metrics.Metrics
	Scheme            *k8sRuntime.Scheme
	CreateClient      ContainerClusterClientFunc
	EdgeAPI           string
	IPRangerClient    *ipranger.Client
	DefaultRequeue    time.Duration
	WaitForSetTimeout time.Duration
	TopLevelProject   string
	TopLevelCNRMSA    string
	TotpSecret        string
	ResourceManager   *sap.ResourceManager
	Name              string
	Conditions        reconcile.Conditions
	Concurrency       int
}

func gkeClusterPredicate() predicate.Predicate {
	return predicate.Funcs{
		UpdateFunc: func(_ event.UpdateEvent) bool { return false },
		CreateFunc: func(_ event.CreateEvent) bool { return true },
		DeleteFunc: func(_ event.DeleteEvent) bool { return false },
	}
}

// SetupWithManager sets up the controller with the Manager.
func (r *GKEClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&gkeClusterApi.GKECluster{}).
		WithOptions(controller.Options{
			MaxConcurrentReconciles: r.Concurrency,
		}).
		WithEventFilter(gkeClusterPredicate()).
		Complete(r)
}

func (r *GKEClusterReconciler) PatchOpts() []patch.Option {
	return []patch.Option{
		patch.WithOwnedConditions{Conditions: r.Conditions.Owned},
		patch.WithFieldOwner(r.Name),
	}
}

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *GKEClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, recErr error) {
	var (
		reconcileStart = time.Now()
		log            = ctrl.LoggerFrom(ctx)
		result         = reconcile.ResultEmpty
		gkeCluster     = &gkeClusterApi.GKECluster{}
	)
	if err := r.Client.Get(ctx, req.NamespacedName, gkeCluster); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	r.Metrics.RecordReconciling(ctx, gkeCluster)
	updateReconcileMetadata(ctx, gkeCluster, reconcileStart)

	oldStatus := gkeCluster.Status.DeepCopy()
	patcher := patch.NewSerialPatcher(gkeCluster, r.Client)

	defer func() {
		if recErrToCondition, ok := recErr.(recerr.Error); ok {
			recErrToCondition.ToCondition(gkeCluster, status.ReadyCondition)
		}
		summarizer := reconcile.NewSummarizer(patcher)
		res, recErr = summarizer.SummarizeAndPatch(ctx, gkeCluster, []reconcile.SummarizeOption{
			reconcile.WithConditions(r.Conditions),
			reconcile.WithError(recErr),
			reconcile.WithResult(result),
			reconcile.WithIgnoreNotFound(),
			reconcile.WithProcessors(
				reconcile.RecordReconcileReq,
				reconcile.RecordResult,
			),
			reconcile.WithFieldOwner(r.Name),
			reconcile.WithEventRecorder(r.EventRecorder),
		}...)
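		// On every exit path, record duration and readiness metrics and drop
		// this object's reconcile metadata entry recorded at the start of the
		// loop.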
		r.Metrics.RecordDuration(ctx, gkeCluster, reconcileStart)
		r.Metrics.RecordReadiness(ctx, gkeCluster)
		deleteResourceEntry(gkeCluster)
	}()

	log = log.WithValues("name", gkeCluster.Spec.Name, "spec", gkeCluster.Spec, "gke reconciler concurrency", r.Concurrency)
	ctx = logr.NewContext(ctx, log)
	log.Info("reconciling started for GKECluster")

	// Check if GKECluster spec is valid
	if err := gkeCluster.Spec.Valid(); err != nil {
		log.Error(err, ErrInvalidGKECluster)
		recErr = recerr.NewStalled(fmt.Errorf("invalid spec: %w", err), gkeClusterApi.InvalidSpecReason)
		return
	}

	if err := reconcile.Progressing(ctx, gkeCluster, patcher, r.PatchOpts()...); err != nil {
		recErr = recerr.New(err, gkeClusterApi.ReconcileFailedReason)
		return
	}

	var unstructuredObjs []*unstructured.Unstructured
	uobj, recErr := r.createContainerCluster(ctx, gkeCluster)
	if recErr != nil {
		return
	}
	unstructuredObjs = append(unstructuredObjs, uobj)

	uobj, recErr = r.createContainerNodePool(ctx, gkeCluster)
	if recErr != nil {
		return
	}
	unstructuredObjs = append(unstructuredObjs, uobj)

	r.ResourceManager.SetOwnerLabels(unstructuredObjs, r.Name, "")
	changeSet, err := r.ResourceManager.ApplyAll(ctx, unstructuredObjs, sap.ApplyOptions{
		Force:       false,
		WaitTimeout: r.WaitForSetTimeout,
	})
	if err != nil {
		recErr = recerr.New(fmt.Errorf("failed to apply resources: %w", err), gkeClusterApi.ApplyFailedReason)
		return
	}

	containerCluster, recErr := r.getContainerClusterWhenReady(ctx, gkeCluster)
	if recErr != nil {
		return
	}
	if containerCluster == nil {
		recErr = recerr.NewWait(errors.New(ErrContainerClusterNotReady), gkeClusterApi.ContainerClusterNotReadyReason, r.DefaultRequeue)
		return
	}

	clusterClient, err := r.getGKEClient(ctx, gkeCluster.Name, containerCluster)
	if err != nil {
		recErr = recerr.New(fmt.Errorf("failed to create client for container cluster: %w", err), gkeClusterApi.EdgeBootstrapFailedReason)
		return
	}

	totpToken, err := totp.GenerateTotp(r.TotpSecret)
	if err != nil {
		log.Error(err, "unable to create totp token from cluster id")
		recErr = recerr.New(fmt.Errorf("unable to create totp token from cluster id: %w", err), gkeClusterApi.EdgeBootstrapFailedReason)
		return
	}

	err = r.bootstrapCluster(ctx, clusterClient, gkeCluster, totpToken.Code)
	if err != nil {
		recErr = recerr.New(fmt.Errorf("failed to bootstrap cluster: %w", err), gkeClusterApi.EdgeBootstrapFailedReason)
		return
	}

	if err = bs.CleanUpKustomizations(ctx, gkeCluster.Name, constants.EdgeBucketName, "latest", log, clusterClient); err != nil {
		recErr = recerr.New(fmt.Errorf("failed to apply kustomizations: %w", err), gkeClusterApi.EdgeBootstrapFailedReason)
		return
	}

	if gkeCluster.Spec.Fleet == fleet.Cluster { //nolint:nestif
		hash := uuid.FromUUID(gkeCluster.ObjectMeta.Name).Hash()
		clusterCtlSAName := fmt.Sprintf("cctl-%s", hash)
		syncedObjectCtlSAName := fmt.Sprintf("soctl-%s", hash)
		clusterctlSA := &corev1.ServiceAccount{
			TypeMeta: metav1.TypeMeta{
				Kind: "ServiceAccount",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "clusterctl",
				Namespace: "clusterctl",
				Labels: map[string]string{
					constants.PlatformComponent: "clusterctl",
				},
				Annotations: map[string]string{
					"iam.gke.io/gcp-service-account": iam.SvcAccountEmail(clusterCtlSAName, gkeCluster.Spec.ProjectID),
				},
			},
		}
		clusterctlSACopy := clusterctlSA.DeepCopy()
		if _, err = controllerutil.CreateOrUpdate(ctx, clusterClient, clusterctlSACopy, func() error {
			clusterctlSACopy.Annotations = clusterctlSA.Annotations
			return nil
		}); err != nil {
			recErr = recerr.New(err, gkeClusterApi.ServiceAccountCreationFailedReason)
			return
		}
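		// The syncedobjectctl ServiceAccount is created the same way as the
		// clusterctl one above: annotated with iam.gke.io/gcp-service-account
		// so GKE Workload Identity maps it to the per-cluster GCP service
		// account.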
		syncedobjectctlSA := &corev1.ServiceAccount{
			TypeMeta: metav1.TypeMeta{
				Kind: "ServiceAccount",
			},
			ObjectMeta: metav1.ObjectMeta{
				Name:      "syncedobjectctl",
				Namespace: "syncedobjectctl",
				Labels: map[string]string{
					constants.PlatformComponent: "syncedobjectctl",
				},
				Annotations: map[string]string{
					"iam.gke.io/gcp-service-account": iam.SvcAccountEmail(syncedObjectCtlSAName, gkeCluster.Spec.ProjectID),
				},
			},
		}
		syncedobjectctlSACopy := syncedobjectctlSA.DeepCopy()
		if _, err = controllerutil.CreateOrUpdate(ctx, clusterClient, syncedobjectctlSACopy, func() error {
			syncedobjectctlSACopy.Annotations = syncedobjectctlSA.Annotations
			return nil
		}); err != nil {
			recErr = recerr.New(err, gkeClusterApi.ServiceAccountCreationFailedReason)
			return
		}
	}

	gkeCluster.Status.Inventory = inventory.New(inventory.FromSapChangeSet(changeSet))
	if oldStatus.Inventory != nil { // nolint:nestif
		diff, err := inventory.Diff(oldStatus.Inventory, gkeCluster.GetInventory())
		if err != nil {
			recErr = recerr.New(err, gkeClusterApi.PruneFailedReason)
			return
		}
		if len(diff) > 0 {
			log.Info("inventory diff", "diff", diff)
			prune, err := ff.FeatureEnabledForContext(ff.NewClusterContext(gkeCluster.Name), ff.UseClusterCTLPruning, true)
			if err != nil {
				log.Error(err, "unable to get ld flag for pruning, defaulting to prune enabled")
			}
			if prune {
				changeSet, err := r.ResourceManager.DeleteAll(ctx, diff, sap.DefaultDeleteOptions())
				if err != nil {
					recErr = recerr.New(err, gkeClusterApi.PruneFailedReason)
					return
				}
				log.Info("pruned objects", "changeset", changeSet)
			}
		}
	}

	log.Info("GKECluster reconciled successfully")
	conditions.MarkTrue(gkeCluster, status.ReadyCondition, gkeClusterApi.GKEClusterReadyReason, "GKECluster reconciled successfully")
	result = reconcile.ResultSuccess
	return
}

func (r *GKEClusterReconciler) createContainerCluster(ctx context.Context, gkeCluster *gkeClusterApi.GKECluster) (*unstructured.Unstructured, recerr.Error) {
	log := ctrl.LoggerFrom(ctx).WithName("create-container-cluster")
	containerClusterKey := gkeCluster.ContainerClusterKey()
	cc := k8objectsutils.BuildContainerCluster(gkeCluster, containerClusterKey)
	cc.ObjectMeta.OwnerReferences = gkeClusterApi.OwnerReference(gkeCluster)

	existingCC := &containerAPI.ContainerCluster{}
	err := r.Client.Get(ctx, containerClusterKey, existingCC)
	if client.IgnoreNotFound(err) != nil {
		log.Error(err, ErrGettingContainerCluster)
		return nil, recerr.New(err, gkeClusterApi.ContainerClusterCreationFailedReason)
	} else if kerrors.IsNotFound(err) {
		// Create the immutable fields the first time:
		// add (sub)network configuration for the new ContainerCluster
		cc = r.setNetworkConfig(ctx, gkeCluster, cc)
	} else {
		// Map immutable fields if the cluster already exists
		mapExistingImmutableFieldsCC(cc, existingCC)
	}

	uobj, err := unstructuredutil.ToUnstructured(cc)
	if err != nil {
		return uobj, recerr.New(fmt.Errorf(ErrToUnstructured, cc.Kind, cc.Namespace, cc.Name, err), gkeClusterApi.ApplyFailedReason)
	}
	return uobj, nil
}
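// setNetworkConfig asks the IPRanger service for a (sub)network configuration
// for "cluster" fleet clusters and applies it to the ContainerCluster. If the
// lookup fails, the error is logged and the GKE networking defaults are kept.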
func (r *GKEClusterReconciler) setNetworkConfig(ctx context.Context, gkeCluster *gkeClusterApi.GKECluster, cc *containerAPI.ContainerCluster) *containerAPI.ContainerCluster {
	log := ctrl.LoggerFrom(ctx).WithName("set-network-config")
	if gkeCluster.Spec.Fleet == fleet.Cluster {
		netcfg, err := r.IPRangerClient.GetNetcfg(gkeCluster.Spec.ProjectID, gkeCluster.Spec.Location, gkeCluster.Spec.Name)
		if err != nil {
			log.Error(err, ErrCreatingContainerCluster, gkeClusterApi.ContainerClusterCreationFailedReason, "unable to get network spec from ipranger, using gke defaults")
		} else {
			cc = k8objectsutils.WithNetworkConfig(cc, netcfg.Network, netcfg.Subnetwork, netcfg.Netmask)
		}
	}
	return cc
}

// mapExistingImmutableFieldsCC maps values from the existing ContainerCluster onto the
// new ContainerCluster to prevent SSA from attempting to change immutable fields
func mapExistingImmutableFieldsCC(cc *containerAPI.ContainerCluster, existingCC *containerAPI.ContainerCluster) {
	cc.Spec.NetworkRef = existingCC.Spec.NetworkRef
	cc.Spec.SubnetworkRef = existingCC.Spec.SubnetworkRef
	cc.Spec.Location = existingCC.Spec.Location
	cc.Spec.IpAllocationPolicy = existingCC.Spec.IpAllocationPolicy
	cc.Spec.InitialNodeCount = existingCC.Spec.InitialNodeCount
	cc.Spec.ClusterAutoscaling = existingCC.Spec.ClusterAutoscaling
	cc.Spec.ClusterIpv4Cidr = existingCC.Spec.ClusterIpv4Cidr
	cc.Spec.ConfidentialNodes = existingCC.Spec.ConfidentialNodes
	cc.Spec.DefaultMaxPodsPerNode = existingCC.Spec.DefaultMaxPodsPerNode
	cc.Spec.Description = existingCC.Spec.Description
	cc.Spec.EnableAutopilot = existingCC.Spec.EnableAutopilot
	cc.Spec.EnableKubernetesAlpha = existingCC.Spec.EnableKubernetesAlpha
	cc.Spec.EnableTpu = existingCC.Spec.EnableTpu
	cc.Spec.MasterAuth = existingCC.Spec.MasterAuth
	cc.Spec.NetworkingMode = existingCC.Spec.NetworkingMode
	cc.Spec.NodeConfig = existingCC.Spec.NodeConfig
	cc.Spec.PrivateClusterConfig = existingCC.Spec.PrivateClusterConfig
	cc.Spec.ResourceID = existingCC.Spec.ResourceID
}

func (r *GKEClusterReconciler) createContainerNodePool(ctx context.Context, gkeCluster *gkeClusterApi.GKECluster) (*unstructured.Unstructured, recerr.Error) {
	log := ctrl.LoggerFrom(ctx).WithName("create-container-node-pool")
	containerClusterKey := gkeCluster.ContainerClusterKey()
	clusterConfig := clusterConfigs[gkeCluster.Spec.Fleet.String()]
	nodePool := k8objectsutils.BuildContainerNodePool(
		gkeCluster.Spec.ProjectID,
		gkeCluster.Spec.Banner,
		gkeCluster.Spec.Organization,
		containerClusterKey.Namespace,
		containerClusterKey.Name,
		gkeCluster.Spec.Location,
		clusterConfig.MachineType,
		clusterConfig.NumNode,
		clusterConfig.MinNodes,
		clusterConfig.MaxNodes,
		string(gkeCluster.Spec.Fleet),
		clusterConfig.Autoscale,
	)

	nodePool, err := r.handleExistingNodePool(ctx, containerClusterKey, log, clusterConfig, nodePool)
	if err != nil {
		return nil, recerr.New(err, gkeClusterApi.ContainerNodePoolCreationFailedReason)
	}
	nodePool.ObjectMeta.OwnerReferences = gkeClusterApi.OwnerReference(gkeCluster)

	uobj, err := unstructuredutil.ToUnstructured(nodePool)
	if err != nil {
		return uobj, recerr.New(fmt.Errorf(ErrToUnstructured, nodePool.Kind, nodePool.Namespace, nodePool.Name, err), gkeClusterApi.ApplyFailedReason)
	}
	return uobj, nil
}

func (r *GKEClusterReconciler) handleExistingNodePool(ctx context.Context, containerClusterKey client.ObjectKey, log logr.Logger, clusterConfig EdgeClusterConfig, nodePool *containerAPI.ContainerNodePool) (*containerAPI.ContainerNodePool, error) {
	existingNP := &containerAPI.ContainerNodePool{}
	err := r.Client.Get(ctx, containerClusterKey, existingNP)
	if client.IgnoreNotFound(err) != nil { //nolint: nestif
		log.Error(err, "failed to get Node pool")
		return nodePool, err
	} else if err == nil {
		if nodePoolConfigChanged(clusterConfig, existingNP) {
			// clean up the existing node pool
			err := r.RemoveExistingNodePool(ctx, existingNP)
			if err != nil {
				return nodePool, err
			}
		} else {
			nodePool = mapExistingImmutableFieldsNodePool(nodePool, existingNP)
		}
	}
	return nodePool, nil
}
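// RemoveExistingNodePool strips the Config Connector deletion-policy
// annotation from the existing ContainerNodePool and then deletes it, so a
// replacement pool with the new configuration can be applied.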
func (r *GKEClusterReconciler) RemoveExistingNodePool(ctx context.Context, existingNP *containerAPI.ContainerNodePool) error {
	delete(existingNP.Annotations, meta.DeletionPolicyAnnotation)
	err := r.Client.Update(ctx, existingNP)
	if err != nil {
		return err
	}
	err = r.Client.Delete(ctx, existingNP)
	if err != nil {
		return err
	}
	return nil
}

func nodePoolConfigChanged(newPool EdgeClusterConfig, np *containerAPI.ContainerNodePool) bool {
	if np.Spec.NodeConfig == nil || np.Spec.NodeConfig.MachineType == nil || *np.Spec.NodeConfig.MachineType != newPool.MachineType {
		return true
	}
	if autoScaleChanged(newPool, np) {
		return true
	}
	return false
}

func autoScaleChanged(newPool EdgeClusterConfig, np *containerAPI.ContainerNodePool) bool {
	if newPool.Autoscale && np.Spec.Autoscaling == nil {
		return true
	}
	if np.Spec.Autoscaling == nil {
		return false
	}
	if np.Spec.Autoscaling.MaxNodeCount == nil || newPool.MaxNodes != *np.Spec.Autoscaling.MaxNodeCount {
		return true
	}
	if np.Spec.Autoscaling.MinNodeCount == nil || newPool.MinNodes != *np.Spec.Autoscaling.MinNodeCount {
		return true
	}
	return false
}

func mapExistingImmutableFieldsNodePool(pool *containerAPI.ContainerNodePool, np *containerAPI.ContainerNodePool) *containerAPI.ContainerNodePool {
	pool.Spec.InitialNodeCount = np.Spec.InitialNodeCount
	pool.Spec.Location = np.Spec.Location
	pool.Spec.MaxPodsPerNode = np.Spec.MaxPodsPerNode
	pool.Spec.NamePrefix = np.Spec.NamePrefix
	pool.Spec.PlacementPolicy = np.Spec.PlacementPolicy
	pool.Spec.NodeConfig = np.Spec.NodeConfig
	pool.Spec.ResourceID = np.Spec.ResourceID
	return pool
}

func (r *GKEClusterReconciler) getContainerClusterWhenReady(ctx context.Context, gkeCluster *gkeClusterApi.GKECluster) (*containerAPI.ContainerCluster, recerr.Error) {
	log := ctrl.LoggerFrom(ctx).WithName("get-container-cluster")
	containerClusterKey := gkeCluster.ContainerClusterKey()

	cluster := &containerAPI.ContainerCluster{}
	if err := r.Client.Get(ctx, containerClusterKey, cluster); err != nil {
		log.Error(err, ErrGettingContainerCluster)
		return nil, recerr.New(err, gkeClusterApi.ContainerClusterNotReadyReason)
	}
	// Check for readiness
	if ready, _ := meta.IsReady(cluster.Status.Conditions); !ready {
		return nil, nil
	}

	nodePool := &containerAPI.ContainerNodePool{}
	if err := r.Client.Get(ctx, containerClusterKey, nodePool); err != nil {
		log.Error(err, ErrGettingContainerNodePool)
		return nil, recerr.New(err, gkeClusterApi.ContainerNodePoolNotReadyReason)
	}
	// Check for readiness
	if ready, _ := meta.IsReady(nodePool.Status.Conditions); !ready {
		return nil, nil
	}
	return cluster, nil
}

func (r *GKEClusterReconciler) getGKEClient(ctx context.Context, clusterEdgeID string, cc *containerAPI.ContainerCluster) (client.Client, error) {
	log := ctrl.LoggerFrom(ctx).WithName("get-gke-client")
	gkeClientCache.Lock()
	defer gkeClientCache.Unlock()
	clusterClient, ok := gkeClientCache.c[clusterEdgeID]
	if !ok {
		var err error
		clusterClient, err = r.CreateClient(*cc, client.Options{Scheme: r.Scheme})
		if err != nil {
			log.Error(err, "failed to create client for container cluster")
			return nil, err
		}
		gkeClientCache.c[clusterEdgeID] = clusterClient
	}
	return clusterClient, nil
}
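// bootstrapCluster registers the new GKE cluster with the edge bootstrap API
// using a registration built from the GKECluster spec and the provided TOTP
// token, then runs the bootstrap against the cluster via the supplied client.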
func (r *GKEClusterReconciler) bootstrapCluster(ctx context.Context, cl client.Client, gkeCluster *gkeClusterApi.GKECluster, token string) error {
	log := ctrl.LoggerFrom(ctx).WithName("bootstrap-cluster")
	reg, err := registration.NewBuilder().
		Banner(gkeCluster.Spec.Banner).
		Store(gkeCluster.Spec.Name).
		ClusterType(clusterConstants.GKE).
		BSLOrganization(gkeCluster.Spec.Organization).
		APIEndpoint(r.EdgeAPI). //todo
		TotpToken(token).
		ClusterEdgeID(gkeCluster.Name).
		CreateBSLSite(true).
		Fleet(string(gkeCluster.Spec.Fleet)).
		ForceBootstrap(true).
		BootstrapBuild()
	if err != nil {
		log.Error(err, "failed building a registration for edge bootstrap api", gkeClusterApi.EdgeBootstrapFailedReason, ErrEdgeBootstrapAPIFailed)
		return err
	}
	reg.Client = cl
	err = reg.BootstrapCluster(ctx)
	if err != nil {
		log.Error(err, "failed bootstrapping cluster with edge bootstrap api", gkeClusterApi.EdgeBootstrapFailedReason, ErrEdgeBootstrapAPIFailed)
		return err
	}
	return nil
}

func (r *GKEClusterReconciler) setResourceManager() error {
	if r.ResourceManager == nil {
		sapMngr, err := sap.NewResourceManagerFromConfig(r.manager.GetConfig(), client.Options{
			HTTPClient: r.manager.GetHTTPClient(),
			Mapper:     r.RESTMapper(),
			Scheme:     r.Scheme,
		}, sap.Owner{Field: r.Name, Group: GKEClusterLabel})
		if err != nil {
			return err
		}
		r.ResourceManager = sapMngr
	}
	return nil
}