package bannerctl import ( "context" "encoding/base64" "encoding/json" "errors" "fmt" "os" "reflect" "strings" "time" registryAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/artifactregistry/v1beta1" computeAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1" containerAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/container/v1beta1" iamAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/iam/v1beta1" k8sAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1" loggingAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/logging/v1beta1" resourceAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/resourcemanager/v1beta1" serviceAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/serviceusage/v1beta1" storageAPI "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/storage/v1beta1" "github.com/fluxcd/pkg/ssa" "github.com/go-logr/logr" grpccodes "google.golang.org/grpc/codes" grpcstatus "google.golang.org/grpc/status" corev1 "k8s.io/api/core/v1" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/cli-utils/pkg/kstatus/polling" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" kms "cloud.google.com/go/kms/apiv1" bsltypes "edge-infra.dev/pkg/edge/api/bsl/types" "edge-infra.dev/pkg/edge/api/services/channels" "edge-infra.dev/pkg/edge/api/totp" apitypes "edge-infra.dev/pkg/edge/api/types" bannerAPI 
	"edge-infra.dev/pkg/edge/apis/banner/v1alpha1"
	edgeCluster "edge-infra.dev/pkg/edge/apis/cluster/v1alpha1"
	edgeErrors "edge-infra.dev/pkg/edge/apis/errors"
	sequelApi "edge-infra.dev/pkg/edge/apis/sequel/k8s/v1alpha2"
	syncedobjectApi "edge-infra.dev/pkg/edge/apis/syncedobject/apis/v1alpha1"
	"edge-infra.dev/pkg/edge/bsl"
	"edge-infra.dev/pkg/edge/constants"
	bannerconstants "edge-infra.dev/pkg/edge/constants/api/banner"
	clusterConstants "edge-infra.dev/pkg/edge/constants/api/cluster"
	"edge-infra.dev/pkg/edge/constants/api/fleet"
	"edge-infra.dev/pkg/edge/controllers/dbmetrics"
	"edge-infra.dev/pkg/edge/controllers/util/edgedb"
	"edge-infra.dev/pkg/edge/edgeencrypt"
	"edge-infra.dev/pkg/edge/flux/bootstrap"
	"edge-infra.dev/pkg/edge/gcpinfra"
	gcpconstants "edge-infra.dev/pkg/edge/gcpinfra/constants"
	"edge-infra.dev/pkg/edge/k8objectsutils"
	"edge-infra.dev/pkg/edge/registration"
	"edge-infra.dev/pkg/edge/shipment/generator"
	"edge-infra.dev/pkg/f8n/warehouse/cluster"
	whv1 "edge-infra.dev/pkg/f8n/warehouse/k8s/apis/v1alpha1"
	kccmeta "edge-infra.dev/pkg/k8s/konfigkonnector/apis/meta"
	"edge-infra.dev/pkg/k8s/meta/status"
	"edge-infra.dev/pkg/k8s/runtime/conditions"
	"edge-infra.dev/pkg/k8s/runtime/controller"
	"edge-infra.dev/pkg/k8s/runtime/controller/metrics"
	"edge-infra.dev/pkg/k8s/runtime/controller/reconcile"
	"edge-infra.dev/pkg/k8s/runtime/controller/reconcile/recerr"
	"edge-infra.dev/pkg/k8s/runtime/inventory"
	"edge-infra.dev/pkg/k8s/runtime/patch"
	unstructuredutil "edge-infra.dev/pkg/k8s/unstructured"
	"edge-infra.dev/pkg/lib/gcp/metricsscopes"
	gcpProject "edge-infra.dev/pkg/lib/gcp/project"
	"edge-infra.dev/pkg/lib/logging"
	"edge-infra.dev/pkg/lib/uuid"
)

// Well-known names reused throughout banner reconciliation.
const (
	// projectName is the fixed name of the KCC Project resource created per banner.
	projectName = "banner-project"
	// logSinkName names the SIEM logging sink and its storage bucket suffix.
	logSinkName = "siem"
	// dbInstance is the suffix appended to the database instance name.
	dbInstance = "-migrated"
)

var (
	// Node-pool sizing defaults for the cluster-infra GKE cluster registration.
	clusterMaxNodes    = 6
	clusterMinNodes    = 1
	clusterMachineType = "e2-standard-4"
	// bannerConditions is the reconcile summarization configuration for how
	// various conditions should be taken into account when the final condition is
	// summarized
	bannerConditions = reconcile.Conditions{
		Target: status.ReadyCondition,
		Owned: []string{
			status.ReadyCondition,
			status.ReconcilingCondition,
			status.StalledCondition,
		},
		Summarize: []string{
			status.StalledCondition,
		},
		NegativePolarity: []string{
			status.ReconcilingCondition,
			status.StalledCondition,
		},
	}
)

// ErrProjectNotReady occurs when the project isn't ready during reconciliation,
// used to signal to the Reconciler that it needs to requeue
var (
	ErrProjectNotReady = errors.New("project is not ready")
	ErrAPINotReady     = errors.New("gcp api is not ready")
)

//
// +kubebuilder:rbac:groups=edge.ncr.com,resources=banners,verbs=*
// +kubebuilder:rbac:groups=edge.ncr.com,resources=banners/status,verbs=*
// +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;create
// +kubebuilder:rbac:groups=edge.ncr.com,resources=clusters,verbs=create;get;list;update;patch;watch
// +kubebuilder:rbac:groups=edge.ncr.com,resources=clusters/status,verbs=get;watch
// +kubebuilder:rbac:groups=edge.ncr.com,resources=syncedobjects,verbs=*
// +kubebuilder:rbac:groups=edge.ncr.com,resources=syncedobjects/status,verbs=get;watch
// +kubebuilder:rbac:groups=backend.edge.ncr.com,resources=databaseusers,verbs=*
// +kubebuilder:rbac:groups=backend.edge.ncr.com,resources=databaseusers/status,verbs=get;watch
// +kubebuilder:rbac:groups="resourcemanager.cnrm.cloud.google.com",resources=projects,verbs=get;create;list;watch;patch
// +kubebuilder:rbac:groups="resourcemanager.cnrm.cloud.google.com",resources=projects/status,verbs=get;watch
// +kubebuilder:rbac:groups="container.cnrm.cloud.google.com",resources=containerclusters,verbs=get;create;list;watch;patch
// +kubebuilder:rbac:groups="container.cnrm.cloud.google.com",resources=containerclusters/status,verbs=get;watch
// +kubebuilder:rbac:groups="container.cnrm.cloud.google.com",resources=containernodepools,verbs=get;create;list;watch;patch
// +kubebuilder:rbac:groups="container.cnrm.cloud.google.com",resources=containernodepools/status,verbs=get;watch
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamcustomroles,verbs=*
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamcustomroles/status,verbs=get;watch
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iampolicymembers,verbs=*
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iampolicymembers/status,verbs=get;watch
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamserviceaccounts,verbs=*
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamserviceaccounts/status,verbs=get;watch
// +kubebuilder:rbac:groups="pubsub.cnrm.cloud.google.com",resources=pubsubtopics,verbs=*
// +kubebuilder:rbac:groups="pubsub.cnrm.cloud.google.com",resources=pubsubtopics/status,verbs=get;watch
// +kubebuilder:rbac:groups="pubsub.cnrm.cloud.google.com",resources=pubsubsubscriptions,verbs=*
// +kubebuilder:rbac:groups="pubsub.cnrm.cloud.google.com",resources=pubsubsubscriptions/status,verbs=get;watch
// +kubebuilder:rbac:groups="serviceusage.cnrm.cloud.google.com",resources=services,verbs=*
// +kubebuilder:rbac:groups="serviceusage.cnrm.cloud.google.com",resources=services/status,verbs=get;watch
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computenetworks,verbs=*
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computenetworks/status,verbs=get;watch
// +kubebuilder:rbac:groups="storage.cnrm.cloud.google.com",resources=storagebuckets,verbs=*
// +kubebuilder:rbac:groups="storage.cnrm.cloud.google.com",resources=storagebuckets/status,verbs=get;watch
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computeaddresses,verbs=*
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computeaddresses/status,verbs=get;watch
//
// +kubebuilder:rbac:groups="logging.cnrm.cloud.google.com",resources=logginglogexclusions,verbs=*
// +kubebuilder:rbac:groups="logging.cnrm.cloud.google.com",resources=logginglogexclusions/status,verbs=get;watch
// +kubebuilder:rbac:groups="logging.cnrm.cloud.google.com",resources=logginglogsinks,verbs=*
// +kubebuilder:rbac:groups="logging.cnrm.cloud.google.com",resources=logginglogsinks/status,verbs=get;watch
// +kubebuilder:rbac:groups="artifactregistry.cnrm.cloud.google.com",resources=artifactregistryrepositories,verbs=*
// +kubebuilder:rbac:groups="artifactregistry.cnrm.cloud.google.com",resources=artifactregistryrepositories/status,verbs=get;watch
// +kubebuilder:rbac:groups=warehouse.edge.ncr.com,resources=shipments,verbs=*

// BannerReconciler reconciles Banner objects: it provisions the banner's GCP
// project and the KCC resources, database users, and synced objects that the
// banner's infrastructure requires.
type BannerReconciler struct {
	client.Client
	Log                      logr.Logger
	Metrics                  metrics.Metrics
	Conditions               reconcile.Conditions
	BillingAccount           string
	FolderID                 string
	ForemanProjectID         string
	PlatInfraProjectID       string
	MetricsScopesClient      metricsScopesClient
	SecretManager            secretManager
	DefaultRequeue           time.Duration
	Name                     string
	ResourceManager          *ssa.ResourceManager
	EdgeAPI                  string
	TotpSecret               string
	Domain                   string
	DatasyncDNSName          string
	DatasyncDNSZone          string
	EdgeDB                   *edgedb.EdgeDB
	DatabaseName             string
	Recorder                 *dbmetrics.DBMetrics
	BSLClient                *bsl.Client
	BSLConfig                bsltypes.BSPConfig
	GCPRegion                string
	GCPForemanProjectNumber  string
	GCPZone                  string
	EdgeSecOptInCompliance   bool
	EdgeSecMaxLeasePeriod    string
	EdgeSecMaxValidityPeriod string
}

// Run parses startup configuration, builds the controller manager, and blocks
// running it until the signal handler triggers shutdown.
func Run(o ...controller.Option) error {
	ctrl.SetLogger(logging.NewLogger().Logger)
	log := ctrl.Log.WithName("setup")
	cfg, _, err := NewConfig(os.Args)
	if err != nil {
		// NOTE(review): exits the process rather than returning err — callers
		// never see this error; confirm this is intentional for CLI startup.
		log.Error(err, "failed to parse startup configuration")
		os.Exit(1)
	}
	mgr, err := create(cfg, o...)
	if err != nil {
		return err
	}
	log.Info("starting manager")
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		log.Error(err, "problem running manager")
		return err
	}
	return nil
}

// create wires up the controller manager: KMS client, decryption infra, BSL
// client, metrics, and the three reconcilers (banner, encryption infra,
// encryption key management).
func create(cfg Config, o ...controller.Option) (ctrl.Manager, error) {
	o = append(o, controller.WithMetricsAddress(cfg.MetricsAddr))
	ctlCfg, opts := controller.ProcessOptions(o...)
	opts.LeaderElectionID = "2c625a13.edge.ncr.com"
	opts.Scheme = createScheme()
	ctx := context.Background()
	kmsClient, err := kms.NewKeyManagementClient(ctx)
	if err != nil {
		ctrl.Log.Error(err, "failed to create kms client")
		return nil, err
	}
	sm := edgeencrypt.NewSigningMethodKMS(kmsClient)
	// Decryption infra must exist before the reconcilers start using it.
	err = CreateDecryptionInfra(ctx, kmsClient, sm, &gcpSecretManager{}, edgeencrypt.KmsKey{
		ProjectID: cfg.ForemanProjectID,
		Location:  cfg.GCPRegion,
	}, cfg.ResourceTimeout)
	if err != nil {
		ctrl.Log.Error(err, "failed to create decryption infra")
		return nil, err
	}
	dbm := dbmetrics.New("bannerctl")
	mgr, err := ctrl.NewManager(ctlCfg, opts)
	if err != nil {
		ctrl.Log.Error(err, "failed to create manager")
		return nil, err
	}
	bslClient := bsl.NewBSLClient(bsltypes.BSPConfig{
		Endpoint:           cfg.BSLConfig.Endpoint,
		Root:               cfg.BSLConfig.Root,
		OrganizationPrefix: cfg.BSLConfig.OrganizationPrefix,
	})
	bslClient.SetDefaultAccessKey(cfg.BSLAccessKey.SharedKey, cfg.BSLAccessKey.SecretKey)
	bslClient.SetTimeout(BSLDefaultTimeout)
	m := metrics.New(mgr, "bannerctl", metrics.WithCollectors(dbm.Collectors()...))
	if err = (&BannerReconciler{
		Client:        mgr.GetClient(),
		Log:           ctrl.Log.WithName("bannerctl"),
		Metrics:       m,
		Conditions:    bannerConditions,
		SecretManager: &gcpSecretManager{},
		MetricsScopesClient: &gcpMetricsScopesClient{
			c: metricsscopes.New(cfg.ForemanProjectID),
		},
		ForemanProjectID:         cfg.ForemanProjectID,
		PlatInfraProjectID:       cfg.PlatInfraProjectID,
		DefaultRequeue:           10 * time.Second,
		BillingAccount:           gcpconstants.DefaultBillingAccountID,
		FolderID:                 cfg.ProjectBootstrapping.FolderID,
		Name:                     "bannerctl",
		EdgeAPI:                  cfg.EdgeAPI,
		TotpSecret:               cfg.TotpSecret,
		Domain:                   cfg.Domain,
		DatasyncDNSName:          cfg.DatasyncDNSName,
		DatasyncDNSZone:          cfg.DatasyncDNSZone,
		EdgeDB:                   &edgedb.EdgeDB{DB: cfg.DB},
		DatabaseName:             cfg.DatabaseName,
		Recorder:                 dbm,
		BSLClient:                bslClient,
		BSLConfig:                cfg.BSLConfig,
		GCPRegion:                cfg.GCPRegion,
		GCPZone:                  cfg.GCPZone,
		GCPForemanProjectNumber:  cfg.GCPForemanProjectNumber,
		EdgeSecOptInCompliance:   cfg.EdgeSecOptInCompliance,
		EdgeSecMaxLeasePeriod:    cfg.EdgeSecMaxLeasePeriod,
		EdgeSecMaxValidityPeriod: cfg.EdgeSecMaxValidityPeriod,
	}).SetupWithManager(mgr); err != nil {
		ctrl.Log.Error(err, "failed to create controller and set up with manager")
		return nil, err
	}
	cs := channels.NewChannelService(cfg.DB, cfg.ForemanProjectID, nil)
	if err = (&EncryptionInfraReconciler{
		Client:           mgr.GetClient(),
		Log:              ctrl.Log.WithName("encryptioninfra"),
		Conditions:       bannerConditions,
		SecretManager:    &gcpSecretManager{},
		KmsClient:        kmsClient,
		ForemanProjectID: cfg.ForemanProjectID,
		GCPRegion:        cfg.GCPRegion,
		IntervalTime:     cfg.IntervalTime,
		RequeueTime:      cfg.RequeueTime,
		ResourceTimeout:  cfg.ResourceTimeout,
		ChannelService:   cs,
		SigningMethod:    sm,
	}).SetupWithManager(mgr); err != nil {
		ctrl.Log.Error(err, "failed to create EncryptionInfra controller and set up with manager")
		return nil, err
	}
	if err = (&EncryptionKeyManagementReconciler{
		Client:           mgr.GetClient(),
		Log:              ctrl.Log.WithName("encryptionkeymanagement"),
		Conditions:       bannerConditions,
		SecretManager:    &gcpSecretManager{},
		KmsClient:        kmsClient,
		ForemanProjectID: cfg.ForemanProjectID,
		GCPRegion:        cfg.GCPRegion,
		IntervalTime:     cfg.IntervalTime,
		RequeueTime:      cfg.RequeueTime,
		ResourceTimeout:  cfg.ResourceTimeout,
		ChannelService:   cs,
	}).SetupWithManager(mgr); err != nil {
		ctrl.Log.Error(err, "failed to create controller and set up with manager")
		return nil, err
	}
	return mgr, nil
}

// SetupWithManager registers the reconciler with the manager, watching Banner
// objects and the owned resource kinds listed below.
func (r *BannerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&bannerAPI.Banner{}).
// leaving this here as we'll likely go back to something similar in the future // For(&bannerAPI.Banner{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). Owns(&resourceAPI.Project{}). Owns(&edgeCluster.Cluster{}). Owns(&containerAPI.ContainerNodePool{}). Owns(&iamAPI.IAMCustomRole{}). Owns(&iamAPI.IAMPolicyMember{}). Owns(&serviceAPI.Service{}). Owns(&storageAPI.StorageBucket{}). Owns(&syncedobjectApi.SyncedObject{}). Owns(&loggingAPI.LoggingLogExclusion{}). Owns(®istryAPI.ArtifactRegistryRepository{}). Owns(&sequelApi.DatabaseUser{}). Complete(r) } func (r *BannerReconciler) PatchOpts() []patch.Option { return []patch.Option{ patch.WithOwnedConditions{Conditions: r.Conditions.Owned}, patch.WithFieldOwner(r.Name), } } func createScheme() *runtime.Scheme { scheme := runtime.NewScheme() utilruntime.Must(bannerAPI.AddToScheme(scheme)) utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(containerAPI.AddToScheme(scheme)) utilruntime.Must(computeAPI.AddToScheme(scheme)) utilruntime.Must(containerAPI.AddToScheme(scheme)) utilruntime.Must(edgeCluster.AddToScheme(scheme)) utilruntime.Must(iamAPI.AddToScheme(scheme)) utilruntime.Must(loggingAPI.AddToScheme(scheme)) utilruntime.Must(resourceAPI.AddToScheme(scheme)) utilruntime.Must(serviceAPI.AddToScheme(scheme)) utilruntime.Must(storageAPI.AddToScheme(scheme)) utilruntime.Must(syncedobjectApi.AddToScheme(scheme)) utilruntime.Must(loggingAPI.AddToScheme(scheme)) utilruntime.Must(registryAPI.AddToScheme(scheme)) utilruntime.Must(sequelApi.AddToScheme(scheme)) return scheme } func (r *BannerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, recErr error) { var ( reconcileStart = time.Now() log = ctrl.LoggerFrom(ctx) result = reconcile.ResultEmpty banner = &bannerAPI.Banner{} ) r.setResourceManager() if err := r.Get(ctx, req.NamespacedName, banner); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) } r.Metrics.RecordReconciling(ctx, banner) patcher := 
patch.NewSerialPatcher(banner, r.Client) defer func() { summarizer := reconcile.NewSummarizer(patcher) res, recErr = summarizer.SummarizeAndPatch(ctx, banner, []reconcile.SummarizeOption{ reconcile.WithConditions(r.Conditions), reconcile.WithResult(result), reconcile.WithError(recErr), reconcile.WithIgnoreNotFound(), reconcile.WithProcessors( reconcile.RecordReconcileReq, reconcile.RecordResult, ), reconcile.WithFieldOwner(r.Name), }...) r.Metrics.RecordDuration(ctx, banner, reconcileStart) r.Metrics.RecordReadiness(ctx, banner) r.EdgeDB.RecordInfraStatus(ctx, banner, *r.Recorder) }() // Banner fetched successfully, so decorate logger with basic banner info // and create a new context to store updated logger log.WithValues("banner", banner.Spec.DisplayName, "bannerEdgeID", banner.Name) ctx = logr.NewContext(ctx, log) log.Info("reconciling started for banner") recErr = r.reconcile(ctx, patcher, banner) if recErr == nil { result = reconcile.ResultSuccess } return } // reconcile handles the actual reconciliation of the banner object func (r *BannerReconciler) reconcile(ctx context.Context, patcher *patch.SerialPatcher, b *bannerAPI.Banner) recerr.Error { log := ctrl.LoggerFrom(ctx) // Validate Banner spec if err := b.IsValid(); err != nil { // TODO: validatingadmissionwebhook log.Error(err, "invalid banner object") return recerr.NewStalled(fmt.Errorf("invalid spec: %w", err), bannerAPI.InvalidSpecReason) } if err := reconcile.Progressing(ctx, b, patcher, r.PatchOpts()...); err != nil { return recerr.New(err, bannerAPI.ReconcileFailedReason) } recErr := r.reconcileNamespace(ctx, b) if recErr != nil { return recErr } // Create project so we can use the number to create dependent resources recErr = r.reconcileProject(ctx, b) if recErr != nil { // Reflect error for Project on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } // Project created successfully, so decorate logger with the projectID // and create a new context to store updated logger 
ctx = logr.NewContext(ctx, log.WithValues("projectID", b.Spec.GCP.ProjectID)) recErr = r.reconcileProjectInfra(ctx, b) if recErr != nil { // Reflect error for Project infra on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } recErr = r.reconcilePlatformSecrets(ctx, b) if recErr != nil { // Reflect error for Project infra on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } recErr = r.reconcileAutomatedEdgeLabels(ctx, b) if recErr != nil { // Reflect error for Project infra on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } recErr = r.reconcileCerts(ctx, b) if recErr != nil { log.Error(recErr, "failed to reconcile certificates") // Reflect error for Project infra on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } err := r.addMetricsScopes(ctx, b) if err != nil { recErr = recerr.New(err, bannerAPI.ProjectSetupFailedReason) // Reflect error for Project infra on Ready condition recErr.ToCondition(b, status.ReadyCondition) return recErr } log.Info("banner reconciled successfully") conditions.MarkTrue(b, status.ReadyCondition, bannerAPI.ProvisionSucceededReason, "banner reconciled successfully") return nil } func (r *BannerReconciler) reconcileNamespace(ctx context.Context, b *bannerAPI.Banner) recerr.Error { log := ctrl.LoggerFrom(ctx).WithName("namespace") namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: b.Name, OwnerReferences: r.ownerRef(b), Annotations: map[string]string{ kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, }, } kccmeta.SetProjectAnnotation(&namespace.ObjectMeta, b.Spec.GCP.ProjectID) err := client.IgnoreAlreadyExists(r.Create(ctx, namespace, r.createOpts())) if err != nil { log.Error(err, "failed to create namespace") return recerr.New(err, bannerAPI.NamespaceCreationFailedReason) } log.Info("namespace created", "namespace", namespace.Name, "ownerReferences", namespace.OwnerReferences) return nil } func 
// createSiemSink declares the LoggingLogSink that routes logs matching
// jsonPayload.enable_siem='true' to the foreman SIEM storage bucket.
(r *BannerReconciler) createSiemSink(b *bannerAPI.Banner) *loggingAPI.LoggingLogSink {
	description := "Route security relevant logs to a siem storage bucket"
	sinkFilter := "jsonPayload.enable_siem='true'"
	storageBucketName := fmt.Sprintf("%s-%s", r.ForemanProjectID, logSinkName)
	// bucketRef will be: storage.googleapis.com/ret-edge-dev1-foreman-siem
	// https://cloud.google.com/config-connector/docs/reference/resource-docs/logging/logginglogsink#spec
	bucketRef := fmt.Sprintf("%s/%s", "storage.googleapis.com", storageBucketName)
	uniqueWriter := true
	return &loggingAPI.LoggingLogSink{
		ObjectMeta: metav1.ObjectMeta{
			Name:            logSinkName,
			Namespace:       b.Name,
			OwnerReferences: r.ownerRef(b),
		},
		TypeMeta: gvkToTypeMeta(loggingAPI.LoggingLogSinkGVK),
		Spec: loggingAPI.LoggingLogSinkSpec{
			ProjectRef: &k8sAPI.ResourceRef{
				External: b.Spec.GCP.ProjectID,
			},
			Destination: loggingAPI.LogsinkDestination{
				StorageBucketRef: &k8sAPI.ResourceRef{
					External: bucketRef,
				},
			},
			Description:          &description,
			Filter:               &sinkFilter,
			UniqueWriterIdentity: &uniqueWriter,
		},
	}
}

// createInfraSAResources generates iam service account resources.
// Names are derived from a hash of the cluster-infra cluster's edge ID.
func (r *BannerReconciler) createInfraSAResources(b *bannerAPI.Banner) []client.Object {
	hash := uuid.FromUUID(b.Status.ClusterInfraClusterEdgeID).Hash()
	kccResourceName := fmt.Sprintf("kcc-%s", hash)
	clusterctlSAName := fmt.Sprintf("cctl-%s", hash)
	soctlSAName := fmt.Sprintf("soctl-%s", hash)
	projectNumber := b.Status.ProjectNumber
	var objs []client.Object
	objs = append(objs, r.createClusterInfraKCCResources(b, kccResourceName, projectNumber)...)
	objs = append(objs, r.createClusterControllerSAResources(b, clusterctlSAName)...)
	objs = append(objs, r.createSyncedObjectCtlSAResources(b, soctlSAName)...)
	return objs
}

// reconcileProjectInfra creates the GCP Project and required KCC resources.
// It builds the full desired object set, server-side-applies it, then prunes
// anything present in the previous inventory but absent from the new one.
func (r *BannerReconciler) reconcileProjectInfra(ctx context.Context, b *bannerAPI.Banner) recerr.Error {
	var (
		err             error
		mgr             = r.ResourceManager
		log             = ctrl.LoggerFrom(ctx).WithName("project-infra")
		// Snapshot status before apply so we can diff inventories for pruning.
		oldBannerStatus = b.Status.DeepCopy()
	)
	err = r.createClusterInfraCluster(ctx, b)
	if err != nil {
		log.Error(err, fmt.Sprintf("failed to call registration api for banner %s cluster-infra cluster", b.Spec.DisplayName))
		return recerr.New(err, bannerAPI.ClusterInfraCreationFailedReason)
	}
	// otherwise, the project is ready and we are good to go
	objs := []client.Object{
		r.createStorageBucket(b),
		r.createSiemSink(b),
		r.createClusterctlDatabaseUser(b),
		r.createEdgeIssuerDatabaseUser(b),
		r.createAuthserverDatabaseUser(b),
	}
	roles := r.createCustomStorageRoles(b)
	for _, role := range roles {
		objs = append(objs, role)
	}
	svcs := r.createAPIEnablements(b)
	for _, svc := range svcs {
		objs = append(objs, svc)
	}
	// resources for cluster-infra in tenant project
	objs = append(objs, r.createInfraSAResources(b)...)
	// add banner-wide namespace to create the top-level 'chariot' folder
	nsObj, err := createBannerWideNamespace(b)
	if err != nil {
		log.Error(err, fmt.Sprintf("failed to create banner-wide namespace for %s", b.Spec.DisplayName))
		return recerr.New(err, bannerAPI.ApplyFailedReason)
	}
	objs = append(objs, nsObj)
	// create remote access synced objects
	objs = append(objs, r.createRemoteAccessComputeAddress(ctx, b))
	objs = append(objs, r.genForemanSO(r.createForemanProxyMapping(b), b))
	// optional enablements
	for _, e := range b.Spec.Enablements {
		if e == CouchDBEnablement {
			err = r.createCouchServerCluster(ctx, b)
			if err != nil {
				log.Error(err, fmt.Sprintf("failed to call registration api for banner %s couch cluster", b.Spec.DisplayName))
				return recerr.New(err, bannerAPI.CouchClusterCreationFailedReason)
			}
			err = r.bslFullSync(ctx, b)
			if err != nil {
				log.Error(err, fmt.Sprintf("fail to sync bsl data to couchdb banner %s", b.Spec.DisplayName))
				return recerr.New(err, bannerAPI.CouchBSLDataSyncFailedReason)
			}
			shipments, err := r.generateShipments(b)
			if err != nil {
				return recerr.NewStalled(err, bannerAPI.InvalidShipmentSpecReason)
			}
			objs = append(objs, shipments...)
			// BSL EU ID is needed to move this to couchdb-masters cluster pallet
			objs = append(objs, r.createCouchCushionConfigMapSO(b))
		}
	}
	// warehouse infrastructure
	objs = append(objs, r.createGARRepo(b))
	var unstructuredObjs []*unstructured.Unstructured
	for _, obj := range objs {
		uobj, err := unstructuredutil.ToUnstructured(obj)
		if err != nil {
			goodErr := fmt.Errorf("failed to convert %s/%s/%s to unstructured: %w", obj.GetObjectKind(), obj.GetNamespace(), obj.GetName(), err)
			return recerr.New(goodErr, bannerAPI.ApplyFailedReason)
		}
		unstructuredObjs = append(unstructuredObjs, uobj)
	}
	changeSet, err := mgr.ApplyAll(ctx, unstructuredObjs, ssa.ApplyOptions{Force: true})
	if err != nil {
		return recerr.New(err, bannerAPI.ApplyFailedReason)
	}
	log.Info("project infrastructure applied", "changeset", changeSet.ToMap())
	b.Status.Inventory = inventory.New(inventory.FromChangeSet(changeSet))
	if oldBannerStatus.Inventory != nil {
		// Delete objects that were in the old inventory but not the new one.
		diff, err := inventory.Diff(oldBannerStatus.Inventory, b.GetInventory())
		if err != nil {
			return recerr.New(err, bannerAPI.PruneFailedReason)
		}
		log.Info("inventory", "diff", diff)
		if len(diff) > 0 {
			changeSet, err := r.ResourceManager.DeleteAll(ctx, diff, ssa.DefaultDeleteOptions())
			if err != nil {
				return recerr.New(err, bannerAPI.PruneFailedReason)
			}
			log.Info("pruned objects", "changeset", changeSet.ToMap())
		}
	}
	return nil
}

// reconcileProject ensures the KCC Project resource exists and is Ready,
// recording the project ref and number on Banner status.
func (r *BannerReconciler) reconcileProject(ctx context.Context, b *bannerAPI.Banner) recerr.Error {
	var (
		err     error
		project *resourceAPI.Project
		log     = ctrl.LoggerFrom(ctx).WithName("project")
	)
	// TODO: validate project shape in validationadmissionwebhook
	project, err = r.createProject(b)
	if err != nil {
		log.Error(err, "failed to declare project resource")
		return recerr.New(err, bannerAPI.ProjectSetupFailedReason)
	}
	if err := client.IgnoreAlreadyExists(r.Create(ctx, project)); err != nil {
		log.Error(err, "failed to create project")
		return recerr.New(err, bannerAPI.ProjectSetupFailedReason)
	}
	// Project resource now exists, even if its not
ready b.Status.ProjectRef = fmt.Sprintf("%s/%s", project.Namespace, project.Name) if err := r.Get(ctx, types.NamespacedName{Name: project.Name, Namespace: project.Namespace}, project); err != nil { log.Error(err, "failed to get project") return recerr.New(err, bannerAPI.ProjectSetupFailedReason) } if ready, reason := kccmeta.IsReady(project.Status.Conditions); !ready || project.Status.Number == nil { log.Info("project is not ready", "reason", reason) return recerr.NewWait(fmt.Errorf("%w: %s", ErrProjectNotReady, reason), bannerAPI.ProjectNotReadyReason, r.DefaultRequeue) } b.Status.ProjectNumber = *project.Status.Number log.Info("project is ready", "project", project.ObjectMeta) return nil } func (r *BannerReconciler) createProject(b *bannerAPI.Banner) (*resourceAPI.Project, error) { if b.Spec.GCP.ProjectID == "" { b.Spec.GCP.ProjectID = fmt.Sprintf("%s-%s", gcpinfra.ProjectIDPrefix, gcpProject.RandAN(29-(len(gcpinfra.ProjectIDPrefix)))) } // TODO: validate project shape in validationadmissionwebhook if err := gcpProject.IsValidProjectID(b.Spec.GCP.ProjectID); err != nil { return &resourceAPI.Project{}, err } return &resourceAPI.Project{ ObjectMeta: metav1.ObjectMeta{ Name: projectName, Namespace: b.Name, Annotations: map[string]string{ constants.Banner: b.Spec.DisplayName, constants.Organization: b.Spec.BSL.Organization.Name, kccmeta.ProjectAnnotation: b.Spec.GCP.ProjectID, }, OwnerReferences: r.ownerRef(b), }, TypeMeta: gvkToTypeMeta(resourceAPI.ProjectGVK), Spec: resourceAPI.ProjectSpec{ BillingAccountRef: kccmeta.BillingAccountRef(r.BillingAccount), Name: b.Spec.DisplayName, ResourceID: &b.Spec.GCP.ProjectID, FolderRef: kccmeta.FolderRef(r.FolderID), }, }, nil } func (r *BannerReconciler) createCustomStorageRoles(b *bannerAPI.Banner) []*iamAPI.IAMCustomRole { iamRoleDescriptionGet := "IAM role to get bucket and object metadata needed for flux source controller" iamRoleDescriptionList := "IAM role to list bucket objects needed for flux source controller" 
fluxRoleGet := &iamAPI.IAMCustomRole{ ObjectMeta: metav1.ObjectMeta{ Name: bootstrap.FluxRoleGet, Namespace: b.Name, Annotations: map[string]string{ kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, OwnerReferences: r.ownerRef(b), }, Spec: iamAPI.IAMCustomRoleSpec{ Description: &iamRoleDescriptionGet, Permissions: []string{"storage.objects.get"}, Title: bootstrap.FluxRoleGet, }, TypeMeta: gvkToTypeMeta(iamAPI.IAMCustomRoleGVK), } fluxRoleList := &iamAPI.IAMCustomRole{ ObjectMeta: metav1.ObjectMeta{ Name: bootstrap.FluxRoleList, Namespace: b.Name, Annotations: map[string]string{ kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, OwnerReferences: r.ownerRef(b), }, Spec: iamAPI.IAMCustomRoleSpec{ Description: &iamRoleDescriptionList, Permissions: []string{"storage.objects.list", "storage.buckets.get"}, Title: bootstrap.FluxRoleList, }, TypeMeta: gvkToTypeMeta(iamAPI.IAMCustomRoleGVK), } return []*iamAPI.IAMCustomRole{ fluxRoleGet, fluxRoleList, } } func (r *BannerReconciler) createAPIEnablements(b *bannerAPI.Banner) []*serviceAPI.Service { // svcMeta will be applied to all Service CRDs we create svcMeta := metav1.ObjectMeta{ Annotations: map[string]string{ kccmeta.DisableDepSvcAnnotation: "false", kccmeta.DisableSvcOnDestroyAnnotation: "false", kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, Namespace: b.Name, OwnerReferences: r.ownerRef(b), } var objs []*serviceAPI.Service // create a service object with the common metadata for each API the control // plane needs enabled for _, api := range gcpinfra.TenantAPIs { svcMeta.Name = api objs = append(objs, &serviceAPI.Service{ ObjectMeta: svcMeta, TypeMeta: gvkToTypeMeta(serviceAPI.ServiceGVK), }) } return objs } func (r *BannerReconciler) checkAPI(ctx context.Context, ns string, api string) (bool, error) { log := ctrl.LoggerFrom(ctx).WithName("check-API") service := &serviceAPI.Service{} err := r.Client.Get(ctx, types.NamespacedName{Name: api, Namespace: ns}, service) 
if err != nil && !kerrors.IsNotFound(err) { log.Error(err, "Error checking api enablement", "service name", api) return false, err } ready, reason := kccmeta.IsReady(service.Status.Conditions) if !ready { log.Info("waiting for service api to become Ready", "reason", reason, "api", api) } return ready, nil } func (r *BannerReconciler) copyPlatformSecrets(ctx context.Context, projectID string) error { log := ctrl.LoggerFrom(ctx).WithName("copy-platform-secrets").WithValues("projectID", projectID) log.Info("copying platform secrets to new banner project", "source", r.ForemanProjectID, "destination", projectID) reader, err := r.SecretManager.NewWithOptions(ctx, r.ForemanProjectID) if err != nil { return fmt.Errorf("error creating secretmanager reader client, err: %v", err) } writer, err := r.SecretManager.NewWithOptions(ctx, projectID) if err != nil { return fmt.Errorf("error creating secretmanager writer client, err: %v", err) } for _, secretID := range constants.PlatformSecretIDs { secretVal, err := reader.GetLatestSecretValue(ctx, secretID) if err != nil { return fmt.Errorf("error reading latest secret value, secretID: %v, err: %v", secretID, err) } s, err := reader.GetSecret(ctx, secretID) if err != nil { return fmt.Errorf("error getting secret, secretID: %v, err: %v", secretID, err) } err = writer.AddSecret(ctx, secretID, secretVal, s.Labels, true, nil, "") if err != nil { return fmt.Errorf("error adding secret, secretID: %v, labels: %v, err: %v", secretVal, s.Labels, err) } } log.Info("copied platform secrets to new banner project") return nil } func (r *BannerReconciler) createStorageBucket(b *bannerAPI.Banner) *storageAPI.StorageBucket { return &storageAPI.StorageBucket{ ObjectMeta: metav1.ObjectMeta{ Name: b.Spec.GCP.ProjectID, Namespace: b.Name, Annotations: map[string]string{ kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, OwnerReferences: r.ownerRef(b), }, TypeMeta: gvkToTypeMeta(storageAPI.StorageBucketGVK), Spec: 
		storageAPI.StorageBucketSpec{
			Versioning: &storageAPI.BucketVersioning{Enabled: true},
		},
	}
}

// addMetricsScopes registers the banner's project as a monitored project under
// the foreman metrics scope; an AlreadyExists gRPC status is treated as success.
func (r *BannerReconciler) addMetricsScopes(ctx context.Context, b *bannerAPI.Banner) error {
	log := ctrl.LoggerFrom(ctx).WithName("metrics-scopes")
	resp, err := r.MetricsScopesClient.AddMonitoredProject(ctx, b.Spec.GCP.ProjectID)
	if err != nil && grpcstatus.Code(err) != grpccodes.AlreadyExists {
		log.Error(err, "failed to register monitored project", "host-project", r.ForemanProjectID, "response", resp)
		return err
	}
	return nil
}

// createClusterInfraCluster registers the banner's cluster-infra GKE cluster
// through the registration API, recording the resulting cluster edge ID and
// project ID on Banner status. It is a no-op once the edge ID is set.
func (r *BannerReconciler) createClusterInfraCluster(ctx context.Context, b *bannerAPI.Banner) error {
	// Check if cluster resource already exists
	if b.Status.ClusterInfraClusterEdgeID != "" {
		return nil
	}
	// If not, call registration api for cluster
	totpToken, err := totp.GenerateTotp(r.TotpSecret)
	if err != nil {
		return err
	}
	GCPLocation := fmt.Sprintf("%s-%s", r.GCPRegion, r.GCPZone)
	reg, err := registration.NewBuilder().
		Banner(b.Spec.DisplayName).
		Store(bannerconstants.CreateBannerClusterInfraName(b.Spec.DisplayName)).
		ClusterType(clusterConstants.GKE).
		BSLOrganization(b.Spec.BSL.Organization.Name).
		APIEndpoint(r.EdgeAPI). //todo
		TotpToken(totpToken.Code).
		CreateBSLSite(false).
		Fleet(fleet.Cluster).
		MachineType(clusterMachineType).
		MinNodes(clusterMinNodes).
		MaxNodes(clusterMaxNodes).
		Autoscale(true).
		Location(GCPLocation).
		FleetVersion(apitypes.DefaultVersionTag).
		Build()
	if err != nil {
		return err
	}
	reg.Client = r.Client
	registrationResponse, err := reg.RegisterCluster(ctx)
	// An already-existing cluster is not an error; detected by message match.
	if err != nil && !strings.Contains(err.Error(), edgeErrors.ErrClusterAlreadyExists) {
		return err
	}
	if registrationResponse != nil {
		b.Status.ClusterInfraClusterEdgeID = registrationResponse.ClusterEdgeID
		// New cluster infra clusters the project id will be the banner project id
		b.Status.ClusterInfraClusterProjectID = b.Spec.GCP.ProjectID
	}
	return nil
}

// createAuthserverDatabaseUser declares the authserver IAM database user with
// read (and http_sessions delete) grants.
// NOTE(review): definition continues beyond this chunk — truncated here.
func (r *BannerReconciler) createAuthserverDatabaseUser(b *bannerAPI.Banner) *sequelApi.DatabaseUser {
	hash := uuid.FromUUID(b.Status.ClusterInfraClusterEdgeID).Hash()
	authserverName := fmt.Sprintf("authserver-%s", hash)
	iamUsername := fmt.Sprintf("authserver@%s.iam", b.Spec.GCP.ProjectID)
	grant := sequelApi.Grant{
		Schema: "public",
		TableGrant: []sequelApi.TableGrant{
			{
				Table: "tenants",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
				},
			},
			{
				Table: "banners",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
				},
			},
			{
				Table: "banner_configs",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
				},
			},
			{
				Table: "clusters",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
				},
			},
			{
				Table: "cluster_config",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
				},
			},
			{
				Table: "http_sessions",
				Permissions: []sequelApi.Permissions{
					{
						Permission: "SELECT",
					},
					{
						Permission: "DELETE",
					},
				},
			},
		},
	}
	return &sequelApi.DatabaseUser{
		TypeMeta: gvkToTypeMeta(sequelApi.UserGVK),
		ObjectMeta: metav1.ObjectMeta{
			Name:      authserverName,
			Namespace: b.Name,
		},
		Spec: sequelApi.UserSpec{
			Type: sequelApi.CloudSAUserType,
			CommonOptions: sequelApi.CommonOptions{
				Prune: true,
				Force: true,
			},
			InstanceRef: sequelApi.InstanceReference{
				Name:      r.DatabaseName + dbInstance,
				ProjectID: r.ForemanProjectID,
			},
			ServiceAccount: &sequelApi.ServiceAccount{
				EmailRef:    fmt.Sprintf("%s.gserviceaccount.com", iamUsername),
				IAMUsername: iamUsername,
			},
			Grants: []sequelApi.Grant{
				grant,
			},
}, } } func (r *BannerReconciler) createClusterctlDatabaseUser(b *bannerAPI.Banner) *sequelApi.DatabaseUser { hash := uuid.FromUUID(b.Status.ClusterInfraClusterEdgeID).Hash() clusterctlSAName := fmt.Sprintf("cctl-%s", hash) iamUsername := fmt.Sprintf("%s@%s.iam", clusterctlSAName, b.Spec.GCP.ProjectID) grant := sequelApi.Grant{ Schema: "public", TableGrant: []sequelApi.TableGrant{ { Table: "clusters", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, { Permission: "TRIGGER", }, { Permission: "UPDATE", }, }, }, { Table: "cluster_artifact_versions", Permissions: []sequelApi.Permissions{ { Permission: "DELETE", }, { Permission: "INSERT", }, { Permission: "SELECT", }, }, }, { Table: "cluster_config", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "id_provider_owner", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "id_provider_settings", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "banners", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, { Permission: "UPDATE", }, }, }, { Table: "cluster_bootstrap_tokens", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, { Permission: "DELETE", }, }, }, { Table: "terminals", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "artifact_registries", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "cluster_artifact_registries", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "helm_workloads", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, { Permission: "DELETE", }, }, }, { Table: "helm_secrets", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "helm_workload_config_maps", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "cluster_labels", Permissions: []sequelApi.Permissions{ { Permission: "DELETE", }, { Permission: 
"INSERT", }, { Permission: "SELECT", }, }, }, { Table: "log_replays", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, { Permission: "UPDATE", }, }, }, { Table: "helm_workload_labels", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "labels", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "watched_field_objects", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "watched_field_values", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "cluster_network_services", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "workload_cluster_mapping", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "capabilities", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "capabilities_to_banners", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "cluster_secret_leases", Permissions: []sequelApi.Permissions{ { Permission: "INSERT", }, { Permission: "SELECT", }, { Permission: "UPDATE", }, }, }, { Table: "cluster_secrets", Permissions: []sequelApi.Permissions{ { Permission: "INSERT", }, { Permission: "SELECT", }, { Permission: "UPDATE", }, { Permission: "DELETE", }, }, }, { Table: "channels", Permissions: []sequelApi.Permissions{ { Permission: "SELECT", }, }, }, { Table: "helm_workloads_channels", Permissions: []sequelApi.Permissions{ { Permission: "INSERT", }, { Permission: "SELECT", }, }, }, }, } return &sequelApi.DatabaseUser{ TypeMeta: gvkToTypeMeta(sequelApi.UserGVK), ObjectMeta: metav1.ObjectMeta{ Name: clusterctlSAName, Namespace: b.Name, }, Spec: sequelApi.UserSpec{ Type: sequelApi.CloudSAUserType, CommonOptions: sequelApi.CommonOptions{ Prune: true, Force: true, }, InstanceRef: sequelApi.InstanceReference{ Name: r.DatabaseName + dbInstance, ProjectID: r.ForemanProjectID, }, ServiceAccount: &sequelApi.ServiceAccount{ 
EmailRef: fmt.Sprintf("%s.gserviceaccount.com", iamUsername), IAMUsername: iamUsername, }, Grants: []sequelApi.Grant{ grant, }, }, } } func createBannerWideNamespace(b *bannerAPI.Banner) (*syncedobjectApi.SyncedObject, error) { ns := corev1.Namespace{ TypeMeta: metav1.TypeMeta{ Kind: "Namespace", APIVersion: "v1", }, ObjectMeta: metav1.ObjectMeta{ Name: b.Name, }, } sobj, err := k8objectsutils.BuildSyncedObjectCoreV1(ns, b.Spec.GCP.ProjectID, "", b.Name, constants.BannerWideNamespace) if err != nil { return nil, err } return sobj, nil } func (r *BannerReconciler) reconcilePlatformSecrets(ctx context.Context, b *bannerAPI.Banner) recerr.Error { log := ctrl.LoggerFrom(ctx).WithName("platform-secrets") // create platform secrets when the secretmanager api is ready ready, err := r.checkAPI(ctx, b.Name, "secretmanager.googleapis.com") if err != nil { return recerr.New(err, bannerAPI.PlatformSecretsCreationFailedReason) } // return specific error indicating that the service isn't ready, so we requeue if !ready { err := fmt.Errorf("%w", ErrAPINotReady) return recerr.NewWait(err, bannerAPI.PlatformSecretsCreationFailedReason, r.DefaultRequeue) } err = r.copyPlatformSecrets(ctx, b.Spec.GCP.ProjectID) if err != nil { log.Error(err, "error copying platform secrets for project") return recerr.New(err, bannerAPI.PlatformSecretsCreationFailedReason) } return nil } // ownerRef creates an owner reference for this controller that should be added // to objects this controller object owns. 
this enables things like // automated garbage collection func (r *BannerReconciler) ownerRef(b *bannerAPI.Banner) []metav1.OwnerReference { return []metav1.OwnerReference{ *metav1.NewControllerRef( b, bannerAPI.GroupVersion.WithKind(reflect.TypeOf(bannerAPI.Banner{}).Name()), ), } } // createOpts returns client.CreatOptions marking this controller as the owner // the result string should match what fluxcd/pkg/ssa.Onwer adds to resources func (r *BannerReconciler) createOpts() *client.CreateOptions { return &client.CreateOptions{FieldManager: fmt.Sprintf("%s/%s", constants.Domain, r.Name)} } func (r *BannerReconciler) setResourceManager() { if r.ResourceManager == nil { mgr := ssa.NewResourceManager( r.Client, polling.NewStatusPoller(r.Client, r.Client.RESTMapper(), polling.Options{}), ssa.Owner{Field: r.Name}, ) r.ResourceManager = mgr } } func gvkToTypeMeta(gvk schema.GroupVersionKind) metav1.TypeMeta { v, k := gvk.ToAPIVersionAndKind() return metav1.TypeMeta{ APIVersion: v, Kind: k, } } func (r *BannerReconciler) genForemanSO(obj client.Object, b *bannerAPI.Banner) *syncedobjectApi.SyncedObject { data, _ := json.Marshal(obj) data64 := base64.StdEncoding.EncodeToString(data) return &syncedobjectApi.SyncedObject{ TypeMeta: metav1.TypeMeta{ APIVersion: syncedobjectApi.GroupVersion.String(), Kind: "SyncedObject", }, ObjectMeta: metav1.ObjectMeta{ Name: obj.GetName(), Namespace: b.Name, OwnerReferences: r.ownerRef(b), }, Spec: syncedobjectApi.SyncedObjectSpec{ Banner: r.ForemanProjectID, Cluster: "foreman0", Object: data64, }, } } func (r *BannerReconciler) createGARRepo(b *bannerAPI.Banner) *registryAPI.ArtifactRegistryRepository { desc := "Warehouse registry for K8s configuration packages" return ®istryAPI.ArtifactRegistryRepository{ ObjectMeta: metav1.ObjectMeta{ Name: "warehouse", Namespace: b.Name, Annotations: map[string]string{ kccmeta.DeletionPolicyAnnotation: kccmeta.DeletionPolicyAbandon, }, OwnerReferences: r.ownerRef(b), }, TypeMeta: 
gvkToTypeMeta(registryAPI.ArtifactRegistryRepositoryGVK), Spec: registryAPI.ArtifactRegistryRepositorySpec{ Description: &desc, Format: "DOCKER", Location: r.GCPRegion, }, } } func (r *BannerReconciler) generateShipments(b *bannerAPI.Banner) ([]client.Object, error) { capabilities := generator.InfraCapabilities params := generator.BannerRenderParams{ ClusterRenderParams: generator.ClusterRenderParams{ ClusterType: string(cluster.GKE), UUID: b.Name, Region: r.GCPRegion, Zone: r.GCPZone, ForemanGCPProjectID: r.ForemanProjectID, GCPForemanProjectNumber: r.GCPForemanProjectNumber, BannerID: b.Name, GCPProjectID: b.Spec.GCP.ProjectID, BSLEUID: b.Spec.BSL.EnterpriseUnit.ID, BSLEdgeEnvPrefix: r.BSLConfig.OrganizationPrefix, BSLEndpoint: r.BSLConfig.Endpoint, BSLRootOrg: r.BSLConfig.Root, Domain: r.Domain, DatasyncDNSName: r.DatasyncDNSName, DatasyncDNSZone: r.DatasyncDNSZone, DatabaseName: r.DatabaseName, EdgeSecOptInCompliance: r.EdgeSecOptInCompliance, EdgeSecMaxLeasePeriod: r.EdgeSecMaxLeasePeriod, EdgeSecMaxValidityPeriod: r.EdgeSecMaxValidityPeriod, }, PlatformInfraGCPProjectID: r.PlatInfraProjectID, } shipmentOpts := &generator.ShipmentOpts{ Prune: true, Force: true, Pallets: []whv1.BaseArtifact{{Name: "couchdb-bannerinfra", Tag: "latest"}}, Repository: generator.GenerateShipmentRepo(r.GCPRegion, r.ForemanProjectID), Capabilities: capabilities, Interval: &metav1.Duration{Duration: 120 * time.Second}, RetryInterval: &metav1.Duration{Duration: 20 * time.Second}, Timeout: &metav1.Duration{Duration: 90 * time.Second}, } shipmentOpts.AddBannerRenderParams(params) shipment, err := shipmentOpts.BuildShipment(true, false) if err != nil { return nil, fmt.Errorf("unable to build couchdb-masters pallet: %w", err) } return []client.Object{shipment}, nil }