package projectinit

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"time"

	ar "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/artifactregistry/v1beta1"
	compute "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/compute/v1beta1"
	iam "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/iam/v1beta1"
	"github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1"
	kcc "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/k8s/v1alpha1"
	resourcemgr "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/resourcemanager/v1beta1"
	secretmgr "github.com/GoogleCloudPlatform/k8s-config-connector/pkg/clients/generated/apis/secretmanager/v1beta1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"edge-infra.dev/pkg/k8s/konfigkonnector/apis/meta"
	"edge-infra.dev/pkg/k8s/runtime/inventory"
	"edge-infra.dev/pkg/k8s/runtime/sap"
	unstructuredutil "edge-infra.dev/pkg/k8s/unstructured"
	iamutil "edge-infra.dev/pkg/lib/gcp/iam"
	"edge-infra.dev/pkg/lib/gcp/iam/roles"
	"edge-infra.dev/pkg/lib/ncr/gcp/security"
)

// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computefirewalls;computerouternats;computerouters;computesslpolicies,verbs=create;get;list;update;patch;watch;delete
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computefirewalls/status;computerouternats/status;computerouters/status;computesslpolicies/status,verbs=get
// +kubebuilder:rbac:groups="resourcemanager.cnrm.cloud.google.com",resources=projects,verbs=get;list;watch
// +kubebuilder:rbac:groups="resourcemanager.cnrm.cloud.google.com",resources=projects/status,verbs=get;watch
// +kubebuilder:rbac:groups="secretmanager.cnrm.cloud.google.com",resources=secretmanagersecrets;secretmanagersecretversions,verbs=create;get;list;update;patch;watch;delete
// +kubebuilder:rbac:groups="secretmanager.cnrm.cloud.google.com",resources=secretmanagersecrets/status;secretmanagersecretversions/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamserviceaccounts;iampolicymembers;iamserviceaccountkeys,verbs=create;get;list;update;patch;watch;delete
// +kubebuilder:rbac:groups="iam.cnrm.cloud.google.com",resources=iamserviceaccounts/status;iampolicymembers/status;iamserviceaccountkeys/status,verbs=get;watch
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computenetworks,verbs=create;get;list;update;patch;watch;delete
// +kubebuilder:rbac:groups="compute.cnrm.cloud.google.com",resources=computenetworks/status,verbs=get;watch
// +kubebuilder:rbac:groups="",resources=configmaps;secrets,verbs=create;get;list;update;watch;patch;delete

// TODO: Update name once Edge vs Foundation is settled
const (
	DockerPullSA           = "docker-pull-sa"
	PltfDockerPullCfgSAKey = "pltf-pull-cfg-sa-key"
	K8sPltfDockerPullCfg   = "platform-docker-pull-config"
	PltfDockerPullCfg      = "platform-docker-pull-cfg"
	DefaultRouterName      = "default-router"
	DefaultNATGatewayName  = "default-nat-gateway"
)

var (
	defaultNetworkType = "REGIONAL"
	defaultNetworkName = "default"
)
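// Note on naming: DockerPullSA is used directly as the GCP service account
// name, so the resulting account email has the standard GCP form
//
//	docker-pull-sa@<project-id>.iam.gserviceaccount.com
//
// which is what iamutil.SvcAccountEmail(DockerPullSA, projectID) is expected
// to resolve to when the service account key is generated below. The other
// names are prefixed with the project name via NameWithProjectPrefix (defined
// elsewhere in this package) before being used as Kubernetes object names.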
// Reconciler reconciles Project objects in order to create and configure
// required infrastructure in GCP, e.g.:
//
//   - Firewall configurations
type Reconciler struct {
	client.Client
	ResourceManager    *sap.ResourceManager
	FirewallConfig     Firewall
	ArtifactRegistries []ArtifactRegistry
	// Inventories is the ConfigMap-backed storage listing objects that this
	// controller creates, to be used for pruning.
	Inventories *inventory.Storage
	// Name is the controller's name, used to consistently represent the controller
	// in various cluster interactions, e.g., as field manager
	Name string
	// Namespace is the namespace this controller is deployed to
	Namespace     string
	GCPRegion     string
	retryInterval time.Duration
}

// SetupWithManager sets up the Reconciler with the manager, such that the
// Reconciler is triggered for updates to Project objects and to the resources
// it owns. If configuration hasn't been explicitly set on the Reconciler
// struct (e.g., FirewallConfig, controller name), defaults will be set by
// this function.
func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
	// instantiate server-side apply client
	var err error
	r.ResourceManager, err = sap.NewResourceManagerFromConfig(
		mgr.GetConfig(), client.Options{}, sap.Owner{Field: r.Name},
	)
	if err != nil {
		return err
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&resourcemgr.Project{}).
		Owns(&compute.ComputeFirewall{}).
		Owns(&compute.ComputeSSLPolicy{}).
		Owns(&iam.IAMPolicyMember{}).
		Owns(&iam.IAMServiceAccountKey{}).
		Owns(&v1.Secret{}).
		Owns(&secretmgr.SecretManagerSecret{}).
		Owns(&secretmgr.SecretManagerSecretVersion{}).
		Complete(r)
}

func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// instantiate inventories if we have not done so already
	if r.Inventories == nil {
		var err error
		r.Inventories, err = inventory.NewStorage(ctx, r.Client, r.Name, r.Namespace)
		if err != nil {
			return ctrl.Result{}, fmt.Errorf("failed to instantiate inventory: %w", err)
		}
	}

	p := resourcemgr.Project{}
	if err := r.Get(ctx, req.NamespacedName, &p); err != nil {
		// do nothing if the object isn't found
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	if ready, reason := meta.IsReady(p.Status.Conditions); !ready {
		log.Info("waiting for project to become ready", "reason", reason)
		return ctrl.Result{Requeue: true, RequeueAfter: r.retryInterval}, nil
	}
	if p.Spec.ResourceID == nil {
		log.Info("waiting for spec.resourceID to be set")
		return ctrl.Result{Requeue: true, RequeueAfter: r.retryInterval}, nil
	}
	if p.Status.Number == nil {
		log.Info("waiting for status.projectNumber to be set")
		return ctrl.Result{Requeue: true, RequeueAfter: r.retryInterval}, nil
	}

	if err := r.reconcile(ctx, req, p); err != nil {
		log.Error(err, "failed to reconcile")
		return ctrl.Result{}, err
	}
	return ctrl.Result{}, nil
}
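// A minimal wiring sketch for this controller (field values here are
// illustrative assumptions, not the deployed configuration):
//
//	r := &Reconciler{
//		Client:    mgr.GetClient(),
//		Name:      "project-init",
//		Namespace: "controller-namespace",
//		GCPRegion: "us-east1",
//	}
//	if err := r.SetupWithManager(mgr); err != nil {
//		// handle setup error
//	}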
// reconcile performs the per-project work. For the reconciled GCP project it:
//   - disables the default rules allowing SSH and RDP
//   - creates new firewall rules that only allow Zscaler traffic for SSH and RDP
func (r *Reconciler) reconcile(ctx context.Context, req ctrl.Request, project resourcemgr.Project) error {
	mgr := r.ResourceManager
	log := ctrl.LoggerFrom(ctx).WithName("reconcile")

	computeSAMember := iamutil.ComputeEngineSvcAccountMember(*project.Status.Number)
	dockerPullSAMember := iamutil.StandardSvcAccountMember(DockerPullSA, *project.Spec.ResourceID)

	var objs []client.Object
	objs = append(objs, CreateDefaultNetwork(project))
	objs = append(objs, GenerateFirewallRules(project, r.FirewallConfig)...)
	objs = append(objs, GenerateSSLPolicies(project)...)
	objs = append(objs, GenerateIAMServiceAccount(project)...)
	// both the Compute Engine default service account and the docker pull
	// service account are granted read access to the configured registries
	objs = append(objs, GenerateArtifactRegistryPermissions(project, computeSAMember, r.ArtifactRegistries)...)
	objs = append(objs, GenerateArtifactRegistryPermissions(project, dockerPullSAMember, r.ArtifactRegistries)...)
	objs = append(objs, GenerateIAMServiceAccountKey(project)...)
	objs = append(objs, GenerateDefaultRouter(project, r.GCPRegion)...)
	objs = append(objs, GenerateDefaultNATGateway(project, r.GCPRegion)...)

	var unstructuredObjs []*unstructured.Unstructured
	for _, obj := range objs {
		uobj, err := unstructuredutil.ToUnstructured(obj)
		if err != nil {
			return fmt.Errorf("failed to convert %s/%s/%s to unstructured: %w",
				obj.GetObjectKind(), obj.GetNamespace(), obj.GetName(), err)
		}
		unstructuredObjs = append(unstructuredObjs, uobj)
	}

	changeSet, err := mgr.ApplyAll(ctx, unstructuredObjs, sap.ApplyOptions{Force: true})
	if err != nil {
		return fmt.Errorf("failed to apply resources: %w", err)
	}
	log.Info("resources applied", "changeset", changeSet)

	// retrieve newly created IAMServiceAccountKey and confirm its readiness
	svcAcc := iam.IAMServiceAccountKey{}
	objKey := client.ObjectKey{Namespace: project.Namespace, Name: NameWithProjectPrefix(PltfDockerPullCfgSAKey, project.Name)}
	if err := r.Get(ctx, objKey, &svcAcc); err != nil {
		return fmt.Errorf("failed to retrieve iamSAKey: %w", err)
	}
	if ready, reason := meta.IsReady(svcAcc.Status.Conditions); !ready {
		return fmt.Errorf("waiting for IAMSAKey to become ready: %s", reason)
	}

	// Create pull secret object that depends on the IAMServiceAccountKey
	pullSecretObj, err := GenerateDockerPullSecret(project, svcAcc, r.ArtifactRegistries)
	if err != nil {
		return err
	}

	// Apply pull secret object
	pullSecretUnstructuredObj, err := unstructuredutil.ToUnstructured(pullSecretObj)
	if err != nil {
		return fmt.Errorf("failed to convert %s/%s/%s to unstructured: %w",
			pullSecretObj.GetObjectKind(), pullSecretObj.GetNamespace(), pullSecretObj.GetName(), err)
	}
	changeSetEntry, err := mgr.Apply(ctx, pullSecretUnstructuredObj, sap.ApplyOptions{Force: true})
	if err != nil {
		return fmt.Errorf("failed to apply pull secret resources: %w", err)
	}
	log.Info("pull secret resources applied", "changeset", changeSetEntry)

	// Add pull secret object to changeSet
	changeSet.Add(*changeSetEntry)

	// Create SecretManager objects that depend on the newly created pull secret
	secret := v1.Secret{}
	objKey = client.ObjectKey{Namespace: project.Namespace, Name: NameWithProjectPrefix(K8sPltfDockerPullCfg, project.Name)}
	if err := r.Get(ctx, objKey, &secret); err != nil {
		return fmt.Errorf("failed to retrieve v1.Secret: %w", err)
	}
	secretManagerObjs := GenerateSecretManagerObjects(project, secret)

	var unstructuredSecretManagerObjs []*unstructured.Unstructured
	for _, obj := range secretManagerObjs {
		uobj, err := unstructuredutil.ToUnstructured(obj)
		if err != nil {
			return fmt.Errorf("failed to convert %s/%s/%s to unstructured: %w",
				obj.GetObjectKind(), obj.GetNamespace(), obj.GetName(), err)
		}
		unstructuredSecretManagerObjs = append(unstructuredSecretManagerObjs, uobj)
	}

	// Apply Secret Manager unstructured objects
	secretManagerChangeSet, err := mgr.ApplyAll(ctx, unstructuredSecretManagerObjs, sap.ApplyOptions{Force: true})
	if err != nil {
		return fmt.Errorf("failed to apply secretManager resources: %w", err)
	}
	log.Info("secretManager resources applied", "changeset", secretManagerChangeSet)

	newInventory := inventory.New(inventory.FromSapChangeSet(changeSet))
	newInventory.AddSapObjects(secretManagerChangeSet)
	if inv := r.Inventories.Get(req.NamespacedName); inv != nil {
		diff, err := inv.Diff(newInventory)
		if err != nil {
			return err
		}
		if len(diff) > 0 {
			deleted, err := mgr.DeleteAll(ctx, diff, sap.DefaultDeleteOptions())
			if err != nil {
				return fmt.Errorf("failed to prune resources: %w", err)
			}
			log.Info("pruned", "changeset", deleted)
		}
	}

	log.Info("updating inventory")
	return r.Inventories.Set(ctx, req.NamespacedName, newInventory)
}
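// The inventory bookkeeping at the end of reconcile drives pruning: each pass
// records the set of objects it applied, and anything recorded on the previous
// pass but missing from the current one is deleted. Roughly (a sketch of the
// flow implemented above, not additional behavior):
//
//	prev := r.Inventories.Get(key)      // objects applied last reconcile
//	diff, _ := prev.Diff(newInventory)  // objects no longer generated
//	mgr.DeleteAll(ctx, diff, sap.DefaultDeleteOptions())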
// CreateDefaultNetwork adopts the project's pre-existing default network:
// ResourceID is pinned to "default" and the deletion policy abandons the
// underlying GCP resource rather than deleting it.
func CreateDefaultNetwork(project resourcemgr.Project) *compute.ComputeNetwork {
	shouldCreateSubnets := true
	return &compute.ComputeNetwork{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-default", *project.Spec.ResourceID),
			Namespace: project.Namespace,
			Annotations: map[string]string{
				meta.DeletionPolicyAnnotation: meta.DeletionPolicyAbandon,
			},
			OwnerReferences: ownerRef(&project),
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       compute.ComputeNetworkGVK.Kind,
			APIVersion: compute.SchemeGroupVersion.String(),
		},
		Spec: compute.ComputeNetworkSpec{
			RoutingMode:           &defaultNetworkType,
			AutoCreateSubnetworks: &shouldCreateSubnets,
			ResourceID:            &defaultNetworkName,
		},
	}
}

func GenerateFirewallRules(project resourcemgr.Project, cfg Firewall) []client.Object {
	projectID := *project.Spec.ResourceID
	disabled := true
	defaultAllowSSH := "default-allow-ssh"
	defaultAllowRDP := "default-allow-rdp"

	return []client.Object{
		// deny all SSH/RDP by default
		computeFirewall(
			objMeta("deny-ssh-rdp", project.Namespace, project),
			compute.ComputeFirewallSpec{
				Deny: []compute.FirewallDeny{
					{Protocol: "tcp", Ports: []string{"22", "3389"}},
				},
				Priority:   &cfg.DenyPriority,
				NetworkRef: NetworkRef(projectID),
			},
		),
		// allow SSH/RDP from Zscaler IPs
		computeFirewall(
			objMeta("allow-zscaler-ssh-rdp", project.Namespace, project),
			compute.ComputeFirewallSpec{
				Allow: []compute.FirewallAllow{
					{Protocol: "tcp", Ports: []string{"22", "3389"}},
				},
				SourceRanges: security.ZscalerIPs(),
				Priority:     &cfg.ZScalerAllowPriority,
				NetworkRef:   NetworkRef(projectID),
			},
		),
		// allow SSH from IAP forwarding
		computeFirewall(
			objMeta("allow-iap-ssh", project.Namespace, project),
			compute.ComputeFirewallSpec{
				Allow: []compute.FirewallAllow{
					{Protocol: "tcp", Ports: []string{"22"}},
				},
				SourceRanges: security.GoogleIAPIPs(),
				Priority:     &cfg.ZScalerAllowPriority,
				NetworkRef:   NetworkRef(projectID),
			},
		),
		// allow proxy port from IAP forwarding to k8s control plane
		computeFirewall(
			objMeta("allow-iap-k8s-proxy", project.Namespace, project),
			compute.ComputeFirewallSpec{
				Allow: []compute.FirewallAllow{
					{Protocol: "tcp", Ports: []string{"30443"}},
				},
				SourceRanges: security.GoogleIAPIPs(),
				Priority:     &cfg.ZScalerAllowPriority,
				NetworkRef:   NetworkRef(projectID),
			},
		),
		// disable default SSH/RDP rules, created for all GCP projects by default
		computeFirewall(
			objMeta(defaultAllowSSH, project.Namespace, project),
			compute.ComputeFirewallSpec{
				Disabled:     &disabled,
				NetworkRef:   NetworkRef(projectID),
				Allow:        []compute.FirewallAllow{{Ports: []string{"22"}, Protocol: "tcp"}},
				SourceRanges: []string{"0.0.0.0/0"},
				ResourceID:   &defaultAllowSSH,
			},
		),
		computeFirewall(
			objMeta(defaultAllowRDP, project.Namespace, project),
			compute.ComputeFirewallSpec{
				Disabled:     &disabled,
				NetworkRef:   NetworkRef(projectID),
				Allow:        []compute.FirewallAllow{{Ports: []string{"3389"}, Protocol: "tcp"}},
				SourceRanges: []string{"0.0.0.0/0"},
				ResourceID:   &defaultAllowRDP,
			},
		),
	}
}
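// Rule precedence note: GCP evaluates firewall rules by priority, where a
// lower numeric value wins. For the rules above to behave as intended, the
// allow priority must therefore be numerically lower than the deny priority,
// e.g. (illustrative values only, not the shipped configuration):
//
//	Firewall{DenyPriority: 1000, ZScalerAllowPriority: 900}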
func GenerateSSLPolicies(project resourcemgr.Project) []client.Object {
	profile := "MODERN"
	return []client.Object{
		computeSSLPolicy(objMeta(security.DefaultSSLPolicyName, project.Namespace, project),
			compute.ComputeSSLPolicySpec{
				MinTlsVersion: &security.MinTLSVersion,
				Profile:       &profile,
				ResourceID:    &security.DefaultSSLPolicyName,
			}),
	}
}

func GenerateIAMServiceAccount(project resourcemgr.Project) []client.Object {
	displayName := NameWithProjectPrefix(DockerPullSA, project.Name)
	return []client.Object{&iam.IAMServiceAccount{
		TypeMeta: metav1.TypeMeta{
			Kind:       iam.IAMServiceAccountGVK.Kind,
			APIVersion: iam.SchemeGroupVersion.String(),
		},
		// Not using objMeta here since it returns a relatively long name. We don't want that here since
		// the SA's resourceID will default to ObjectMeta.Name, and we need to stay between 6-30 chars.
		//
		// See https://cloud.google.com/iam/docs/service-accounts-create#creating
		ObjectMeta: metav1.ObjectMeta{
			Name:            DockerPullSA,
			Namespace:       project.Namespace,
			Annotations:     map[string]string{meta.ProjectAnnotation: *project.Spec.ResourceID},
			OwnerReferences: ownerRef(&project),
		},
		Spec: iam.IAMServiceAccountSpec{
			DisplayName: &displayName,
		},
	}}
}

func GenerateArtifactRegistryPermissions(project resourcemgr.Project, member string, cfgs []ArtifactRegistry) []client.Object {
	var policyMembers []client.Object
	for _, cfg := range cfgs {
		var policyName string
		if member == iamutil.ComputeEngineSvcAccountMember(*project.Status.Number) {
			policyName = cfg.ArtifactRegistryBindingNameCompute()
		} else {
			policyName = cfg.ArtifactRegistryBindingName()
		}
		policyMember := &iam.IAMPolicyMember{
			TypeMeta: metav1.TypeMeta{
				Kind:       iam.IAMPolicyMemberGVK.Kind,
				APIVersion: iam.SchemeGroupVersion.String(),
			},
			// don't use objMeta here because technically the project annotation is
			// not valid for policy member objects
			ObjectMeta: metav1.ObjectMeta{
				Name:            policyName,
				Namespace:       project.Namespace,
				OwnerReferences: ownerRef(&project),
			},
			Spec: iam.IAMPolicyMemberSpec{
				Member: &member,
				ResourceRef: kcc.IAMResourceRef{
					APIVersion: ar.SchemeGroupVersion.String(),
					Kind:       ar.ArtifactRegistryRepositoryGVK.Kind,
					External:   cfg.ExternalRef(),
				},
				Role: roles.ArtifactoryReader,
			},
		}
		policyMembers = append(policyMembers, policyMember)
	}
	return policyMembers
}

func GenerateIAMServiceAccountKey(project resourcemgr.Project) []client.Object {
	svcAcc := iamutil.SvcAccountEmail(DockerPullSA, *project.Spec.ResourceID)
	return []client.Object{
		&iam.IAMServiceAccountKey{
			TypeMeta: metav1.TypeMeta{
				Kind:       iam.IAMServiceAccountKeyGVK.Kind,
				APIVersion: iam.SchemeGroupVersion.String(),
			},
			ObjectMeta: objMeta(PltfDockerPullCfgSAKey, project.Namespace, project),
			Spec: iam.IAMServiceAccountKeySpec{
				ServiceAccountRef: kcc.ResourceRef{
					External: svcAcc,
				},
			},
		},
	}
}

func GenerateDefaultRouter(project resourcemgr.Project, gcpregion string) []client.Object {
	projectID := *project.Spec.ResourceID
	return []client.Object{
		&compute.ComputeRouter{
			ObjectMeta: metav1.ObjectMeta{
				Name:      DefaultRouterName,
				Namespace: project.Namespace,
				Annotations: map[string]string{
					meta.DeletionPolicyAnnotation: meta.DeletionPolicyAbandon,
					meta.ProjectAnnotation:        projectID,
				},
				OwnerReferences: ownerRef(&project),
			},
			TypeMeta: metav1.TypeMeta{
				Kind:       compute.ComputeRouterGVK.Kind,
				APIVersion: compute.SchemeGroupVersion.String(),
			},
			Spec: compute.ComputeRouterSpec{
				NetworkRef: NetworkRef(projectID),
				Region:     gcpregion,
			},
		},
	}
}

func GenerateDefaultNATGateway(project resourcemgr.Project, gcpregion string) []client.Object {
	projectID := *project.Spec.ResourceID
	return []client.Object{
		&compute.ComputeRouterNAT{
			ObjectMeta: metav1.ObjectMeta{
				Name:      DefaultNATGatewayName,
				Namespace: project.Namespace,
				Annotations: map[string]string{
					meta.DeletionPolicyAnnotation: meta.DeletionPolicyAbandon,
					meta.ProjectAnnotation:        projectID,
				},
				OwnerReferences: ownerRef(&project),
			},
			TypeMeta: metav1.TypeMeta{
				Kind:       compute.ComputeRouterNATGVK.Kind,
				APIVersion: compute.SchemeGroupVersion.String(),
			},
			Spec: compute.ComputeRouterNATSpec{
				NatIpAllocateOption: "AUTO_ONLY",
				Region:              gcpregion,
				RouterRef: v1alpha1.ResourceRef{
					Name:      DefaultRouterName,
					Namespace: project.Namespace,
				},
				SourceSubnetworkIpRangesToNat: "ALL_SUBNETWORKS_ALL_IP_RANGES",
			},
		},
	}
}
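// GenerateDockerPullSecret below renders a standard kubelet-compatible
// dockerconfigjson keyed by registry host. For two configured registries the
// decoded payload looks roughly like this (regions illustrative, key material
// elided):
//
//	{
//	  "auths": {
//	    "us-docker.pkg.dev":     {"auth": "<base64 of _json_key_base64:<key>>"},
//	    "europe-docker.pkg.dev": {"auth": "<base64 of _json_key_base64:<key>>"}
//	  }
//	}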
func GenerateDockerPullSecret(project resourcemgr.Project, svcAcc iam.IAMServiceAccountKey, cfgs []ArtifactRegistry) (client.Object, error) {
	svcAccKey := svcAcc.Status.PrivateKey
	jsonKey := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("_json_key_base64:%s", *svcAccKey)))

	authMap := make(map[string]interface{})
	authMap["auth"] = jsonKey

	secrets := make(map[string]interface{})
	for _, cfg := range cfgs {
		registry := fmt.Sprintf("%s-docker.pkg.dev", cfg.Location)
		if _, exists := secrets[registry]; exists {
			continue
		}
		secrets[registry] = authMap
	}

	auths := make(map[string]interface{})
	auths["auths"] = secrets

	secretJSON, err := json.Marshal(auths)
	if err != nil {
		return nil, fmt.Errorf("failed to marshal secret into JSON: %w", err)
	}

	return &v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: v1.SchemeGroupVersion.String(),
		},
		ObjectMeta: objMeta(K8sPltfDockerPullCfg, project.Namespace, project),
		Data: map[string][]byte{
			".dockerconfigjson": secretJSON,
		},
		Type: "kubernetes.io/dockerconfigjson",
	}, nil
}

func GenerateSecretManagerObjects(project resourcemgr.Project, secret v1.Secret) []client.Object {
	automatic := true
	secretData := secret.Data[".dockerconfigjson"]
	data := string(secretData)
	resource := PltfDockerPullCfg

	return []client.Object{
		&secretmgr.SecretManagerSecret{
			TypeMeta: metav1.TypeMeta{
				Kind:       secretmgr.SecretManagerSecretGVK.Kind,
				APIVersion: secretmgr.SchemeGroupVersion.String(),
			},
			ObjectMeta: objMeta(PltfDockerPullCfg, project.Namespace, project),
			Spec: secretmgr.SecretManagerSecretSpec{
				Replication: secretmgr.SecretReplication{
					Automatic: &automatic,
				},
				ResourceID: &resource,
			},
		},
		&secretmgr.SecretManagerSecretVersion{
			TypeMeta: metav1.TypeMeta{
				Kind:       secretmgr.SecretManagerSecretVersionGVK.Kind,
				APIVersion: secretmgr.SchemeGroupVersion.String(),
			},
			ObjectMeta: objMeta(PltfDockerPullCfg, project.Namespace, project),
			Spec: secretmgr.SecretManagerSecretVersionSpec{
				SecretRef: kcc.ResourceRef{
					Name: NameWithProjectPrefix(PltfDockerPullCfg, project.Name),
				},
				SecretData: secretmgr.SecretversionSecretData{
					// TODO: Replace Value with ValueFrom once Key field is available in ResourceRef
					Value: &data,
				},
			},
		},
	}
}

func ownerRef(p *resourcemgr.Project) []metav1.OwnerReference {
	return []metav1.OwnerReference{
		*metav1.NewControllerRef(
			p,
			resourcemgr.ProjectGVK,
		),
	}
}
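// Every object generated above carries this controller owner reference, so
// deleting a Project cascades to the generated Kubernetes objects via garbage
// collection. Resources annotated with meta.DeletionPolicyAbandon (the default
// network, router, and NAT gateway) are expected to leave their underlying GCP
// resources in place when the Kubernetes object is deleted.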