...

Source file src/cloud.google.com/go/storage/bucket.go

Documentation: cloud.google.com/go/storage

     1  // Copyright 2014 Google LLC
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package storage
    16  
    17  import (
    18  	"context"
    19  	"encoding/base64"
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"reflect"
    24  	"strings"
    25  	"time"
    26  
    27  	"cloud.google.com/go/compute/metadata"
    28  	"cloud.google.com/go/internal/optional"
    29  	"cloud.google.com/go/internal/trace"
    30  	"cloud.google.com/go/storage/internal/apiv2/storagepb"
    31  	"google.golang.org/api/googleapi"
    32  	"google.golang.org/api/iamcredentials/v1"
    33  	"google.golang.org/api/iterator"
    34  	"google.golang.org/api/option"
    35  	raw "google.golang.org/api/storage/v1"
    36  	dpb "google.golang.org/genproto/googleapis/type/date"
    37  	"google.golang.org/protobuf/proto"
    38  	"google.golang.org/protobuf/types/known/durationpb"
    39  )
    40  
// BucketHandle provides operations on a Google Cloud Storage bucket.
// Use Client.Bucket to get a handle.
type BucketHandle struct {
	c                     *Client // client through which all bucket operations are made
	name                  string  // name of the bucket
	acl                   ACLHandle
	defaultObjectACL      ACLHandle          // ACLs applied to new objects that have no explicit ACL
	conds                 *BucketConditions  // preconditions applied to bucket-level calls; may be nil
	userProject           string             // project for Requester Pays buckets
	retry                 *retryConfig       // retry policy shared with the ACL handles
	enableObjectRetention *bool              // passed through to CreateBucket; nil means API default
}
    53  
    54  // Bucket returns a BucketHandle, which provides operations on the named bucket.
    55  // This call does not perform any network operations.
    56  //
    57  // The supplied name must contain only lowercase letters, numbers, dashes,
    58  // underscores, and dots. The full specification for valid bucket names can be
    59  // found at:
    60  //
    61  //	https://cloud.google.com/storage/docs/bucket-naming
    62  func (c *Client) Bucket(name string) *BucketHandle {
    63  	retry := c.retry.clone()
    64  	return &BucketHandle{
    65  		c:    c,
    66  		name: name,
    67  		acl: ACLHandle{
    68  			c:      c,
    69  			bucket: name,
    70  			retry:  retry,
    71  		},
    72  		defaultObjectACL: ACLHandle{
    73  			c:         c,
    74  			bucket:    name,
    75  			isDefault: true,
    76  			retry:     retry,
    77  		},
    78  		retry: retry,
    79  	}
    80  }
    81  
    82  // Create creates the Bucket in the project.
    83  // If attrs is nil the API defaults will be used.
    84  func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
    85  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
    86  	defer func() { trace.EndSpan(ctx, err) }()
    87  
    88  	o := makeStorageOpts(true, b.retry, b.userProject)
    89  
    90  	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil {
    91  		return err
    92  	}
    93  	return nil
    94  }
    95  
    96  // Delete deletes the Bucket.
    97  func (b *BucketHandle) Delete(ctx context.Context) (err error) {
    98  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
    99  	defer func() { trace.EndSpan(ctx, err) }()
   100  
   101  	o := makeStorageOpts(true, b.retry, b.userProject)
   102  	return b.c.tc.DeleteBucket(ctx, b.name, b.conds, o...)
   103  }
   104  
// ACL returns an ACLHandle, which provides access to the bucket's access control list.
// This controls who can list, create or overwrite the objects in a bucket.
// This call does not perform any network operations.
func (b *BucketHandle) ACL() *ACLHandle {
	// The handle was pre-populated by Client.Bucket with the bucket name
	// and retry config; returning a pointer lets callers mutate it in place.
	return &b.acl
}
   111  
// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
// This call does not perform any network operations.
func (b *BucketHandle) DefaultObjectACL() *ACLHandle {
	// Pre-populated by Client.Bucket with isDefault set to true.
	return &b.defaultObjectACL
}
   118  
   119  // Object returns an ObjectHandle, which provides operations on the named object.
   120  // This call does not perform any network operations such as fetching the object or verifying its existence.
   121  // Use methods on ObjectHandle to perform network operations.
   122  //
   123  // name must consist entirely of valid UTF-8-encoded runes. The full specification
   124  // for valid object names can be found at:
   125  //
   126  //	https://cloud.google.com/storage/docs/naming-objects
   127  func (b *BucketHandle) Object(name string) *ObjectHandle {
   128  	retry := b.retry.clone()
   129  	return &ObjectHandle{
   130  		c:      b.c,
   131  		bucket: b.name,
   132  		object: name,
   133  		acl: ACLHandle{
   134  			c:           b.c,
   135  			bucket:      b.name,
   136  			object:      name,
   137  			userProject: b.userProject,
   138  			retry:       retry,
   139  		},
   140  		gen:         -1,
   141  		userProject: b.userProject,
   142  		retry:       retry,
   143  	}
   144  }
   145  
   146  // Attrs returns the metadata for the bucket.
   147  func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
   148  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
   149  	defer func() { trace.EndSpan(ctx, err) }()
   150  
   151  	o := makeStorageOpts(true, b.retry, b.userProject)
   152  	return b.c.tc.GetBucket(ctx, b.name, b.conds, o...)
   153  }
   154  
   155  // Update updates a bucket's attributes.
   156  func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
   157  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Update")
   158  	defer func() { trace.EndSpan(ctx, err) }()
   159  
   160  	isIdempotent := b.conds != nil && b.conds.MetagenerationMatch != 0
   161  	o := makeStorageOpts(isIdempotent, b.retry, b.userProject)
   162  	return b.c.tc.UpdateBucket(ctx, b.name, &uattrs, b.conds, o...)
   163  }
   164  
   165  // SignedURL returns a URL for the specified object. Signed URLs allow anyone
   166  // access to a restricted resource for a limited time without needing a Google
   167  // account or signing in.
   168  // For more information about signed URLs, see "[Overview of access control]."
   169  //
   170  // This method requires the Method and Expires fields in the specified
   171  // SignedURLOptions to be non-nil. You may need to set the GoogleAccessID and
   172  // PrivateKey fields in some cases. Read more on the [automatic detection of credentials]
   173  // for this method.
   174  //
   175  // [Overview of access control]: https://cloud.google.com/storage/docs/accesscontrol#signed_urls_query_string_authentication
   176  // [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
   177  func (b *BucketHandle) SignedURL(object string, opts *SignedURLOptions) (string, error) {
   178  	// Make a copy of opts so we don't modify the pointer parameter.
   179  	newopts := opts.clone()
   180  
   181  	if newopts.Hostname == "" {
   182  		// Extract the correct host from the readhost set on the client
   183  		newopts.Hostname = b.c.xmlHost
   184  	}
   185  
   186  	if opts.GoogleAccessID != "" && (opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
   187  		return SignedURL(b.name, object, newopts)
   188  	}
   189  
   190  	if newopts.GoogleAccessID == "" {
   191  		id, err := b.detectDefaultGoogleAccessID()
   192  		if err != nil {
   193  			return "", err
   194  		}
   195  		newopts.GoogleAccessID = id
   196  	}
   197  	if newopts.SignBytes == nil && len(newopts.PrivateKey) == 0 {
   198  		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
   199  			var sa struct {
   200  				PrivateKey string `json:"private_key"`
   201  			}
   202  			err := json.Unmarshal(b.c.creds.JSON, &sa)
   203  			if err == nil && sa.PrivateKey != "" {
   204  				newopts.PrivateKey = []byte(sa.PrivateKey)
   205  			}
   206  		}
   207  
   208  		// Don't error out if we can't unmarshal the private key from the client,
   209  		// fallback to the default sign function for the service account.
   210  		if len(newopts.PrivateKey) == 0 {
   211  			newopts.SignBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
   212  		}
   213  	}
   214  	return SignedURL(b.name, object, newopts)
   215  }
   216  
// GenerateSignedPostPolicyV4 generates a PostPolicyV4 value from bucket, object and opts.
// The generated URL and fields will then allow an unauthenticated client to perform multipart uploads.
//
// This method requires the Expires field in the specified PostPolicyV4Options
// to be non-nil. You may need to set the GoogleAccessID and PrivateKey fields
// in some cases. Read more on the [automatic detection of credentials] for this method.
//
// [automatic detection of credentials]: https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing
func (b *BucketHandle) GenerateSignedPostPolicyV4(object string, opts *PostPolicyV4Options) (*PostPolicyV4, error) {
	// Make a copy of opts so we don't modify the pointer parameter.
	newopts := opts.clone()

	if newopts.Hostname == "" {
		// Extract the correct host from the readhost set on the client
		newopts.Hostname = b.c.xmlHost
	}

	// If the caller supplied both an access ID and a signing mechanism
	// (raw-bytes signer, legacy signer, or private key), no credential
	// detection is needed; sign with what was provided.
	if opts.GoogleAccessID != "" && (opts.SignRawBytes != nil || opts.SignBytes != nil || len(opts.PrivateKey) > 0) {
		return GenerateSignedPostPolicyV4(b.name, object, newopts)
	}

	if newopts.GoogleAccessID == "" {
		id, err := b.detectDefaultGoogleAccessID()
		if err != nil {
			return nil, err
		}
		newopts.GoogleAccessID = id
	}
	if newopts.SignBytes == nil && newopts.SignRawBytes == nil && len(newopts.PrivateKey) == 0 {
		// Attempt to pull a private key out of the client's JSON credentials.
		if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
			var sa struct {
				PrivateKey string `json:"private_key"`
			}
			err := json.Unmarshal(b.c.creds.JSON, &sa)
			if err == nil && sa.PrivateKey != "" {
				newopts.PrivateKey = []byte(sa.PrivateKey)
			}
		}

		// Don't error out if we can't unmarshal the private key from the client,
		// fallback to the default sign function for the service account.
		if len(newopts.PrivateKey) == 0 {
			// Note: assigned to SignRawBytes here (not SignBytes as in
			// SignedURL) — the IAM SignBlob-backed signer receives the raw
			// payload to sign.
			newopts.SignRawBytes = b.defaultSignBytesFunc(newopts.GoogleAccessID)
		}
	}
	return GenerateSignedPostPolicyV4(b.name, object, newopts)
}
   264  
// detectDefaultGoogleAccessID determines the GoogleAccessID to use for
// signing when the caller has not provided one. It first inspects the
// client's JSON credentials, then falls back to the GCE metadata service.
// The most recent failure is kept in returnErr so that the final error
// reports why detection failed.
func (b *BucketHandle) detectDefaultGoogleAccessID() (string, error) {
	returnErr := errors.New("no credentials found on client and not on GCE (Google Compute Engine)")

	if b.c.creds != nil && len(b.c.creds.JSON) > 0 {
		var sa struct {
			ClientEmail        string `json:"client_email"`
			SAImpersonationURL string `json:"service_account_impersonation_url"`
			CredType           string `json:"type"`
		}

		err := json.Unmarshal(b.c.creds.JSON, &sa)
		if err != nil {
			returnErr = err
		} else {
			switch sa.CredType {
			case "impersonated_service_account", "external_account":
				// The service account email is embedded in the impersonation
				// URL between its last '/' and its last ':'.
				start, end := strings.LastIndex(sa.SAImpersonationURL, "/"), strings.LastIndex(sa.SAImpersonationURL, ":")

				if end <= start {
					returnErr = errors.New("error parsing external or impersonated service account credentials")
				} else {
					return sa.SAImpersonationURL[start+1 : end], nil
				}
			case "service_account":
				if sa.ClientEmail != "" {
					return sa.ClientEmail, nil
				}
				returnErr = errors.New("empty service account client email")
			default:
				returnErr = errors.New("unable to parse credentials; only service_account, external_account and impersonated_service_account credentials are supported")
			}
		}
	}

	// Don't error out if we can't unmarshal, fallback to GCE check.
	if metadata.OnGCE() {
		email, err := metadata.Email("default")
		if err == nil && email != "" {
			return email, nil
		} else if err != nil {
			returnErr = err
		} else {
			returnErr = errors.New("empty email from GCE metadata service")
		}

	}
	return "", fmt.Errorf("storage: unable to detect default GoogleAccessID: %w. Please provide the GoogleAccessID or use a supported means for autodetecting it (see https://pkg.go.dev/cloud.google.com/go/storage#hdr-Credential_requirements_for_signing)", returnErr)
}
   313  
   314  func (b *BucketHandle) defaultSignBytesFunc(email string) func([]byte) ([]byte, error) {
   315  	return func(in []byte) ([]byte, error) {
   316  		ctx := context.Background()
   317  
   318  		// It's ok to recreate this service per call since we pass in the http client,
   319  		// circumventing the cost of recreating the auth/transport layer
   320  		svc, err := iamcredentials.NewService(ctx, option.WithHTTPClient(b.c.hc))
   321  		if err != nil {
   322  			return nil, fmt.Errorf("unable to create iamcredentials client: %w", err)
   323  		}
   324  
   325  		resp, err := svc.Projects.ServiceAccounts.SignBlob(fmt.Sprintf("projects/-/serviceAccounts/%s", email), &iamcredentials.SignBlobRequest{
   326  			Payload: base64.StdEncoding.EncodeToString(in),
   327  		}).Do()
   328  		if err != nil {
   329  			return nil, fmt.Errorf("unable to sign bytes: %w", err)
   330  		}
   331  		out, err := base64.StdEncoding.DecodeString(resp.SignedBlob)
   332  		if err != nil {
   333  			return nil, fmt.Errorf("unable to base64 decode response: %w", err)
   334  		}
   335  		return out, nil
   336  	}
   337  }
   338  
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
// Read-only fields are ignored by BucketHandle.Create.
type BucketAttrs struct {
	// Name is the name of the bucket.
	// This field is read-only.
	Name string

	// ACL is the list of access control rules on the bucket.
	ACL []ACLRule

	// BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of
	// UniformBucketLevelAccess is recommended above the use of this field.
	// Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to
	// true, will enable UniformBucketLevelAccess.
	BucketPolicyOnly BucketPolicyOnly

	// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
	// policies and ignore any ACL rules for the bucket.
	// See https://cloud.google.com/storage/docs/uniform-bucket-level-access
	// for more information.
	UniformBucketLevelAccess UniformBucketLevelAccess

	// PublicAccessPrevention is the setting for the bucket's
	// PublicAccessPrevention policy, which can be used to prevent public access
	// of data in the bucket. See
	// https://cloud.google.com/storage/docs/public-access-prevention for more
	// information.
	PublicAccessPrevention PublicAccessPrevention

	// DefaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	DefaultObjectACL []ACLRule

	// DefaultEventBasedHold is the default value for event-based hold on
	// newly created objects in this bucket. It defaults to false.
	DefaultEventBasedHold bool

	// If not empty, applies a predefined set of access controls. It should be set
	// only when creating a bucket.
	// It is always empty for BucketAttrs returned from the service.
	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
	// for valid values.
	PredefinedACL string

	// If not empty, applies a predefined set of default object access controls.
	// It should be set only when creating a bucket.
	// It is always empty for BucketAttrs returned from the service.
	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
	// for valid values.
	PredefinedDefaultObjectACL string

	// Location is the location of the bucket. It defaults to "US".
	// If specifying a dual-region, CustomPlacementConfig should be set in conjunction.
	Location string

	// The bucket's custom placement configuration that holds a list of
	// regional locations for custom dual regions. Set in conjunction with
	// Location when creating a dual-region bucket.
	CustomPlacementConfig *CustomPlacementConfig

	// MetaGeneration is the metadata generation of the bucket.
	// This field is read-only.
	MetaGeneration int64

	// StorageClass is the default storage class of the bucket. This defines
	// how objects in the bucket are stored and determines the SLA
	// and the cost of storage. Typical values are "STANDARD", "NEARLINE",
	// "COLDLINE" and "ARCHIVE". Defaults to "STANDARD".
	// See https://cloud.google.com/storage/docs/storage-classes for all
	// valid values.
	StorageClass string

	// Created is the creation time of the bucket.
	// This field is read-only.
	Created time.Time

	// VersioningEnabled reports whether this bucket has versioning enabled.
	VersioningEnabled bool

	// Labels are the bucket's labels.
	Labels map[string]string

	// RequesterPays reports whether the bucket is a Requester Pays bucket.
	// Clients performing operations on Requester Pays buckets must provide
	// a user project (see BucketHandle.UserProject), which will be billed
	// for the operations.
	RequesterPays bool

	// Lifecycle is the lifecycle configuration for objects in the bucket.
	Lifecycle Lifecycle

	// Retention policy enforces a minimum retention time for all objects
	// contained in the bucket. A RetentionPolicy of nil implies the bucket
	// has no minimum data retention.
	//
	// This feature is in private alpha release. It is not currently available to
	// most customers. It might be changed in backwards-incompatible ways and is not
	// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy

	// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
	CORS []CORS

	// The encryption configuration used by default for newly inserted objects.
	Encryption *BucketEncryption

	// The logging configuration.
	Logging *BucketLogging

	// The website configuration.
	Website *BucketWebsite

	// Etag is the HTTP/1.1 Entity tag for the bucket.
	// This field is read-only.
	Etag string

	// LocationType describes how data is stored and replicated.
	// Typical values are "multi-region", "region" and "dual-region".
	// This field is read-only.
	LocationType string

	// The project number of the project the bucket belongs to.
	// This field is read-only.
	ProjectNumber uint64

	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
	// more information.
	RPO RPO

	// Autoclass holds the bucket's autoclass configuration. If enabled,
	// allows for the automatic selection of the best storage class
	// based on object access patterns.
	Autoclass *Autoclass

	// ObjectRetentionMode reports whether individual objects in the bucket can
	// be configured with a retention policy. An empty value means that object
	// retention is disabled.
	// This field is read-only. Object retention can be enabled only by creating
	// a bucket with SetObjectRetention set to true on the BucketHandle. It
	// cannot be modified once the bucket is created.
	// ObjectRetention cannot be configured or reported through the gRPC API.
	ObjectRetentionMode string

	// SoftDeletePolicy contains the bucket's soft delete policy, which defines
	// the period of time that soft-deleted objects will be retained, and cannot
	// be permanently deleted. By default, new buckets will be created with a
	// 7 day retention duration. In order to fully disable soft delete, you need
	// to set a policy with a RetentionDuration of 0.
	SoftDeletePolicy *SoftDeletePolicy
}
   490  
// BucketPolicyOnly is an alias for UniformBucketLevelAccess.
// Use of UniformBucketLevelAccess is preferred above BucketPolicyOnly.
type BucketPolicyOnly struct {
	// Enabled specifies whether access checks use only bucket-level IAM
	// policies. Enabled may be disabled until the locked time.
	Enabled bool
	// LockedTime specifies the deadline for changing Enabled from true to
	// false.
	LockedTime time.Time
}
   501  
// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
// policies. See https://cloud.google.com/storage/docs/uniform-bucket-level-access
// for more information.
type UniformBucketLevelAccess struct {
	// Enabled specifies whether access checks use only bucket-level IAM
	// policies. Enabled may be disabled until the locked time.
	Enabled bool
	// LockedTime specifies the deadline for changing Enabled from true to
	// false.
	LockedTime time.Time
}
   512  
// PublicAccessPrevention configures the Public Access Prevention feature, which
// can be used to disallow public access to any data in a bucket. See
// https://cloud.google.com/storage/docs/public-access-prevention for more
// information.
type PublicAccessPrevention int

const (
	// PublicAccessPreventionUnknown is a zero value, used only if this field is
	// not set in a call to GCS.
	PublicAccessPreventionUnknown PublicAccessPrevention = iota

	// PublicAccessPreventionUnspecified corresponds to a value of "unspecified".
	// Deprecated: use PublicAccessPreventionInherited
	PublicAccessPreventionUnspecified

	// PublicAccessPreventionEnforced corresponds to a value of "enforced". This
	// enforces Public Access Prevention on the bucket.
	PublicAccessPreventionEnforced

	// PublicAccessPreventionInherited corresponds to a value of "inherited"
	// and is the default for buckets.
	PublicAccessPreventionInherited

	// The unexported string constants below are the wire values sent to the
	// API, mapped from the typed constants above by String.
	publicAccessPreventionUnknown string = ""
	// TODO: remove unspecified when change is fully completed
	publicAccessPreventionUnspecified = "unspecified"
	publicAccessPreventionEnforced    = "enforced"
	publicAccessPreventionInherited   = "inherited"
)
   542  
   543  func (p PublicAccessPrevention) String() string {
   544  	switch p {
   545  	case PublicAccessPreventionInherited, PublicAccessPreventionUnspecified:
   546  		return publicAccessPreventionInherited
   547  	case PublicAccessPreventionEnforced:
   548  		return publicAccessPreventionEnforced
   549  	default:
   550  		return publicAccessPreventionUnknown
   551  	}
   552  }
   553  
// Lifecycle is the lifecycle configuration for objects in the bucket.
type Lifecycle struct {
	// Rules is the set of lifecycle rules; each rule pairs an action with
	// the conditions under which it fires.
	Rules []LifecycleRule
}
   558  
// RetentionPolicy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
type RetentionPolicy struct {
	// RetentionPeriod specifies the duration that objects need to be
	// retained. Retention duration must be greater than zero and less than
	// 100 years. Note that enforcement of retention periods less than a day
	// is not guaranteed. Such periods should only be used for testing
	// purposes.
	RetentionPeriod time.Duration

	// EffectiveTime is the time from which the policy was enforced and
	// effective. This field is read-only.
	EffectiveTime time.Time

	// IsLocked describes whether the bucket is locked. Once locked, an
	// object retention policy cannot be modified.
	// This field is read-only.
	IsLocked bool
}
   588  
const (
	// rfc3339Date is an RFC 3339 timestamp with only the date segment, used
	// for CreatedBefore, CustomTimeBefore, and NoncurrentTimeBefore in
	// LifecycleRule.
	rfc3339Date = "2006-01-02"

	// DeleteAction is a lifecycle action that deletes a live and/or archived
	// objects. Takes precedence over SetStorageClass actions.
	DeleteAction = "Delete"

	// SetStorageClassAction changes the storage class of live and/or archived
	// objects.
	SetStorageClassAction = "SetStorageClass"

	// AbortIncompleteMPUAction is a lifecycle action that aborts an incomplete
	// multipart upload when the multipart upload meets the conditions specified
	// in the lifecycle rule. The AgeInDays condition is the only allowed
	// condition for this action. AgeInDays is measured from the time the
	// multipart upload was created.
	AbortIncompleteMPUAction = "AbortIncompleteMultipartUpload"
)
   609  
// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
type LifecycleRule struct {
	// Action is the action to take when all of the associated conditions are
	// met.
	Action LifecycleAction

	// Condition is the set of conditions that must be met for the associated
	// action to be taken. All conditions are ANDed together.
	Condition LifecycleCondition
}
   623  
// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
	// Type is the type of action to take on matching objects.
	//
	// Acceptable values are storage.DeleteAction, storage.SetStorageClassAction,
	// and storage.AbortIncompleteMPUAction.
	Type string

	// StorageClass is the storage class to set on matching objects if the Action
	// is "SetStorageClass". It is ignored for other action types.
	StorageClass string
}
   636  
// Liveness specifies whether the object is live or not. It is used as a
// lifecycle condition; see LifecycleCondition.Liveness.
type Liveness int

const (
	// LiveAndArchived includes both live and archived objects.
	// This is the zero value, so it is the default when unset.
	LiveAndArchived Liveness = iota
	// Live specifies that the object is still live.
	Live
	// Archived specifies that the object is archived.
	Archived
)
   648  
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
	// AllObjects is used to select all objects in a bucket by
	// setting AgeInDays to 0.
	AllObjects bool

	// AgeInDays is the age of the object in days.
	// If you want to set AgeInDays to `0` use AllObjects set to `true`.
	AgeInDays int64

	// CreatedBefore is the time the object was created.
	//
	// This condition is satisfied when an object is created before midnight of
	// the specified date in UTC.
	CreatedBefore time.Time

	// CustomTimeBefore is the CustomTime metadata field of the object. This
	// condition is satisfied when an object's CustomTime timestamp is before
	// midnight of the specified date in UTC.
	//
	// This condition can only be satisfied if CustomTime has been set.
	CustomTimeBefore time.Time

	// DaysSinceCustomTime is the days elapsed since the CustomTime date of the
	// object. This condition can only be satisfied if CustomTime has been set.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	DaysSinceCustomTime int64

	// DaysSinceNoncurrentTime is the days elapsed since the noncurrent timestamp
	// of the object. This condition is relevant only for versioned objects.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	DaysSinceNoncurrentTime int64

	// Liveness specifies the object's liveness. Relevant only for versioned objects.
	Liveness Liveness

	// MatchesPrefix is the condition matching an object if any of the
	// matches_prefix strings are an exact prefix of the object's name.
	MatchesPrefix []string

	// MatchesStorageClasses is the condition matching the object's storage
	// class.
	//
	// Values include "STANDARD", "NEARLINE", "COLDLINE" and "ARCHIVE".
	MatchesStorageClasses []string

	// MatchesSuffix is the condition matching an object if any of the
	// matches_suffix strings are an exact suffix of the object's name.
	MatchesSuffix []string

	// NoncurrentTimeBefore is the noncurrent timestamp of the object. This
	// condition is satisfied when an object's noncurrent timestamp is before
	// midnight of the specified date in UTC.
	//
	// This condition is relevant only for versioned objects.
	NoncurrentTimeBefore time.Time

	// NumNewerVersions is the condition matching objects with a number of newer versions.
	//
	// If the value is N, this condition is satisfied when there are at least N
	// versions (including the live version) newer than this version of the
	// object.
	// Note: Using `0` as the value will be ignored by the library and not sent to the API.
	NumNewerVersions int64
}
   717  
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
	// LogBucket is the destination bucket where the current bucket's logs
	// should be placed.
	LogBucket string

	// LogObjectPrefix is a prefix for log object names.
	LogObjectPrefix string
}
   729  
// BucketWebsite holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
	// MainPageSuffix: if the requested object path is missing, the service
	// will ensure the path has a trailing '/', append this suffix, and attempt
	// to retrieve the resulting object. This allows the creation of index.html
	// objects to represent directory pages.
	MainPageSuffix string

	// NotFoundPage: if the requested object path is missing, and any
	// mainPageSuffix object is missing, if applicable, the service will return
	// the named object from this bucket as the content for a 404 Not Found
	// result.
	NotFoundPage string
}
   745  
// CustomPlacementConfig holds the bucket's custom placement
// configuration for Custom Dual Regions. See
// https://cloud.google.com/storage/docs/locations#location-dr for more information.
type CustomPlacementConfig struct {
	// DataLocations is the list of regional locations in which data is
	// placed. Custom Dual Regions require exactly 2 regional locations.
	DataLocations []string
}
   754  
// Autoclass holds the bucket's autoclass configuration. If enabled,
// allows for the automatic selection of the best storage class
// based on object access patterns. See
// https://cloud.google.com/storage/docs/using-autoclass for more information.
type Autoclass struct {
	// Enabled specifies whether the autoclass feature is enabled
	// on the bucket.
	Enabled bool
	// ToggleTime is the time from which Autoclass was last toggled.
	// If Autoclass is enabled when the bucket is created, the ToggleTime
	// is set to the bucket creation time. This field is read-only.
	ToggleTime time.Time
	// TerminalStorageClass is the storage class that objects in the bucket
	// eventually transition to if they are not read for a certain length of
	// time. Valid values are NEARLINE and ARCHIVE.
	TerminalStorageClass string
	// TerminalStorageClassUpdateTime represents the time of the most recent
	// update to TerminalStorageClass.
	TerminalStorageClassUpdateTime time.Time
}
   775  
// SoftDeletePolicy contains the bucket's soft delete policy, which defines
// the period of time that soft-deleted objects will be retained and cannot
// be permanently deleted.
type SoftDeletePolicy struct {
	// EffectiveTime indicates the time from which the policy, or one with a
	// greater retention, was effective. This field is read-only.
	EffectiveTime time.Time

	// RetentionDuration is the amount of time that soft-deleted objects in the
	// bucket will be retained and cannot be permanently deleted.
	RetentionDuration time.Duration
}
   788  
// newBucket converts a raw (JSON API) bucket resource into a *BucketAttrs.
// A nil resource yields (nil, nil); an error is returned only when the
// resource's retention policy cannot be converted.
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
	if b == nil {
		return nil, nil
	}
	rp, err := toRetentionPolicy(b.RetentionPolicy)
	if err != nil {
		return nil, err
	}

	return &BucketAttrs{
		Name:                     b.Name,
		Location:                 b.Location,
		MetaGeneration:           b.Metageneration,
		DefaultEventBasedHold:    b.DefaultEventBasedHold,
		StorageClass:             b.StorageClass,
		Created:                  convertTime(b.TimeCreated),
		VersioningEnabled:        b.Versioning != nil && b.Versioning.Enabled,
		ACL:                      toBucketACLRules(b.Acl),
		DefaultObjectACL:         toObjectACLRules(b.DefaultObjectAcl),
		Labels:                   b.Labels,
		RequesterPays:            b.Billing != nil && b.Billing.RequesterPays,
		Lifecycle:                toLifecycle(b.Lifecycle),
		RetentionPolicy:          rp,
		ObjectRetentionMode:      toBucketObjectRetention(b.ObjectRetention),
		CORS:                     toCORS(b.Cors),
		Encryption:               toBucketEncryption(b.Encryption),
		Logging:                  toBucketLogging(b.Logging),
		Website:                  toBucketWebsite(b.Website),
		BucketPolicyOnly:         toBucketPolicyOnly(b.IamConfiguration),
		UniformBucketLevelAccess: toUniformBucketLevelAccess(b.IamConfiguration),
		PublicAccessPrevention:   toPublicAccessPrevention(b.IamConfiguration),
		Etag:                     b.Etag,
		LocationType:             b.LocationType,
		ProjectNumber:            b.ProjectNumber,
		RPO:                      toRPO(b),
		CustomPlacementConfig:    customPlacementFromRaw(b.CustomPlacementConfig),
		Autoclass:                toAutoclassFromRaw(b.Autoclass),
		SoftDeletePolicy:         toSoftDeletePolicyFromRaw(b.SoftDeletePolicy),
	}, nil
}
   829  
// newBucketFromProto converts a gRPC (storagepb) bucket resource into a
// *BucketAttrs. A nil resource yields nil.
func newBucketFromProto(b *storagepb.Bucket) *BucketAttrs {
	if b == nil {
		return nil
	}
	return &BucketAttrs{
		Name:                     parseBucketName(b.GetName()),
		Location:                 b.GetLocation(),
		MetaGeneration:           b.GetMetageneration(),
		DefaultEventBasedHold:    b.GetDefaultEventBasedHold(),
		StorageClass:             b.GetStorageClass(),
		Created:                  b.GetCreateTime().AsTime(),
		VersioningEnabled:        b.GetVersioning().GetEnabled(),
		ACL:                      toBucketACLRulesFromProto(b.GetAcl()),
		DefaultObjectACL:         toObjectACLRulesFromProto(b.GetDefaultObjectAcl()),
		Labels:                   b.GetLabels(),
		RequesterPays:            b.GetBilling().GetRequesterPays(),
		Lifecycle:                toLifecycleFromProto(b.GetLifecycle()),
		RetentionPolicy:          toRetentionPolicyFromProto(b.GetRetentionPolicy()),
		CORS:                     toCORSFromProto(b.GetCors()),
		Encryption:               toBucketEncryptionFromProto(b.GetEncryption()),
		Logging:                  toBucketLoggingFromProto(b.GetLogging()),
		Website:                  toBucketWebsiteFromProto(b.GetWebsite()),
		BucketPolicyOnly:         toBucketPolicyOnlyFromProto(b.GetIamConfig()),
		UniformBucketLevelAccess: toUniformBucketLevelAccessFromProto(b.GetIamConfig()),
		PublicAccessPrevention:   toPublicAccessPreventionFromProto(b.GetIamConfig()),
		LocationType:             b.GetLocationType(),
		RPO:                      toRPOFromProto(b),
		CustomPlacementConfig:    customPlacementFromProto(b.GetCustomPlacementConfig()),
		ProjectNumber:            parseProjectNumber(b.GetProject()), // this can return 0 if the project resource name is ID based
		Autoclass:                toAutoclassFromProto(b.GetAutoclass()),
		SoftDeletePolicy:         toSoftDeletePolicyFromProto(b.SoftDeletePolicy),
	}
}
   863  
   864  // toRawBucket copies the editable attribute from b to the raw library's Bucket type.
   865  func (b *BucketAttrs) toRawBucket() *raw.Bucket {
   866  	// Copy label map.
   867  	var labels map[string]string
   868  	if len(b.Labels) > 0 {
   869  		labels = make(map[string]string, len(b.Labels))
   870  		for k, v := range b.Labels {
   871  			labels[k] = v
   872  		}
   873  	}
   874  	// Ignore VersioningEnabled if it is false. This is OK because
   875  	// we only call this method when creating a bucket, and by default
   876  	// new buckets have versioning off.
   877  	var v *raw.BucketVersioning
   878  	if b.VersioningEnabled {
   879  		v = &raw.BucketVersioning{Enabled: true}
   880  	}
   881  	var bb *raw.BucketBilling
   882  	if b.RequesterPays {
   883  		bb = &raw.BucketBilling{RequesterPays: true}
   884  	}
   885  	var bktIAM *raw.BucketIamConfiguration
   886  	if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
   887  		bktIAM = &raw.BucketIamConfiguration{}
   888  		if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
   889  			bktIAM.UniformBucketLevelAccess = &raw.BucketIamConfigurationUniformBucketLevelAccess{
   890  				Enabled: true,
   891  			}
   892  		}
   893  		if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
   894  			bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
   895  		}
   896  	}
   897  	return &raw.Bucket{
   898  		Name:                  b.Name,
   899  		Location:              b.Location,
   900  		StorageClass:          b.StorageClass,
   901  		Acl:                   toRawBucketACL(b.ACL),
   902  		DefaultObjectAcl:      toRawObjectACL(b.DefaultObjectACL),
   903  		Versioning:            v,
   904  		Labels:                labels,
   905  		Billing:               bb,
   906  		Lifecycle:             toRawLifecycle(b.Lifecycle),
   907  		RetentionPolicy:       b.RetentionPolicy.toRawRetentionPolicy(),
   908  		Cors:                  toRawCORS(b.CORS),
   909  		Encryption:            b.Encryption.toRawBucketEncryption(),
   910  		Logging:               b.Logging.toRawBucketLogging(),
   911  		Website:               b.Website.toRawBucketWebsite(),
   912  		IamConfiguration:      bktIAM,
   913  		Rpo:                   b.RPO.String(),
   914  		CustomPlacementConfig: b.CustomPlacementConfig.toRawCustomPlacement(),
   915  		Autoclass:             b.Autoclass.toRawAutoclass(),
   916  		SoftDeletePolicy:      b.SoftDeletePolicy.toRawSoftDeletePolicy(),
   917  	}
   918  }
   919  
   920  func (b *BucketAttrs) toProtoBucket() *storagepb.Bucket {
   921  	if b == nil {
   922  		return &storagepb.Bucket{}
   923  	}
   924  
   925  	// Copy label map.
   926  	var labels map[string]string
   927  	if len(b.Labels) > 0 {
   928  		labels = make(map[string]string, len(b.Labels))
   929  		for k, v := range b.Labels {
   930  			labels[k] = v
   931  		}
   932  	}
   933  
   934  	// Ignore VersioningEnabled if it is false. This is OK because
   935  	// we only call this method when creating a bucket, and by default
   936  	// new buckets have versioning off.
   937  	var v *storagepb.Bucket_Versioning
   938  	if b.VersioningEnabled {
   939  		v = &storagepb.Bucket_Versioning{Enabled: true}
   940  	}
   941  	var bb *storagepb.Bucket_Billing
   942  	if b.RequesterPays {
   943  		bb = &storagepb.Bucket_Billing{RequesterPays: true}
   944  	}
   945  	var bktIAM *storagepb.Bucket_IamConfig
   946  	if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled || b.PublicAccessPrevention != PublicAccessPreventionUnknown {
   947  		bktIAM = &storagepb.Bucket_IamConfig{}
   948  		if b.UniformBucketLevelAccess.Enabled || b.BucketPolicyOnly.Enabled {
   949  			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
   950  				Enabled: true,
   951  			}
   952  		}
   953  		if b.PublicAccessPrevention != PublicAccessPreventionUnknown {
   954  			bktIAM.PublicAccessPrevention = b.PublicAccessPrevention.String()
   955  		}
   956  	}
   957  
   958  	return &storagepb.Bucket{
   959  		Name:                  b.Name,
   960  		Location:              b.Location,
   961  		StorageClass:          b.StorageClass,
   962  		Acl:                   toProtoBucketACL(b.ACL),
   963  		DefaultObjectAcl:      toProtoObjectACL(b.DefaultObjectACL),
   964  		Versioning:            v,
   965  		Labels:                labels,
   966  		Billing:               bb,
   967  		Lifecycle:             toProtoLifecycle(b.Lifecycle),
   968  		RetentionPolicy:       b.RetentionPolicy.toProtoRetentionPolicy(),
   969  		Cors:                  toProtoCORS(b.CORS),
   970  		Encryption:            b.Encryption.toProtoBucketEncryption(),
   971  		Logging:               b.Logging.toProtoBucketLogging(),
   972  		Website:               b.Website.toProtoBucketWebsite(),
   973  		IamConfig:             bktIAM,
   974  		Rpo:                   b.RPO.String(),
   975  		CustomPlacementConfig: b.CustomPlacementConfig.toProtoCustomPlacement(),
   976  		Autoclass:             b.Autoclass.toProtoAutoclass(),
   977  		SoftDeletePolicy:      b.SoftDeletePolicy.toProtoSoftDeletePolicy(),
   978  	}
   979  }
   980  
// toProtoBucket converts the update set in ua into a storagepb.Bucket for a
// gRPC metadata-update request. A nil ua yields an empty message. Only the
// attributes recorded in ua are populated; everything else is left at its
// proto zero value.
func (ua *BucketAttrsToUpdate) toProtoBucket() *storagepb.Bucket {
	if ua == nil {
		return &storagepb.Bucket{}
	}

	// Versioning and billing messages are built only when explicitly set.
	var v *storagepb.Bucket_Versioning
	if ua.VersioningEnabled != nil {
		v = &storagepb.Bucket_Versioning{Enabled: optional.ToBool(ua.VersioningEnabled)}
	}
	var bb *storagepb.Bucket_Billing
	if ua.RequesterPays != nil {
		bb = &storagepb.Bucket_Billing{RequesterPays: optional.ToBool(ua.RequesterPays)}
	}

	var bktIAM *storagepb.Bucket_IamConfig
	if ua.UniformBucketLevelAccess != nil || ua.BucketPolicyOnly != nil || ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
		bktIAM = &storagepb.Bucket_IamConfig{}

		if ua.BucketPolicyOnly != nil {
			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
				Enabled: optional.ToBool(ua.BucketPolicyOnly.Enabled),
			}
		}

		if ua.UniformBucketLevelAccess != nil {
			// UniformBucketLevelAccess takes precedence over BucketPolicyOnly,
			// so Enabled will be overridden here if both are set
			bktIAM.UniformBucketLevelAccess = &storagepb.Bucket_IamConfig_UniformBucketLevelAccess{
				Enabled: optional.ToBool(ua.UniformBucketLevelAccess.Enabled),
			}
		}

		if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
			bktIAM.PublicAccessPrevention = ua.PublicAccessPrevention.String()
		}
	}

	var defaultHold bool
	if ua.DefaultEventBasedHold != nil {
		defaultHold = optional.ToBool(ua.DefaultEventBasedHold)
	}
	var lifecycle Lifecycle
	if ua.Lifecycle != nil {
		lifecycle = *ua.Lifecycle
	}
	// Explicit ACLs cannot be combined with a predefined ACL in the same
	// request, so the predefined setting wins and clears the explicit list.
	var bktACL []*storagepb.BucketAccessControl
	if ua.acl != nil {
		bktACL = toProtoBucketACL(ua.acl)
	}
	if ua.PredefinedACL != "" {
		// Clear ACL or the call will fail.
		bktACL = nil
	}
	var bktDefaultObjectACL []*storagepb.ObjectAccessControl
	if ua.defaultObjectACL != nil {
		bktDefaultObjectACL = toProtoObjectACL(ua.defaultObjectACL)
	}
	if ua.PredefinedDefaultObjectACL != "" {
		// Clear ACLs or the call will fail.
		bktDefaultObjectACL = nil
	}

	return &storagepb.Bucket{
		StorageClass:          ua.StorageClass,
		Acl:                   bktACL,
		DefaultObjectAcl:      bktDefaultObjectACL,
		DefaultEventBasedHold: defaultHold,
		Versioning:            v,
		Billing:               bb,
		Lifecycle:             toProtoLifecycle(lifecycle),
		RetentionPolicy:       ua.RetentionPolicy.toProtoRetentionPolicy(),
		Cors:                  toProtoCORS(ua.CORS),
		Encryption:            ua.Encryption.toProtoBucketEncryption(),
		Logging:               ua.Logging.toProtoBucketLogging(),
		Website:               ua.Website.toProtoBucketWebsite(),
		IamConfig:             bktIAM,
		Rpo:                   ua.RPO.String(),
		Autoclass:             ua.Autoclass.toProtoAutoclass(),
		SoftDeletePolicy:      ua.SoftDeletePolicy.toProtoSoftDeletePolicy(),
		Labels:                ua.setLabels,
	}
}
  1063  
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
	// MaxAge is the value to return in the Access-Control-Max-Age
	// header used in preflight responses.
	MaxAge time.Duration

	// Methods is the list of HTTP methods on which to include CORS response
	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
	// of methods, and means "any method".
	Methods []string

	// Origins is the list of Origins eligible to receive CORS response
	// headers. Note: "*" is permitted in the list of origins, and means
	// "any Origin".
	Origins []string

	// ResponseHeaders is the list of HTTP headers other than the simple
	// response headers to give permission for the user-agent to share
	// across domains.
	ResponseHeaders []string
}
  1085  
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
	// DefaultKMSKeyName is a Cloud KMS key name, in the form
	// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to
	// encrypt objects inserted into this bucket, if no encryption method is
	// specified. The key's location must be the same as the bucket's.
	DefaultKMSKeyName string
}
  1094  
// BucketAttrsToUpdate define the attributes to update during an Update call.
// Nil pointer/optional fields are left unchanged on the bucket.
type BucketAttrsToUpdate struct {
	// If set, updates whether the bucket uses versioning.
	VersioningEnabled optional.Bool

	// If set, updates whether the bucket is a Requester Pays bucket.
	RequesterPays optional.Bool

	// DefaultEventBasedHold is the default value for event-based hold on
	// newly created objects in this bucket.
	DefaultEventBasedHold optional.Bool

	// BucketPolicyOnly is an alias for UniformBucketLevelAccess. Use of
	// UniformBucketLevelAccess is recommended above the use of this field.
	// Setting BucketPolicyOnly.Enabled OR UniformBucketLevelAccess.Enabled to
	// true, will enable UniformBucketLevelAccess. If both BucketPolicyOnly and
	// UniformBucketLevelAccess are set, the value of UniformBucketLevelAccess
	// will take precedence.
	BucketPolicyOnly *BucketPolicyOnly

	// UniformBucketLevelAccess configures access checks to use only bucket-level IAM
	// policies and ignore any ACL rules for the bucket.
	// See https://cloud.google.com/storage/docs/uniform-bucket-level-access
	// for more information.
	UniformBucketLevelAccess *UniformBucketLevelAccess

	// PublicAccessPrevention is the setting for the bucket's
	// PublicAccessPrevention policy, which can be used to prevent public access
	// of data in the bucket. See
	// https://cloud.google.com/storage/docs/public-access-prevention for more
	// information. The zero value (PublicAccessPreventionUnknown) leaves the
	// setting unchanged.
	PublicAccessPrevention PublicAccessPrevention

	// StorageClass is the default storage class of the bucket. This defines
	// how objects in the bucket are stored and determines the SLA
	// and the cost of storage. Typical values are "STANDARD", "NEARLINE",
	// "COLDLINE" and "ARCHIVE". Defaults to "STANDARD".
	// See https://cloud.google.com/storage/docs/storage-classes for all
	// valid values.
	StorageClass string

	// If set, updates the retention policy of the bucket. Using
	// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
	//
	// This feature is in private alpha release. It is not currently available to
	// most customers. It might be changed in backwards-incompatible ways and is not
	// subject to any SLA or deprecation policy.
	RetentionPolicy *RetentionPolicy

	// If set, replaces the CORS configuration with a new configuration.
	// An empty (rather than nil) slice causes all CORS policies to be removed.
	CORS []CORS

	// If set, replaces the encryption configuration of the bucket. Using
	// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
	// configuration.
	Encryption *BucketEncryption

	// If set, replaces the lifecycle configuration of the bucket.
	Lifecycle *Lifecycle

	// If set, replaces the logging configuration of the bucket. A pointer to
	// a zero-valued BucketLogging removes the existing configuration.
	Logging *BucketLogging

	// If set, replaces the website configuration of the bucket. A pointer to
	// a zero-valued BucketWebsite removes the existing configuration.
	Website *BucketWebsite

	// If not empty, applies a predefined set of access controls.
	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
	PredefinedACL string

	// If not empty, applies a predefined set of default object access controls.
	// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
	PredefinedDefaultObjectACL string

	// RPO configures the Recovery Point Objective (RPO) policy of the bucket.
	// Set to RPOAsyncTurbo to turn on Turbo Replication for a bucket.
	// See https://cloud.google.com/storage/docs/managing-turbo-replication for
	// more information.
	RPO RPO

	// If set, updates the autoclass configuration of the bucket.
	// See https://cloud.google.com/storage/docs/using-autoclass for more information.
	Autoclass *Autoclass

	// If set, updates the soft delete policy of the bucket.
	SoftDeletePolicy *SoftDeletePolicy

	// acl is the list of access control rules on the bucket.
	// It is unexported and only used internally by the gRPC client.
	// Library users should use ACLHandle methods directly.
	acl []ACLRule

	// defaultObjectACL is the list of access controls to
	// apply to new objects when no object ACL is provided.
	// It is unexported and only used internally by the gRPC client.
	// Library users should use ACLHandle methods directly.
	defaultObjectACL []ACLRule

	// setLabels and deleteLabels are accumulated via the SetLabel and
	// DeleteLabel methods and applied when the update request is built.
	setLabels    map[string]string
	deleteLabels map[string]bool
}
  1197  
  1198  // SetLabel causes a label to be added or modified when ua is used
  1199  // in a call to Bucket.Update.
  1200  func (ua *BucketAttrsToUpdate) SetLabel(name, value string) {
  1201  	if ua.setLabels == nil {
  1202  		ua.setLabels = map[string]string{}
  1203  	}
  1204  	ua.setLabels[name] = value
  1205  }
  1206  
  1207  // DeleteLabel causes a label to be deleted when ua is used in a
  1208  // call to Bucket.Update.
  1209  func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
  1210  	if ua.deleteLabels == nil {
  1211  		ua.deleteLabels = map[string]bool{}
  1212  	}
  1213  	ua.deleteLabels[name] = true
  1214  }
  1215  
// toRawBucket converts the update set in ua into a raw.Bucket for a JSON API
// patch request. Fields left unset in ua are omitted from the patch; fields
// that must be cleared server-side are listed in NullFields, and zero-valued
// fields that must still be transmitted are listed in ForceSendFields.
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
	rb := &raw.Bucket{}
	if ua.CORS != nil {
		rb.Cors = toRawCORS(ua.CORS)
		rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
	}
	if ua.DefaultEventBasedHold != nil {
		rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold)
		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold")
	}
	if ua.RetentionPolicy != nil {
		// A zero RetentionPeriod deletes the existing retention policy.
		if ua.RetentionPolicy.RetentionPeriod == 0 {
			rb.NullFields = append(rb.NullFields, "RetentionPolicy")
			rb.RetentionPolicy = nil
		} else {
			rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
		}
	}
	if ua.VersioningEnabled != nil {
		rb.Versioning = &raw.BucketVersioning{
			Enabled:         optional.ToBool(ua.VersioningEnabled),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if ua.RequesterPays != nil {
		rb.Billing = &raw.BucketBilling{
			RequesterPays:   optional.ToBool(ua.RequesterPays),
			ForceSendFields: []string{"RequesterPays"},
		}
	}
	if ua.BucketPolicyOnly != nil {
		rb.IamConfiguration = &raw.BucketIamConfiguration{
			UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled:         ua.BucketPolicyOnly.Enabled,
				ForceSendFields: []string{"Enabled"},
			},
		}
	}
	// UniformBucketLevelAccess takes precedence: if both are set, this
	// overwrites the IamConfiguration built from BucketPolicyOnly above.
	if ua.UniformBucketLevelAccess != nil {
		rb.IamConfiguration = &raw.BucketIamConfiguration{
			UniformBucketLevelAccess: &raw.BucketIamConfigurationUniformBucketLevelAccess{
				Enabled:         ua.UniformBucketLevelAccess.Enabled,
				ForceSendFields: []string{"Enabled"},
			},
		}
	}
	if ua.PublicAccessPrevention != PublicAccessPreventionUnknown {
		if rb.IamConfiguration == nil {
			rb.IamConfiguration = &raw.BucketIamConfiguration{}
		}
		rb.IamConfiguration.PublicAccessPrevention = ua.PublicAccessPrevention.String()
	}
	if ua.Encryption != nil {
		// An empty key name deletes the existing encryption configuration.
		if ua.Encryption.DefaultKMSKeyName == "" {
			rb.NullFields = append(rb.NullFields, "Encryption")
			rb.Encryption = nil
		} else {
			rb.Encryption = ua.Encryption.toRawBucketEncryption()
		}
	}
	if ua.Lifecycle != nil {
		rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
		rb.ForceSendFields = append(rb.ForceSendFields, "Lifecycle")
	}
	if ua.Logging != nil {
		// A zero-valued BucketLogging deletes the existing configuration.
		if *ua.Logging == (BucketLogging{}) {
			rb.NullFields = append(rb.NullFields, "Logging")
			rb.Logging = nil
		} else {
			rb.Logging = ua.Logging.toRawBucketLogging()
		}
	}
	if ua.Website != nil {
		// A zero-valued BucketWebsite deletes the existing configuration.
		if *ua.Website == (BucketWebsite{}) {
			rb.NullFields = append(rb.NullFields, "Website")
			rb.Website = nil
		} else {
			rb.Website = ua.Website.toRawBucketWebsite()
		}
	}
	if ua.Autoclass != nil {
		rb.Autoclass = &raw.BucketAutoclass{
			Enabled:              ua.Autoclass.Enabled,
			TerminalStorageClass: ua.Autoclass.TerminalStorageClass,
			ForceSendFields:      []string{"Enabled"},
		}
		rb.ForceSendFields = append(rb.ForceSendFields, "Autoclass")
	}
	if ua.SoftDeletePolicy != nil {
		// A zero RetentionDuration deletes the existing soft delete policy.
		if ua.SoftDeletePolicy.RetentionDuration == 0 {
			rb.NullFields = append(rb.NullFields, "SoftDeletePolicy")
			rb.SoftDeletePolicy = nil
		} else {
			rb.SoftDeletePolicy = ua.SoftDeletePolicy.toRawSoftDeletePolicy()
		}
	}
	if ua.PredefinedACL != "" {
		// Clear ACL or the call will fail.
		rb.Acl = nil
		rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
	}
	if ua.PredefinedDefaultObjectACL != "" {
		// Clear ACLs or the call will fail.
		rb.DefaultObjectAcl = nil
		rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
	}

	rb.StorageClass = ua.StorageClass
	rb.Rpo = ua.RPO.String()

	// Deleted labels become null-valued entries; if only deletions were
	// requested, the (empty) Labels map must still be force-sent.
	if ua.setLabels != nil || ua.deleteLabels != nil {
		rb.Labels = map[string]string{}
		for k, v := range ua.setLabels {
			rb.Labels[k] = v
		}
		if len(rb.Labels) == 0 && len(ua.deleteLabels) > 0 {
			rb.ForceSendFields = append(rb.ForceSendFields, "Labels")
		}
		for l := range ua.deleteLabels {
			rb.NullFields = append(rb.NullFields, "Labels."+l)
		}
	}
	return rb
}
  1340  
  1341  // If returns a new BucketHandle that applies a set of preconditions.
  1342  // Preconditions already set on the BucketHandle are ignored. The supplied
  1343  // BucketConditions must have exactly one field set to a non-zero value;
  1344  // otherwise an error will be returned from any operation on the BucketHandle.
  1345  // Operations on the new handle will return an error if the preconditions are not
  1346  // satisfied. The only valid preconditions for buckets are MetagenerationMatch
  1347  // and MetagenerationNotMatch.
  1348  func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
  1349  	b2 := *b
  1350  	b2.conds = &conds
  1351  	return &b2
  1352  }
  1353  
// BucketConditions constrain bucket methods to act on specific metagenerations.
//
// The zero value is an empty set of constraints. At most one of the two
// fields may be non-zero at a time.
type BucketConditions struct {
	// MetagenerationMatch specifies that the bucket must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the bucket must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}
  1368  
  1369  func (c *BucketConditions) validate(method string) error {
  1370  	if *c == (BucketConditions{}) {
  1371  		return fmt.Errorf("storage: %s: empty conditions", method)
  1372  	}
  1373  	if c.MetagenerationMatch != 0 && c.MetagenerationNotMatch != 0 {
  1374  		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
  1375  	}
  1376  	return nil
  1377  }
  1378  
  1379  // UserProject returns a new BucketHandle that passes the project ID as the user
  1380  // project for all subsequent calls. Calls with a user project will be billed to that
  1381  // project rather than to the bucket's owning project.
  1382  //
  1383  // A user project is required for all operations on Requester Pays buckets.
  1384  func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
  1385  	b2 := *b
  1386  	b2.userProject = projectID
  1387  	b2.acl.userProject = projectID
  1388  	b2.defaultObjectACL.userProject = projectID
  1389  	return &b2
  1390  }
  1391  
// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
	// The metageneration preconditions set via If (b.conds) are forwarded
	// to the transport client along with the handle's retry/user-project opts.
	o := makeStorageOpts(true, b.retry, b.userProject)
	return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
}
  1405  
  1406  // SetObjectRetention returns a new BucketHandle that will enable object retention
  1407  // on bucket creation. To enable object retention, you must use the returned
  1408  // handle to create the bucket. This has no effect on an already existing bucket.
  1409  // ObjectRetention is not enabled by default.
  1410  // ObjectRetention cannot be configured through the gRPC API.
  1411  func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle {
  1412  	b2 := *b
  1413  	b2.enableObjectRetention = &enable
  1414  	return &b2
  1415  }
  1416  
  1417  // applyBucketConds modifies the provided call using the conditions in conds.
  1418  // call is something that quacks like a *raw.WhateverCall.
  1419  func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
  1420  	if conds == nil {
  1421  		return nil
  1422  	}
  1423  	if err := conds.validate(method); err != nil {
  1424  		return err
  1425  	}
  1426  	cval := reflect.ValueOf(call)
  1427  	switch {
  1428  	case conds.MetagenerationMatch != 0:
  1429  		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
  1430  			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
  1431  		}
  1432  	case conds.MetagenerationNotMatch != 0:
  1433  		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
  1434  			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
  1435  		}
  1436  	}
  1437  	return nil
  1438  }
  1439  
  1440  // applyBucketConds modifies the provided request message using the conditions
  1441  // in conds. msg is a protobuf Message that has fields if_metageneration_match
  1442  // and if_metageneration_not_match.
  1443  func applyBucketCondsProto(method string, conds *BucketConditions, msg proto.Message) error {
  1444  	rmsg := msg.ProtoReflect()
  1445  
  1446  	if conds == nil {
  1447  		return nil
  1448  	}
  1449  	if err := conds.validate(method); err != nil {
  1450  		return err
  1451  	}
  1452  
  1453  	switch {
  1454  	case conds.MetagenerationMatch != 0:
  1455  		if !setConditionProtoField(rmsg, "if_metageneration_match", conds.MetagenerationMatch) {
  1456  			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
  1457  		}
  1458  	case conds.MetagenerationNotMatch != 0:
  1459  		if !setConditionProtoField(rmsg, "if_metageneration_not_match", conds.MetagenerationNotMatch) {
  1460  			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
  1461  		}
  1462  	}
  1463  	return nil
  1464  }
  1465  
  1466  func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
  1467  	if rp == nil {
  1468  		return nil
  1469  	}
  1470  	return &raw.BucketRetentionPolicy{
  1471  		RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
  1472  	}
  1473  }
  1474  
  1475  func (rp *RetentionPolicy) toProtoRetentionPolicy() *storagepb.Bucket_RetentionPolicy {
  1476  	if rp == nil {
  1477  		return nil
  1478  	}
  1479  	// RetentionPeriod must be greater than 0, so if it is 0, the user left it
  1480  	// unset, and so we should not send it in the request i.e. nil is sent.
  1481  	var dur *durationpb.Duration
  1482  	if rp.RetentionPeriod != 0 {
  1483  		dur = durationpb.New(rp.RetentionPeriod)
  1484  	}
  1485  	return &storagepb.Bucket_RetentionPolicy{
  1486  		RetentionDuration: dur,
  1487  	}
  1488  }
  1489  
  1490  func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
  1491  	if rp == nil || rp.EffectiveTime == "" {
  1492  		return nil, nil
  1493  	}
  1494  	t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
  1495  	if err != nil {
  1496  		return nil, err
  1497  	}
  1498  	return &RetentionPolicy{
  1499  		RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
  1500  		EffectiveTime:   t,
  1501  		IsLocked:        rp.IsLocked,
  1502  	}, nil
  1503  }
  1504  
  1505  func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *RetentionPolicy {
  1506  	if rp == nil || rp.GetEffectiveTime().AsTime().Unix() == 0 {
  1507  		return nil
  1508  	}
  1509  	return &RetentionPolicy{
  1510  		RetentionPeriod: rp.GetRetentionDuration().AsDuration(),
  1511  		EffectiveTime:   rp.GetEffectiveTime().AsTime(),
  1512  		IsLocked:        rp.GetIsLocked(),
  1513  	}
  1514  }
  1515  
  1516  func toBucketObjectRetention(or *raw.BucketObjectRetention) string {
  1517  	if or == nil {
  1518  		return ""
  1519  	}
  1520  	return or.Mode
  1521  }
  1522  
  1523  func toRawCORS(c []CORS) []*raw.BucketCors {
  1524  	var out []*raw.BucketCors
  1525  	for _, v := range c {
  1526  		out = append(out, &raw.BucketCors{
  1527  			MaxAgeSeconds:  int64(v.MaxAge / time.Second),
  1528  			Method:         v.Methods,
  1529  			Origin:         v.Origins,
  1530  			ResponseHeader: v.ResponseHeaders,
  1531  		})
  1532  	}
  1533  	return out
  1534  }
  1535  
  1536  func toProtoCORS(c []CORS) []*storagepb.Bucket_Cors {
  1537  	var out []*storagepb.Bucket_Cors
  1538  	for _, v := range c {
  1539  		out = append(out, &storagepb.Bucket_Cors{
  1540  			MaxAgeSeconds:  int32(v.MaxAge / time.Second),
  1541  			Method:         v.Methods,
  1542  			Origin:         v.Origins,
  1543  			ResponseHeader: v.ResponseHeaders,
  1544  		})
  1545  	}
  1546  	return out
  1547  }
  1548  
  1549  func toCORS(rc []*raw.BucketCors) []CORS {
  1550  	var out []CORS
  1551  	for _, v := range rc {
  1552  		out = append(out, CORS{
  1553  			MaxAge:          time.Duration(v.MaxAgeSeconds) * time.Second,
  1554  			Methods:         v.Method,
  1555  			Origins:         v.Origin,
  1556  			ResponseHeaders: v.ResponseHeader,
  1557  		})
  1558  	}
  1559  	return out
  1560  }
  1561  
  1562  func toCORSFromProto(rc []*storagepb.Bucket_Cors) []CORS {
  1563  	var out []CORS
  1564  	for _, v := range rc {
  1565  		out = append(out, CORS{
  1566  			MaxAge:          time.Duration(v.GetMaxAgeSeconds()) * time.Second,
  1567  			Methods:         v.GetMethod(),
  1568  			Origins:         v.GetOrigin(),
  1569  			ResponseHeaders: v.GetResponseHeader(),
  1570  		})
  1571  	}
  1572  	return out
  1573  }
  1574  
// toRawLifecycle converts l to its JSON-API representation. When there are
// no rules, the empty "Rule" field is force-sent so that an update can clear
// existing lifecycle rules on the server.
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
	var rl raw.BucketLifecycle
	if len(l.Rules) == 0 {
		// Omitting an empty field would leave server-side rules unchanged;
		// ForceSendFields makes the deletion explicit on the wire.
		rl.ForceSendFields = []string{"Rule"}
	}
	for _, r := range l.Rules {
		rr := &raw.BucketLifecycleRule{
			Action: &raw.BucketLifecycleRuleAction{
				Type:         r.Action.Type,
				StorageClass: r.Action.StorageClass,
			},
			Condition: &raw.BucketLifecycleRuleCondition{
				DaysSinceCustomTime:     r.Condition.DaysSinceCustomTime,
				DaysSinceNoncurrentTime: r.Condition.DaysSinceNoncurrentTime,
				MatchesPrefix:           r.Condition.MatchesPrefix,
				MatchesStorageClass:     r.Condition.MatchesStorageClasses,
				MatchesSuffix:           r.Condition.MatchesSuffix,
				NumNewerVersions:        r.Condition.NumNewerVersions,
			},
		}

		// AllObjects takes precedence when both AllObjects and AgeInDays are set.
		// Rationale: If you've opted into using AllObjects, it makes sense that you
		// understand the implications of how this option works with AgeInDays.
		if r.Condition.AllObjects {
			// AllObjects is expressed on the wire as an explicit Age of 0.
			rr.Condition.Age = googleapi.Int64(0)
			rr.Condition.ForceSendFields = []string{"Age"}
		} else if r.Condition.AgeInDays > 0 {
			rr.Condition.Age = googleapi.Int64(r.Condition.AgeInDays)
		}

		// Liveness is tri-state; "both" maps to a nil IsLive so the field is
		// omitted from the request.
		switch r.Condition.Liveness {
		case LiveAndArchived:
			rr.Condition.IsLive = nil
		case Live:
			rr.Condition.IsLive = googleapi.Bool(true)
		case Archived:
			rr.Condition.IsLive = googleapi.Bool(false)
		}

		// Date-only conditions are serialized as RFC 3339 dates; zero times
		// are omitted entirely.
		if !r.Condition.CreatedBefore.IsZero() {
			rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
		}
		if !r.Condition.CustomTimeBefore.IsZero() {
			rr.Condition.CustomTimeBefore = r.Condition.CustomTimeBefore.Format(rfc3339Date)
		}
		if !r.Condition.NoncurrentTimeBefore.IsZero() {
			rr.Condition.NoncurrentTimeBefore = r.Condition.NoncurrentTimeBefore.Format(rfc3339Date)
		}
		rl.Rule = append(rl.Rule, rr)
	}
	return &rl
}
  1628  
// toProtoLifecycle converts l to its gRPC representation. Unlike the raw
// variant, an empty rule set is sent as-is (no ForceSendFields mechanism in
// proto; absence vs. presence is carried by the message itself).
func toProtoLifecycle(l Lifecycle) *storagepb.Bucket_Lifecycle {
	var rl storagepb.Bucket_Lifecycle

	for _, r := range l.Rules {
		rr := &storagepb.Bucket_Lifecycle_Rule{
			Action: &storagepb.Bucket_Lifecycle_Rule_Action{
				Type:         r.Action.Type,
				StorageClass: r.Action.StorageClass,
			},
			Condition: &storagepb.Bucket_Lifecycle_Rule_Condition{
				// Note: The Apiary types use int64 (even though the Discovery
				// doc states "format: int32"), so the client types used int64,
				// but the proto uses int32 so we have a potentially lossy
				// conversion.
				DaysSinceCustomTime:     proto.Int32(int32(r.Condition.DaysSinceCustomTime)),
				DaysSinceNoncurrentTime: proto.Int32(int32(r.Condition.DaysSinceNoncurrentTime)),
				MatchesPrefix:           r.Condition.MatchesPrefix,
				MatchesStorageClass:     r.Condition.MatchesStorageClasses,
				MatchesSuffix:           r.Condition.MatchesSuffix,
				NumNewerVersions:        proto.Int32(int32(r.Condition.NumNewerVersions)),
			},
		}

		// Only set AgeDays in the proto if it is non-zero, or if the user has set
		// Condition.AllObjects. AllObjects (checked second) takes precedence and
		// is expressed as an explicit AgeDays of 0.
		if r.Condition.AgeInDays != 0 {
			rr.Condition.AgeDays = proto.Int32(int32(r.Condition.AgeInDays))
		}
		if r.Condition.AllObjects {
			rr.Condition.AgeDays = proto.Int32(0)
		}

		// Liveness is tri-state; "both" maps to a nil IsLive (field unset).
		switch r.Condition.Liveness {
		case LiveAndArchived:
			rr.Condition.IsLive = nil
		case Live:
			rr.Condition.IsLive = proto.Bool(true)
		case Archived:
			rr.Condition.IsLive = proto.Bool(false)
		}

		// Date-only conditions use the google.type.Date proto; zero times are
		// omitted entirely.
		if !r.Condition.CreatedBefore.IsZero() {
			rr.Condition.CreatedBefore = timeToProtoDate(r.Condition.CreatedBefore)
		}
		if !r.Condition.CustomTimeBefore.IsZero() {
			rr.Condition.CustomTimeBefore = timeToProtoDate(r.Condition.CustomTimeBefore)
		}
		if !r.Condition.NoncurrentTimeBefore.IsZero() {
			rr.Condition.NoncurrentTimeBefore = timeToProtoDate(r.Condition.NoncurrentTimeBefore)
		}
		rl.Rule = append(rl.Rule, rr)
	}
	return &rl
}
  1683  
// toLifecycle converts the JSON-API lifecycle configuration back to the
// client type. A nil input yields a zero-value Lifecycle.
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.Rule {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.Action.Type,
				StorageClass: rr.Action.StorageClass,
			},
			Condition: LifecycleCondition{
				DaysSinceCustomTime:     rr.Condition.DaysSinceCustomTime,
				DaysSinceNoncurrentTime: rr.Condition.DaysSinceNoncurrentTime,
				MatchesPrefix:           rr.Condition.MatchesPrefix,
				MatchesStorageClasses:   rr.Condition.MatchesStorageClass,
				MatchesSuffix:           rr.Condition.MatchesSuffix,
				NumNewerVersions:        rr.Condition.NumNewerVersions,
			},
		}
		// Age is a *int64 on the wire: nil means unset, and an explicit 0 is
		// the wire encoding of AllObjects (see toRawLifecycle).
		if rr.Condition.Age != nil {
			r.Condition.AgeInDays = *rr.Condition.Age
			if *rr.Condition.Age == 0 {
				r.Condition.AllObjects = true
			}
		}

		// nil IsLive means the rule applies to both live and archived objects.
		if rr.Condition.IsLive == nil {
			r.Condition.Liveness = LiveAndArchived
		} else if *rr.Condition.IsLive {
			r.Condition.Liveness = Live
		} else {
			r.Condition.Liveness = Archived
		}

		// Parse failures are deliberately ignored, leaving a zero time.
		if rr.Condition.CreatedBefore != "" {
			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
		}
		if rr.Condition.CustomTimeBefore != "" {
			r.Condition.CustomTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.CustomTimeBefore)
		}
		if rr.Condition.NoncurrentTimeBefore != "" {
			r.Condition.NoncurrentTimeBefore, _ = time.Parse(rfc3339Date, rr.Condition.NoncurrentTimeBefore)
		}
		l.Rules = append(l.Rules, r)
	}
	return l
}
  1732  
// toLifecycleFromProto converts the gRPC lifecycle configuration back to the
// client type. A nil input yields a zero-value Lifecycle.
func toLifecycleFromProto(rl *storagepb.Bucket_Lifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.GetRule() {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.GetAction().GetType(),
				StorageClass: rr.GetAction().GetStorageClass(),
			},
			Condition: LifecycleCondition{
				AgeInDays:               int64(rr.GetCondition().GetAgeDays()),
				DaysSinceCustomTime:     int64(rr.GetCondition().GetDaysSinceCustomTime()),
				DaysSinceNoncurrentTime: int64(rr.GetCondition().GetDaysSinceNoncurrentTime()),
				MatchesPrefix:           rr.GetCondition().GetMatchesPrefix(),
				MatchesStorageClasses:   rr.GetCondition().GetMatchesStorageClass(),
				MatchesSuffix:           rr.GetCondition().GetMatchesSuffix(),
				NumNewerVersions:        int64(rr.GetCondition().GetNumNewerVersions()),
			},
		}

		// Only set Condition.AllObjects if AgeDays is zero, not if it is nil
		// (an explicit 0 is the wire encoding of AllObjects; nil means unset).
		if rr.GetCondition().AgeDays != nil && rr.GetCondition().GetAgeDays() == 0 {
			r.Condition.AllObjects = true
		}

		// nil IsLive means the rule applies to both live and archived objects.
		if rr.GetCondition().IsLive == nil {
			r.Condition.Liveness = LiveAndArchived
		} else if rr.GetCondition().GetIsLive() {
			r.Condition.Liveness = Live
		} else {
			r.Condition.Liveness = Archived
		}

		// Date-only conditions arrive as google.type.Date; convert to UTC times.
		if rr.GetCondition().GetCreatedBefore() != nil {
			r.Condition.CreatedBefore = protoDateToUTCTime(rr.GetCondition().GetCreatedBefore())
		}
		if rr.GetCondition().GetCustomTimeBefore() != nil {
			r.Condition.CustomTimeBefore = protoDateToUTCTime(rr.GetCondition().GetCustomTimeBefore())
		}
		if rr.GetCondition().GetNoncurrentTimeBefore() != nil {
			r.Condition.NoncurrentTimeBefore = protoDateToUTCTime(rr.GetCondition().GetNoncurrentTimeBefore())
		}
		l.Rules = append(l.Rules, r)
	}
	return l
}
  1781  
  1782  func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
  1783  	if e == nil {
  1784  		return nil
  1785  	}
  1786  	return &raw.BucketEncryption{
  1787  		DefaultKmsKeyName: e.DefaultKMSKeyName,
  1788  	}
  1789  }
  1790  
  1791  func (e *BucketEncryption) toProtoBucketEncryption() *storagepb.Bucket_Encryption {
  1792  	if e == nil {
  1793  		return nil
  1794  	}
  1795  	return &storagepb.Bucket_Encryption{
  1796  		DefaultKmsKey: e.DefaultKMSKeyName,
  1797  	}
  1798  }
  1799  
  1800  func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
  1801  	if e == nil {
  1802  		return nil
  1803  	}
  1804  	return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
  1805  }
  1806  
  1807  func toBucketEncryptionFromProto(e *storagepb.Bucket_Encryption) *BucketEncryption {
  1808  	if e == nil {
  1809  		return nil
  1810  	}
  1811  	return &BucketEncryption{DefaultKMSKeyName: e.GetDefaultKmsKey()}
  1812  }
  1813  
  1814  func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
  1815  	if b == nil {
  1816  		return nil
  1817  	}
  1818  	return &raw.BucketLogging{
  1819  		LogBucket:       b.LogBucket,
  1820  		LogObjectPrefix: b.LogObjectPrefix,
  1821  	}
  1822  }
  1823  
  1824  func (b *BucketLogging) toProtoBucketLogging() *storagepb.Bucket_Logging {
  1825  	if b == nil {
  1826  		return nil
  1827  	}
  1828  	return &storagepb.Bucket_Logging{
  1829  		LogBucket:       bucketResourceName(globalProjectAlias, b.LogBucket),
  1830  		LogObjectPrefix: b.LogObjectPrefix,
  1831  	}
  1832  }
  1833  
  1834  func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
  1835  	if b == nil {
  1836  		return nil
  1837  	}
  1838  	return &BucketLogging{
  1839  		LogBucket:       b.LogBucket,
  1840  		LogObjectPrefix: b.LogObjectPrefix,
  1841  	}
  1842  }
  1843  
  1844  func toBucketLoggingFromProto(b *storagepb.Bucket_Logging) *BucketLogging {
  1845  	if b == nil {
  1846  		return nil
  1847  	}
  1848  	lb := parseBucketName(b.GetLogBucket())
  1849  	return &BucketLogging{
  1850  		LogBucket:       lb,
  1851  		LogObjectPrefix: b.GetLogObjectPrefix(),
  1852  	}
  1853  }
  1854  
  1855  func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
  1856  	if w == nil {
  1857  		return nil
  1858  	}
  1859  	return &raw.BucketWebsite{
  1860  		MainPageSuffix: w.MainPageSuffix,
  1861  		NotFoundPage:   w.NotFoundPage,
  1862  	}
  1863  }
  1864  
  1865  func (w *BucketWebsite) toProtoBucketWebsite() *storagepb.Bucket_Website {
  1866  	if w == nil {
  1867  		return nil
  1868  	}
  1869  	return &storagepb.Bucket_Website{
  1870  		MainPageSuffix: w.MainPageSuffix,
  1871  		NotFoundPage:   w.NotFoundPage,
  1872  	}
  1873  }
  1874  
  1875  func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
  1876  	if w == nil {
  1877  		return nil
  1878  	}
  1879  	return &BucketWebsite{
  1880  		MainPageSuffix: w.MainPageSuffix,
  1881  		NotFoundPage:   w.NotFoundPage,
  1882  	}
  1883  }
  1884  
  1885  func toBucketWebsiteFromProto(w *storagepb.Bucket_Website) *BucketWebsite {
  1886  	if w == nil {
  1887  		return nil
  1888  	}
  1889  	return &BucketWebsite{
  1890  		MainPageSuffix: w.GetMainPageSuffix(),
  1891  		NotFoundPage:   w.GetNotFoundPage(),
  1892  	}
  1893  }
  1894  
  1895  func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
  1896  	if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled {
  1897  		return BucketPolicyOnly{}
  1898  	}
  1899  	lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime)
  1900  	if err != nil {
  1901  		return BucketPolicyOnly{
  1902  			Enabled: true,
  1903  		}
  1904  	}
  1905  	return BucketPolicyOnly{
  1906  		Enabled:    true,
  1907  		LockedTime: lt,
  1908  	}
  1909  }
  1910  
  1911  func toBucketPolicyOnlyFromProto(b *storagepb.Bucket_IamConfig) BucketPolicyOnly {
  1912  	if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
  1913  		return BucketPolicyOnly{}
  1914  	}
  1915  	return BucketPolicyOnly{
  1916  		Enabled:    true,
  1917  		LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
  1918  	}
  1919  }
  1920  
  1921  func toUniformBucketLevelAccess(b *raw.BucketIamConfiguration) UniformBucketLevelAccess {
  1922  	if b == nil || b.UniformBucketLevelAccess == nil || !b.UniformBucketLevelAccess.Enabled {
  1923  		return UniformBucketLevelAccess{}
  1924  	}
  1925  	lt, err := time.Parse(time.RFC3339, b.UniformBucketLevelAccess.LockedTime)
  1926  	if err != nil {
  1927  		return UniformBucketLevelAccess{
  1928  			Enabled: true,
  1929  		}
  1930  	}
  1931  	return UniformBucketLevelAccess{
  1932  		Enabled:    true,
  1933  		LockedTime: lt,
  1934  	}
  1935  }
  1936  
  1937  func toUniformBucketLevelAccessFromProto(b *storagepb.Bucket_IamConfig) UniformBucketLevelAccess {
  1938  	if b == nil || !b.GetUniformBucketLevelAccess().GetEnabled() {
  1939  		return UniformBucketLevelAccess{}
  1940  	}
  1941  	return UniformBucketLevelAccess{
  1942  		Enabled:    true,
  1943  		LockedTime: b.GetUniformBucketLevelAccess().GetLockTime().AsTime(),
  1944  	}
  1945  }
  1946  
  1947  func toPublicAccessPrevention(b *raw.BucketIamConfiguration) PublicAccessPrevention {
  1948  	if b == nil {
  1949  		return PublicAccessPreventionUnknown
  1950  	}
  1951  	switch b.PublicAccessPrevention {
  1952  	case publicAccessPreventionInherited, publicAccessPreventionUnspecified:
  1953  		return PublicAccessPreventionInherited
  1954  	case publicAccessPreventionEnforced:
  1955  		return PublicAccessPreventionEnforced
  1956  	default:
  1957  		return PublicAccessPreventionUnknown
  1958  	}
  1959  }
  1960  
  1961  func toPublicAccessPreventionFromProto(b *storagepb.Bucket_IamConfig) PublicAccessPrevention {
  1962  	if b == nil {
  1963  		return PublicAccessPreventionUnknown
  1964  	}
  1965  	switch b.GetPublicAccessPrevention() {
  1966  	case publicAccessPreventionInherited, publicAccessPreventionUnspecified:
  1967  		return PublicAccessPreventionInherited
  1968  	case publicAccessPreventionEnforced:
  1969  		return PublicAccessPreventionEnforced
  1970  	default:
  1971  		return PublicAccessPreventionUnknown
  1972  	}
  1973  }
  1974  
  1975  func toRPO(b *raw.Bucket) RPO {
  1976  	if b == nil {
  1977  		return RPOUnknown
  1978  	}
  1979  	switch b.Rpo {
  1980  	case rpoDefault:
  1981  		return RPODefault
  1982  	case rpoAsyncTurbo:
  1983  		return RPOAsyncTurbo
  1984  	default:
  1985  		return RPOUnknown
  1986  	}
  1987  }
  1988  
  1989  func toRPOFromProto(b *storagepb.Bucket) RPO {
  1990  	if b == nil {
  1991  		return RPOUnknown
  1992  	}
  1993  	switch b.GetRpo() {
  1994  	case rpoDefault:
  1995  		return RPODefault
  1996  	case rpoAsyncTurbo:
  1997  		return RPOAsyncTurbo
  1998  	default:
  1999  		return RPOUnknown
  2000  	}
  2001  }
  2002  
  2003  func customPlacementFromRaw(c *raw.BucketCustomPlacementConfig) *CustomPlacementConfig {
  2004  	if c == nil {
  2005  		return nil
  2006  	}
  2007  	return &CustomPlacementConfig{DataLocations: c.DataLocations}
  2008  }
  2009  
  2010  func (c *CustomPlacementConfig) toRawCustomPlacement() *raw.BucketCustomPlacementConfig {
  2011  	if c == nil {
  2012  		return nil
  2013  	}
  2014  	return &raw.BucketCustomPlacementConfig{
  2015  		DataLocations: c.DataLocations,
  2016  	}
  2017  }
  2018  
  2019  func (c *CustomPlacementConfig) toProtoCustomPlacement() *storagepb.Bucket_CustomPlacementConfig {
  2020  	if c == nil {
  2021  		return nil
  2022  	}
  2023  	return &storagepb.Bucket_CustomPlacementConfig{
  2024  		DataLocations: c.DataLocations,
  2025  	}
  2026  }
  2027  
  2028  func customPlacementFromProto(c *storagepb.Bucket_CustomPlacementConfig) *CustomPlacementConfig {
  2029  	if c == nil {
  2030  		return nil
  2031  	}
  2032  	return &CustomPlacementConfig{DataLocations: c.GetDataLocations()}
  2033  }
  2034  
  2035  func (a *Autoclass) toRawAutoclass() *raw.BucketAutoclass {
  2036  	if a == nil {
  2037  		return nil
  2038  	}
  2039  	// Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime.
  2040  	return &raw.BucketAutoclass{
  2041  		Enabled:              a.Enabled,
  2042  		TerminalStorageClass: a.TerminalStorageClass,
  2043  	}
  2044  }
  2045  
  2046  func (a *Autoclass) toProtoAutoclass() *storagepb.Bucket_Autoclass {
  2047  	if a == nil {
  2048  		return nil
  2049  	}
  2050  	// Excluding read only fields ToggleTime and TerminalStorageClassUpdateTime.
  2051  	ba := &storagepb.Bucket_Autoclass{
  2052  		Enabled: a.Enabled,
  2053  	}
  2054  	if a.TerminalStorageClass != "" {
  2055  		ba.TerminalStorageClass = &a.TerminalStorageClass
  2056  	}
  2057  	return ba
  2058  }
  2059  
  2060  func toAutoclassFromRaw(a *raw.BucketAutoclass) *Autoclass {
  2061  	if a == nil || a.ToggleTime == "" {
  2062  		return nil
  2063  	}
  2064  	ac := &Autoclass{
  2065  		Enabled:              a.Enabled,
  2066  		TerminalStorageClass: a.TerminalStorageClass,
  2067  	}
  2068  	// Return ToggleTime and TSCUpdateTime only if parsed with valid values.
  2069  	t, err := time.Parse(time.RFC3339, a.ToggleTime)
  2070  	if err == nil {
  2071  		ac.ToggleTime = t
  2072  	}
  2073  	ut, err := time.Parse(time.RFC3339, a.TerminalStorageClassUpdateTime)
  2074  	if err == nil {
  2075  		ac.TerminalStorageClassUpdateTime = ut
  2076  	}
  2077  	return ac
  2078  }
  2079  
  2080  func toAutoclassFromProto(a *storagepb.Bucket_Autoclass) *Autoclass {
  2081  	if a == nil || a.GetToggleTime().AsTime().Unix() == 0 {
  2082  		return nil
  2083  	}
  2084  	return &Autoclass{
  2085  		Enabled:                        a.GetEnabled(),
  2086  		ToggleTime:                     a.GetToggleTime().AsTime(),
  2087  		TerminalStorageClass:           a.GetTerminalStorageClass(),
  2088  		TerminalStorageClassUpdateTime: a.GetTerminalStorageClassUpdateTime().AsTime(),
  2089  	}
  2090  }
  2091  
  2092  func (p *SoftDeletePolicy) toRawSoftDeletePolicy() *raw.BucketSoftDeletePolicy {
  2093  	if p == nil {
  2094  		return nil
  2095  	}
  2096  	// Excluding read only field EffectiveTime.
  2097  	return &raw.BucketSoftDeletePolicy{
  2098  		RetentionDurationSeconds: int64(p.RetentionDuration.Seconds()),
  2099  	}
  2100  }
  2101  
  2102  func (p *SoftDeletePolicy) toProtoSoftDeletePolicy() *storagepb.Bucket_SoftDeletePolicy {
  2103  	if p == nil {
  2104  		return nil
  2105  	}
  2106  	// Excluding read only field EffectiveTime.
  2107  	return &storagepb.Bucket_SoftDeletePolicy{
  2108  		RetentionDuration: durationpb.New(p.RetentionDuration),
  2109  	}
  2110  }
  2111  
  2112  func toSoftDeletePolicyFromRaw(p *raw.BucketSoftDeletePolicy) *SoftDeletePolicy {
  2113  	if p == nil {
  2114  		return nil
  2115  	}
  2116  
  2117  	policy := &SoftDeletePolicy{
  2118  		RetentionDuration: time.Duration(p.RetentionDurationSeconds) * time.Second,
  2119  	}
  2120  
  2121  	// Return EffectiveTime only if parsed to a valid value.
  2122  	if t, err := time.Parse(time.RFC3339, p.EffectiveTime); err == nil {
  2123  		policy.EffectiveTime = t
  2124  	}
  2125  
  2126  	return policy
  2127  }
  2128  
  2129  func toSoftDeletePolicyFromProto(p *storagepb.Bucket_SoftDeletePolicy) *SoftDeletePolicy {
  2130  	if p == nil {
  2131  		return nil
  2132  	}
  2133  	return &SoftDeletePolicy{
  2134  		EffectiveTime:     p.GetEffectiveTime().AsTime(),
  2135  		RetentionDuration: p.GetRetentionDuration().AsDuration(),
  2136  	}
  2137  }
  2138  
  2139  // Objects returns an iterator over the objects in the bucket that match the
  2140  // Query q. If q is nil, no filtering is done. Objects will be iterated over
  2141  // lexicographically by name.
  2142  //
  2143  // Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
  2144  func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
  2145  	o := makeStorageOpts(true, b.retry, b.userProject)
  2146  	return b.c.tc.ListObjects(ctx, b.name, q, o...)
  2147  }
  2148  
  2149  // Retryer returns a bucket handle that is configured with custom retry
  2150  // behavior as specified by the options that are passed to it. All operations
  2151  // on the new handle will use the customized retry configuration.
  2152  // Retry options set on a object handle will take precedence over options set on
  2153  // the bucket handle.
  2154  // These retry options will merge with the client's retry configuration (if set)
  2155  // for the returned handle. Options passed into this method will take precedence
  2156  // over retry options on the client. Note that you must explicitly pass in each
  2157  // option you want to override.
  2158  func (b *BucketHandle) Retryer(opts ...RetryOption) *BucketHandle {
  2159  	b2 := *b
  2160  	var retry *retryConfig
  2161  	if b.retry != nil {
  2162  		// merge the options with the existing retry
  2163  		retry = b.retry
  2164  	} else {
  2165  		retry = &retryConfig{}
  2166  	}
  2167  	for _, opt := range opts {
  2168  		opt.apply(retry)
  2169  	}
  2170  	b2.retry = retry
  2171  	b2.acl.retry = retry
  2172  	b2.defaultObjectACL.retry = retry
  2173  	return &b2
  2174  }
  2175  
// An ObjectIterator is an iterator over ObjectAttrs.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
type ObjectIterator struct {
	ctx      context.Context    // context used for the underlying list calls
	query    Query              // query this iterator was created with
	pageInfo *iterator.PageInfo // pagination state, exposed via PageInfo
	nextFunc func() error       // fetches the next page into items; set by the iterator's constructor (see ListObjects)
	items    []*ObjectAttrs     // buffered results not yet returned by Next
}
  2186  
  2187  // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
  2188  //
  2189  // Note: This method is not safe for concurrent operations without explicit synchronization.
  2190  func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
  2191  
  2192  // Next returns the next result. Its second return value is iterator.Done if
  2193  // there are no more results. Once Next returns iterator.Done, all subsequent
  2194  // calls will return iterator.Done.
  2195  //
  2196  // In addition, if Next returns an error other than iterator.Done, all
  2197  // subsequent calls will return the same error. To continue iteration, a new
  2198  // `ObjectIterator` must be created. Since objects are ordered lexicographically
  2199  // by name, `Query.StartOffset` can be used to create a new iterator which will
  2200  // start at the desired place. See
  2201  // https://pkg.go.dev/cloud.google.com/go/storage?tab=doc#hdr-Listing_objects.
  2202  //
  2203  // If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will
  2204  // have a non-empty Prefix field, and a zero value for all other fields. These
  2205  // represent prefixes.
  2206  //
  2207  // Note: This method is not safe for concurrent operations without explicit synchronization.
  2208  func (it *ObjectIterator) Next() (*ObjectAttrs, error) {
  2209  	if err := it.nextFunc(); err != nil {
  2210  		return nil, err
  2211  	}
  2212  	item := it.items[0]
  2213  	it.items = it.items[1:]
  2214  	return item, nil
  2215  }
  2216  
  2217  // Buckets returns an iterator over the buckets in the project. You may
  2218  // optionally set the iterator's Prefix field to restrict the list to buckets
  2219  // whose names begin with the prefix. By default, all buckets in the project
  2220  // are returned.
  2221  //
  2222  // Note: The returned iterator is not safe for concurrent operations without explicit synchronization.
  2223  func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator {
  2224  	o := makeStorageOpts(true, c.retry, "")
  2225  	return c.tc.ListBuckets(ctx, projectID, o...)
  2226  }
  2227  
// A BucketIterator is an iterator over BucketAttrs.
//
// Note: This iterator is not safe for concurrent operations without explicit synchronization.
type BucketIterator struct {
	// Prefix restricts the iterator to buckets whose names begin with it.
	Prefix string

	ctx       context.Context    // context used for the underlying list calls
	projectID string             // project whose buckets are listed
	buckets   []*BucketAttrs     // buffered results not yet returned by Next
	pageInfo  *iterator.PageInfo // pagination state, exposed via PageInfo
	nextFunc  func() error       // fetches the next page into buckets; set by the iterator's constructor (see ListBuckets)
}
  2241  
  2242  // Next returns the next result. Its second return value is iterator.Done if
  2243  // there are no more results. Once Next returns iterator.Done, all subsequent
  2244  // calls will return iterator.Done.
  2245  //
  2246  // Note: This method is not safe for concurrent operations without explicit synchronization.
  2247  func (it *BucketIterator) Next() (*BucketAttrs, error) {
  2248  	if err := it.nextFunc(); err != nil {
  2249  		return nil, err
  2250  	}
  2251  	b := it.buckets[0]
  2252  	it.buckets = it.buckets[1:]
  2253  	return b, nil
  2254  }
  2255  
  2256  // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
  2257  //
  2258  // Note: This method is not safe for concurrent operations without explicit synchronization.
  2259  func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
  2260  
  2261  // RPO (Recovery Point Objective) configures the turbo replication feature. See
  2262  // https://cloud.google.com/storage/docs/managing-turbo-replication for more information.
  2263  type RPO int
  2264  
  2265  const (
  2266  	// RPOUnknown is a zero value. It may be returned from bucket.Attrs() if RPO
  2267  	// is not present in the bucket metadata, that is, the bucket is not dual-region.
  2268  	// This value is also used if the RPO field is not set in a call to GCS.
  2269  	RPOUnknown RPO = iota
  2270  
  2271  	// RPODefault represents default replication. It is used to reset RPO on an
  2272  	// existing bucket  that has this field set to RPOAsyncTurbo. Otherwise it
  2273  	// is equivalent to RPOUnknown, and is always ignored. This value is valid
  2274  	// for dual- or multi-region buckets.
  2275  	RPODefault
  2276  
  2277  	// RPOAsyncTurbo represents turbo replication and is used to enable Turbo
  2278  	// Replication on a bucket. This value is only valid for dual-region buckets.
  2279  	RPOAsyncTurbo
  2280  
  2281  	rpoUnknown    string = ""
  2282  	rpoDefault           = "DEFAULT"
  2283  	rpoAsyncTurbo        = "ASYNC_TURBO"
  2284  )
  2285  
  2286  func (rpo RPO) String() string {
  2287  	switch rpo {
  2288  	case RPODefault:
  2289  		return rpoDefault
  2290  	case RPOAsyncTurbo:
  2291  		return rpoAsyncTurbo
  2292  	default:
  2293  		return rpoUnknown
  2294  	}
  2295  }
  2296  
// protoDateToUTCTime returns a new Time based on the google.type.Date, in UTC.
//
// Hours, minutes, seconds, and nanoseconds are set to 0.
// It is a convenience wrapper around protoDateToTime with time.UTC.
func protoDateToUTCTime(d *dpb.Date) time.Time {
	return protoDateToTime(d, time.UTC)
}
  2303  
  2304  // protoDateToTime returns a new Time based on the google.type.Date and provided
  2305  // *time.Location.
  2306  //
  2307  // Hours, minutes, seconds, and nanoseconds are set to 0.
  2308  func protoDateToTime(d *dpb.Date, l *time.Location) time.Time {
  2309  	return time.Date(int(d.GetYear()), time.Month(d.GetMonth()), int(d.GetDay()), 0, 0, 0, 0, l)
  2310  }
  2311  
  2312  // timeToProtoDate returns a new google.type.Date based on the provided time.Time.
  2313  // The location is ignored, as is anything more precise than the day.
  2314  func timeToProtoDate(t time.Time) *dpb.Date {
  2315  	return &dpb.Date{
  2316  		Year:  int32(t.Year()),
  2317  		Month: int32(t.Month()),
  2318  		Day:   int32(t.Day()),
  2319  	}
  2320  }
  2321  

View as plain text