...

Source file src/cloud.google.com/go/bigquery/table.go

Documentation: cloud.google.com/go/bigquery

     1  // Copyright 2015 Google LLC
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package bigquery
    16  
    17  import (
    18  	"context"
    19  	"errors"
    20  	"fmt"
    21  	"time"
    22  
    23  	"cloud.google.com/go/internal/optional"
    24  	"cloud.google.com/go/internal/trace"
    25  	bq "google.golang.org/api/bigquery/v2"
    26  )
    27  
    28  // A Table is a reference to a BigQuery table.
    29  type Table struct {
    30  	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
    31  	// In this case the result will be stored in an ephemeral table.
    32  	ProjectID string
    33  	DatasetID string
    34  	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
    35  	// The maximum length is 1,024 characters.
    36  	TableID string
    37  
    38  	c *Client
    39  }
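
A minimal sketch of obtaining a Table reference (the dataset and table IDs below are placeholders; assumes an already-constructed *Client):

	// exampleTableReference is a hypothetical helper, not part of this file.
	func exampleTableReference(client *Client) {
		tbl := client.Dataset("my_dataset").Table("my_table")
		// Prints the reference in projectID:datasetID.tableID form.
		fmt.Println(tbl.FullyQualifiedName())
	}
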
    40  
    41  // TableMetadata contains information about a BigQuery table.
    42  type TableMetadata struct {
    43  	// The following fields can be set when creating a table.
    44  
    45  	// The user-friendly name for the table.
    46  	Name string
    47  
    48  	// Output-only location of the table, based on the encapsulating dataset.
    49  	Location string
    50  
    51  	// The user-friendly description of the table.
    52  	Description string
    53  
    54  	// The table schema. If provided on create, ViewQuery must be empty.
    55  	Schema Schema
    56  
    57  	// If non-nil, this table is a materialized view.
    58  	MaterializedView *MaterializedViewDefinition
    59  
    60  	// The query to use for a logical view. If provided on create, Schema must be nil.
    61  	ViewQuery string
    62  
    63  	// Use Legacy SQL for the view query.
    64  	// At most one of UseLegacySQL and UseStandardSQL can be true.
    65  	UseLegacySQL bool
    66  
    67  	// Use Standard SQL for the view query. The default.
    68  	// At most one of UseLegacySQL and UseStandardSQL can be true.
    69  	// Deprecated: use UseLegacySQL.
    70  	UseStandardSQL bool
    71  
    72  	// If non-nil, the table is partitioned by time. Only one of
    73  	// time partitioning or range partitioning can be specified.
    74  	TimePartitioning *TimePartitioning
    75  
    76  	// If non-nil, the table is partitioned by integer range.  Only one of
    77  	// time partitioning or range partitioning can be specified.
    78  	RangePartitioning *RangePartitioning
    79  
    80  	// If set to true, queries that reference this table must specify a
    81  	// partition filter (e.g. a WHERE clause) that can be used to eliminate
    82  	// partitions. Used to prevent unintentional full data scans on large
    83  	// partitioned tables.
    84  	RequirePartitionFilter bool
    85  
    86  	// Clustering specifies the data clustering configuration for the table.
    87  	Clustering *Clustering
    88  
    89  	// The time when this table expires. If set, this table will expire at the
    90  	// specified time. Expired tables will be deleted and their storage
    91  	// reclaimed. The zero value is ignored.
    92  	ExpirationTime time.Time
    93  
    94  	// User-provided labels.
    95  	Labels map[string]string
    96  
    97  	// Information about a table stored outside of BigQuery.
    98  	ExternalDataConfig *ExternalDataConfig
    99  
   100  	// Custom encryption configuration (e.g., Cloud KMS keys).
   101  	EncryptionConfig *EncryptionConfig
   102  
   103  	// All the fields below are read-only.
   104  
   105  	FullID           string // An opaque ID uniquely identifying the table.
   106  	Type             TableType
   107  	CreationTime     time.Time
   108  	LastModifiedTime time.Time
   109  
   110  	// The size of the table in bytes.
   111  	// This does not include data that is being buffered during a streaming insert.
   112  	NumBytes int64
   113  
   114  	// The number of bytes in the table considered "long-term storage" for reduced
   115  	// billing purposes.  See https://cloud.google.com/bigquery/pricing#long-term-storage
   116  	// for more information.
   117  	NumLongTermBytes int64
   118  
   119  	// The number of rows of data in this table.
   120  	// This does not include data that is being buffered during a streaming insert.
   121  	NumRows uint64
   122  
   123  	// SnapshotDefinition contains additional information about the provenance of a
   124  	// given snapshot table.
   125  	SnapshotDefinition *SnapshotDefinition
   126  
   127  	// CloneDefinition contains additional information about the provenance of a
   128  	// given cloned table.
   129  	CloneDefinition *CloneDefinition
   130  
   131  	// Contains information regarding this table's streaming buffer, if one is
   132  	// present. This field will be nil if the table is not being streamed to or if
   133  	// there is no data in the streaming buffer.
   134  	StreamingBuffer *StreamingBuffer
   135  
   136  	// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
   137  	// ensure that the metadata hasn't changed since it was read.
   138  	ETag string
   139  
   140  	// Defines the default collation specification of new STRING fields
   141  	// in the table. During table creation or update, if a STRING field is added
   142  	// to this table without explicit collation specified, then the table inherits
   143  	// the table default collation. A change to this field affects only fields
   144  	// added afterwards, and does not alter the existing fields.
   145  	// The following values are supported:
   146  	//   - 'und:ci': undetermined locale, case insensitive.
   147  	//   - '': empty string. Default to case-sensitive behavior.
   148  	// More information: https://cloud.google.com/bigquery/docs/reference/standard-sql/collation-concepts
   149  	DefaultCollation string
   150  
   151  	// TableConstraints contains table primary and foreign keys constraints.
   152  	// Present only if the table has primary or foreign keys.
   153  	TableConstraints *TableConstraints
   154  
   155  	// The tags associated with this table. Tag
   156  	// keys are globally unique. See additional information on tags
   157  	// (https://cloud.google.com/iam/docs/tags-access-control#definitions).
   158  	// An object containing a list of "key": value pairs. The key is the
   159  	// namespaced friendly name of the tag key, e.g. "12345/environment"
   160  	// where 12345 is the parent ID. The value is the friendly short name of the
   161  	// tag value, e.g. "production".
   162  	ResourceTags map[string]string
   163  }
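
As one sketch of how these fields are used on create, the hypothetical helper below configures a logical view (dataset, view name, and query are placeholders); note that ViewQuery and Schema are mutually exclusive when creating a table:

	// exampleCreateView is illustrative only.
	func exampleCreateView(ctx context.Context, client *Client) error {
		view := client.Dataset("my_dataset").Table("my_view")
		return view.Create(ctx, &TableMetadata{
			Name:        "Recent events",
			Description: "Events from the last 7 days",
			// ViewQuery is set, so Schema must be left nil.
			ViewQuery: "SELECT * FROM `my-project.my_dataset.events` WHERE ts > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 7 DAY)",
			Labels:    map[string]string{"team": "analytics"},
		})
	}
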
   164  
   165  // TableConstraints defines the primary key and foreign key of a table.
   166  type TableConstraints struct {
   167  	// PrimaryKey constraint on a table's columns.
   168  	// Present only if the table has a primary key.
   169  	// The primary key is not enforced.
   170  	PrimaryKey *PrimaryKey
   171  
   172  	// ForeignKeys represent a list of foreign keys constraints.
   173  	// Foreign keys are not enforced.
   174  	ForeignKeys []*ForeignKey
   175  }
   176  
   177  // PrimaryKey represents the primary key constraint on a table's columns.
   178  type PrimaryKey struct {
   179  	// Columns that compose the primary key constraint.
   180  	Columns []string
   181  }
   182  
   183  func (pk *PrimaryKey) toBQ() *bq.TableConstraintsPrimaryKey {
   184  	return &bq.TableConstraintsPrimaryKey{
   185  		Columns: pk.Columns,
   186  	}
   187  }
   188  
   189  func bqToPrimaryKey(tc *bq.TableConstraints) *PrimaryKey {
   190  	if tc.PrimaryKey == nil {
   191  		return nil
   192  	}
   193  	return &PrimaryKey{
   194  		Columns: tc.PrimaryKey.Columns,
   195  	}
   196  }
   197  
   198  // ForeignKey represents a foreign key constraint on a table's columns.
   199  type ForeignKey struct {
   200  	// Foreign key constraint name.
   201  	Name string
   202  
   203  	// Table that holds the primary key and is referenced by this foreign key.
   204  	ReferencedTable *Table
   205  
   206  	// Columns that compose the foreign key.
   207  	ColumnReferences []*ColumnReference
   208  }
   209  
   210  func (fk *ForeignKey) toBQ() *bq.TableConstraintsForeignKeys {
   211  	colRefs := []*bq.TableConstraintsForeignKeysColumnReferences{}
   212  	for _, colRef := range fk.ColumnReferences {
   213  		colRefs = append(colRefs, colRef.toBQ())
   214  	}
   215  	return &bq.TableConstraintsForeignKeys{
   216  		Name: fk.Name,
   217  		ReferencedTable: &bq.TableConstraintsForeignKeysReferencedTable{
   218  			DatasetId: fk.ReferencedTable.DatasetID,
   219  			ProjectId: fk.ReferencedTable.ProjectID,
   220  			TableId:   fk.ReferencedTable.TableID,
   221  		},
   222  		ColumnReferences: colRefs,
   223  	}
   224  }
   225  
   226  func bqToForeignKeys(tc *bq.TableConstraints, c *Client) []*ForeignKey {
   227  	fks := []*ForeignKey{}
   228  	for _, fk := range tc.ForeignKeys {
   229  		colRefs := []*ColumnReference{}
   230  		for _, colRef := range fk.ColumnReferences {
   231  			colRefs = append(colRefs, &ColumnReference{
   232  				ReferencedColumn:  colRef.ReferencedColumn,
   233  				ReferencingColumn: colRef.ReferencingColumn,
   234  			})
   235  		}
   236  		fks = append(fks, &ForeignKey{
   237  			Name:             fk.Name,
   238  			ReferencedTable:  c.DatasetInProject(fk.ReferencedTable.ProjectId, fk.ReferencedTable.DatasetId).Table(fk.ReferencedTable.TableId),
   239  			ColumnReferences: colRefs,
   240  		})
   241  	}
   242  	return fks
   243  }
   244  
   245  // ColumnReference represents the pair of the foreign key column and primary key column.
   246  type ColumnReference struct {
   247  	// ReferencingColumn is the column in the current table that composes the foreign key.
   248  	ReferencingColumn string
   249  	// ReferencedColumn is the column in the primary key of the foreign table that
   250  	// is referenced by the ReferencingColumn.
   251  	ReferencedColumn string
   252  }
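
A sketch of how these constraint types compose (table and column names are illustrative assumptions); the constraints are metadata only and are not enforced:

	// exampleConstraints is a hypothetical example: an orders table
	// whose customer_id column references a customers table.
	func exampleConstraints(c *Client) *TableConstraints {
		return &TableConstraints{
			PrimaryKey: &PrimaryKey{Columns: []string{"order_id"}},
			ForeignKeys: []*ForeignKey{{
				Name:            "fk_customer",
				ReferencedTable: c.Dataset("my_dataset").Table("customers"),
				ColumnReferences: []*ColumnReference{{
					ReferencingColumn: "customer_id",
					ReferencedColumn:  "id",
				}},
			}},
		}
	}
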
   253  
   254  func (colRef *ColumnReference) toBQ() *bq.TableConstraintsForeignKeysColumnReferences {
   255  	return &bq.TableConstraintsForeignKeysColumnReferences{
   256  		ReferencedColumn:  colRef.ReferencedColumn,
   257  		ReferencingColumn: colRef.ReferencingColumn,
   258  	}
   259  }
   260  
   261  // TableCreateDisposition specifies the circumstances under which the destination table will be created.
   262  // Default is CreateIfNeeded.
   263  type TableCreateDisposition string
   264  
   265  const (
   266  	// CreateIfNeeded will create the table if it does not already exist.
   267  	// Tables are created atomically on successful completion of a job.
   268  	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"
   269  
   270  	// CreateNever ensures the table must already exist and will not be
   271  	// automatically created.
   272  	CreateNever TableCreateDisposition = "CREATE_NEVER"
   273  )
   274  
   275  // TableWriteDisposition specifies how existing data in a destination table is treated.
   276  // Default is WriteAppend.
   277  type TableWriteDisposition string
   278  
   279  const (
   280  	// WriteAppend will append to any existing data in the destination table.
   281  	// Data is appended atomically on successful completion of a job.
   282  	WriteAppend TableWriteDisposition = "WRITE_APPEND"
   283  
   284  	// WriteTruncate overwrites the existing data in the destination table.
   285  	// Data is overwritten atomically on successful completion of a job.
   286  	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"
   287  
   288  	// WriteEmpty fails writes if the destination table already contains data.
   289  	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
   290  )
   291  
   292  // TableType is the type of table.
   293  type TableType string
   294  
   295  const (
   296  	// RegularTable is a regular table.
   297  	RegularTable TableType = "TABLE"
   298  	// ViewTable is a table type describing that the table is a logical view.
   299  	// See more information at https://cloud.google.com/bigquery/docs/views.
   300  	ViewTable TableType = "VIEW"
   301  	// ExternalTable is a table type describing that the table is an external
   302  	// table (also known as a federated data source). See more information at
   303  	// https://cloud.google.com/bigquery/external-data-sources.
   304  	ExternalTable TableType = "EXTERNAL"
   305  	// MaterializedView represents a managed storage table that's derived from
   306  	// a base table.
   307  	MaterializedView TableType = "MATERIALIZED_VIEW"
   308  	// Snapshot represents an immutable point in time snapshot of some other
   309  	// table.
   310  	Snapshot TableType = "SNAPSHOT"
   311  )
   312  
   313  // MaterializedViewDefinition contains information for materialized views.
   314  type MaterializedViewDefinition struct {
   315  	// EnableRefresh governs whether the derived view is updated to reflect
   316  	// changes in the base table.
   317  	EnableRefresh bool
   318  
   319  	// LastRefreshTime reports the time, in millisecond precision, that the
   320  	// materialized view was last updated.
   321  	LastRefreshTime time.Time
   322  
   323  	// Query contains the SQL query used to define the materialized view.
   324  	Query string
   325  
   326  	// RefreshInterval defines the maximum frequency, in millisecond precision,
   327  	// at which this materialized view will be refreshed.
   328  	RefreshInterval time.Duration
   329  
   330  	// AllowNonIncrementalDefinition for materialized view definition.
   331  	// The default value is false.
   332  	AllowNonIncrementalDefinition bool
   333  
   334  	// MaxStaleness of data that could be returned when materialized
   335  	// view is queried.
   336  	MaxStaleness *IntervalValue
   337  }
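
A sketch of a materialized view configuration (the dataset, view name, query, and refresh interval are placeholders):

	// exampleMaterializedView is illustrative only: a materialized view
	// refreshed automatically at most once per hour.
	func exampleMaterializedView(ctx context.Context, client *Client) error {
		mv := client.Dataset("my_dataset").Table("daily_totals")
		return mv.Create(ctx, &TableMetadata{
			MaterializedView: &MaterializedViewDefinition{
				Query:           "SELECT d, SUM(amount) AS total FROM `my-project.my_dataset.sales` GROUP BY d",
				EnableRefresh:   true,
				RefreshInterval: time.Hour,
			},
		})
	}
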
   338  
   339  func (mvd *MaterializedViewDefinition) toBQ() *bq.MaterializedViewDefinition {
   340  	if mvd == nil {
   341  		return nil
   342  	}
   343  	maxStaleness := ""
   344  	if mvd.MaxStaleness != nil {
   345  		maxStaleness = mvd.MaxStaleness.String()
   346  	}
   347  	return &bq.MaterializedViewDefinition{
   348  		EnableRefresh:                 mvd.EnableRefresh,
   349  		Query:                         mvd.Query,
   350  		LastRefreshTime:               mvd.LastRefreshTime.UnixNano() / 1e6,
   351  		RefreshIntervalMs:             int64(mvd.RefreshInterval) / 1e6,
   352  		AllowNonIncrementalDefinition: mvd.AllowNonIncrementalDefinition,
   353  		MaxStaleness:                  maxStaleness,
   354  		// force sending the bool in all cases due to how Go handles false.
   355  		ForceSendFields: []string{"EnableRefresh", "AllowNonIncrementalDefinition"},
   356  	}
   357  }
   358  
   359  func bqToMaterializedViewDefinition(q *bq.MaterializedViewDefinition) *MaterializedViewDefinition {
   360  	if q == nil {
   361  		return nil
   362  	}
   363  	var maxStaleness *IntervalValue
   364  	if q.MaxStaleness != "" {
   365  		maxStaleness, _ = ParseInterval(q.MaxStaleness)
   366  	}
   367  	return &MaterializedViewDefinition{
   368  		EnableRefresh:                 q.EnableRefresh,
   369  		Query:                         q.Query,
   370  		LastRefreshTime:               unixMillisToTime(q.LastRefreshTime),
   371  		RefreshInterval:               time.Duration(q.RefreshIntervalMs) * time.Millisecond,
   372  		AllowNonIncrementalDefinition: q.AllowNonIncrementalDefinition,
   373  		MaxStaleness:                  maxStaleness,
   374  	}
   375  }
   376  
   377  // SnapshotDefinition provides metadata related to the origin of a snapshot.
   378  type SnapshotDefinition struct {
   379  
   380  	// BaseTableReference describes the ID of the table that this snapshot
   381  	// came from.
   382  	BaseTableReference *Table
   383  
   384  	// SnapshotTime indicates when the base table was snapshotted.
   385  	SnapshotTime time.Time
   386  }
   387  
   388  func (sd *SnapshotDefinition) toBQ() *bq.SnapshotDefinition {
   389  	if sd == nil {
   390  		return nil
   391  	}
   392  	return &bq.SnapshotDefinition{
   393  		BaseTableReference: sd.BaseTableReference.toBQ(),
   394  		SnapshotTime:       sd.SnapshotTime.Format(time.RFC3339),
   395  	}
   396  }
   397  
   398  func bqToSnapshotDefinition(q *bq.SnapshotDefinition, c *Client) *SnapshotDefinition {
   399  	if q == nil {
   400  		return nil
   401  	}
   402  	sd := &SnapshotDefinition{
   403  		BaseTableReference: bqToTable(q.BaseTableReference, c),
   404  	}
   405  	// It's possible we could fail to populate SnapshotTime if we fail to parse
   406  	// the backend representation.
   407  	if t, err := time.Parse(time.RFC3339, q.SnapshotTime); err == nil {
   408  		sd.SnapshotTime = t
   409  	}
   410  	return sd
   411  }
   412  
   413  // CloneDefinition provides metadata related to the origin of a clone.
   414  type CloneDefinition struct {
   415  
   416  	// BaseTableReference describes the ID of the table that this clone
   417  	// came from.
   418  	BaseTableReference *Table
   419  
   420  	// CloneTime indicates when the base table was cloned.
   421  	CloneTime time.Time
   422  }
   423  
   424  func (cd *CloneDefinition) toBQ() *bq.CloneDefinition {
   425  	if cd == nil {
   426  		return nil
   427  	}
   428  	return &bq.CloneDefinition{
   429  		BaseTableReference: cd.BaseTableReference.toBQ(),
   430  		CloneTime:          cd.CloneTime.Format(time.RFC3339),
   431  	}
   432  }
   433  
   434  func bqToCloneDefinition(q *bq.CloneDefinition, c *Client) *CloneDefinition {
   435  	if q == nil {
   436  		return nil
   437  	}
   438  	cd := &CloneDefinition{
   439  		BaseTableReference: bqToTable(q.BaseTableReference, c),
   440  	}
   441  	// It's possible we could fail to populate CloneTime if we fail to parse
   442  	// the backend representation.
   443  	if t, err := time.Parse(time.RFC3339, q.CloneTime); err == nil {
   444  		cd.CloneTime = t
   445  	}
   446  	return cd
   447  }
   448  
   449  // TimePartitioningType defines the interval used to partition managed data.
   450  type TimePartitioningType string
   451  
   452  const (
   453  	// DayPartitioningType uses a day-based interval for time partitioning.
   454  	DayPartitioningType TimePartitioningType = "DAY"
   455  
   456  	// HourPartitioningType uses an hour-based interval for time partitioning.
   457  	HourPartitioningType TimePartitioningType = "HOUR"
   458  
   459  	// MonthPartitioningType uses a month-based interval for time partitioning.
   460  	MonthPartitioningType TimePartitioningType = "MONTH"
   461  
   462  	// YearPartitioningType uses a year-based interval for time partitioning.
   463  	YearPartitioningType TimePartitioningType = "YEAR"
   464  )
   465  
   466  // TimePartitioning describes the time-based date partitioning on a table.
   467  // For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
   468  type TimePartitioning struct {
   469  	// Defines the partition interval type.  Supported values are "HOUR", "DAY", "MONTH", and "YEAR".
   470  	// When the interval type is not specified, the default is DAY.
   471  	Type TimePartitioningType
   472  
   473  	// The amount of time to keep the storage for a partition.
   474  	// If the duration is empty (0), the data in the partitions does not expire.
   475  	Expiration time.Duration
   476  
   477  	// If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
   478  	// table is partitioned by this field. The field must be a top-level TIMESTAMP or
   479  	// DATE field. Its mode must be NULLABLE or REQUIRED.
   480  	Field string
   481  
   482  	// If set to true, queries that reference this table must specify a
   483  	// partition filter (e.g. a WHERE clause) that can be used to eliminate
   484  	// partitions. Used to prevent unintentional full data scans on large
   485  	// partitioned tables.
   486  	// DEPRECATED: use the top-level RequirePartitionFilter in TableMetadata.
   487  	RequirePartitionFilter bool
   488  }
   489  
   490  func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
   491  	if p == nil {
   492  		return nil
   493  	}
   494  	// Treat unspecified values as DAY-based partitioning.
   495  	intervalType := DayPartitioningType
   496  	if p.Type != "" {
   497  		intervalType = p.Type
   498  	}
   499  	return &bq.TimePartitioning{
   500  		Type:                   string(intervalType),
   501  		ExpirationMs:           int64(p.Expiration / time.Millisecond),
   502  		Field:                  p.Field,
   503  		RequirePartitionFilter: p.RequirePartitionFilter,
   504  	}
   505  }
   506  
   507  func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
   508  	if q == nil {
   509  		return nil
   510  	}
   511  	return &TimePartitioning{
   512  		Type:                   TimePartitioningType(q.Type),
   513  		Expiration:             time.Duration(q.ExpirationMs) * time.Millisecond,
   514  		Field:                  q.Field,
   515  		RequirePartitionFilter: q.RequirePartitionFilter,
   516  	}
   517  }
   518  
   519  // RangePartitioning indicates an integer-range based storage organization strategy.
   520  type RangePartitioning struct {
   521  	// The field by which the table is partitioned.
   522  	// This field must be a top-level field, and must be typed as an
   523  	// INTEGER/INT64.
   524  	Field string
   525  	// The details of how partitions are mapped onto the integer range.
   526  	Range *RangePartitioningRange
   527  }
   528  
   529  // RangePartitioningRange defines the boundaries and width of partitioned values.
   530  type RangePartitioningRange struct {
   531  	// The start of the defined range of values, inclusive of the specified value.
   532  	Start int64
   533  	// The end of the defined range of values, exclusive of the defined value.
   534  	End int64
   535  	// The width of each interval range.
   536  	Interval int64
   537  }
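
For example, the configuration below (a sketch; the field name and bounds are placeholders) maps an integer column into partitions [0,100), [100,200), and so on up to 1000:

	// exampleRangePartitioning is illustrative only.
	var exampleRangePartitioning = &RangePartitioning{
		Field: "customer_id",
		Range: &RangePartitioningRange{
			Start:    0,
			End:      1000,
			Interval: 100,
		},
	}
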
   538  
   539  func (rp *RangePartitioning) toBQ() *bq.RangePartitioning {
   540  	if rp == nil {
   541  		return nil
   542  	}
   543  	return &bq.RangePartitioning{
   544  		Field: rp.Field,
   545  		Range: rp.Range.toBQ(),
   546  	}
   547  }
   548  
   549  func bqToRangePartitioning(q *bq.RangePartitioning) *RangePartitioning {
   550  	if q == nil {
   551  		return nil
   552  	}
   553  	return &RangePartitioning{
   554  		Field: q.Field,
   555  		Range: bqToRangePartitioningRange(q.Range),
   556  	}
   557  }
   558  
   559  func bqToRangePartitioningRange(br *bq.RangePartitioningRange) *RangePartitioningRange {
   560  	if br == nil {
   561  		return nil
   562  	}
   563  	return &RangePartitioningRange{
   564  		Start:    br.Start,
   565  		End:      br.End,
   566  		Interval: br.Interval,
   567  	}
   568  }
   569  
   570  func (rpr *RangePartitioningRange) toBQ() *bq.RangePartitioningRange {
   571  	if rpr == nil {
   572  		return nil
   573  	}
   574  	return &bq.RangePartitioningRange{
   575  		Start:           rpr.Start,
   576  		End:             rpr.End,
   577  		Interval:        rpr.Interval,
   578  		ForceSendFields: []string{"Start", "End", "Interval"},
   579  	}
   580  }
   581  
   582  // Clustering governs the organization of data within a managed table.
   583  // For more information, see https://cloud.google.com/bigquery/docs/clustered-tables
   584  type Clustering struct {
   585  	Fields []string
   586  }
   587  
   588  func (c *Clustering) toBQ() *bq.Clustering {
   589  	if c == nil {
   590  		return nil
   591  	}
   592  	return &bq.Clustering{
   593  		Fields: c.Fields,
   594  	}
   595  }
   596  
   597  func bqToClustering(q *bq.Clustering) *Clustering {
   598  	if q == nil {
   599  		return nil
   600  	}
   601  	return &Clustering{
   602  		Fields: q.Fields,
   603  	}
   604  }
   605  
   606  // EncryptionConfig configures customer-managed encryption on tables and ML models.
   607  type EncryptionConfig struct {
   608  	// Describes the Cloud KMS encryption key that will be used to protect
   609  	// destination BigQuery table. The BigQuery Service Account associated with your
   610  	// project requires access to this encryption key.
   611  	KMSKeyName string
   612  }
   613  
   614  func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
   615  	if e == nil {
   616  		return nil
   617  	}
   618  	return &bq.EncryptionConfiguration{
   619  		KmsKeyName: e.KMSKeyName,
   620  	}
   621  }
   622  
   623  func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
   624  	if q == nil {
   625  		return nil
   626  	}
   627  	return &EncryptionConfig{
   628  		KMSKeyName: q.KmsKeyName,
   629  	}
   630  }
   631  
   632  // StreamingBuffer holds information about the streaming buffer.
   633  type StreamingBuffer struct {
   634  	// A lower-bound estimate of the number of bytes currently in the streaming
   635  	// buffer.
   636  	EstimatedBytes uint64
   637  
   638  	// A lower-bound estimate of the number of rows currently in the streaming
   639  	// buffer.
   640  	EstimatedRows uint64
   641  
   642  	// The time of the oldest entry in the streaming buffer.
   643  	OldestEntryTime time.Time
   644  }
   645  
   646  func (t *Table) toBQ() *bq.TableReference {
   647  	return &bq.TableReference{
   648  		ProjectId: t.ProjectID,
   649  		DatasetId: t.DatasetID,
   650  		TableId:   t.TableID,
   651  	}
   652  }
   653  
   654  // IdentifierFormat represents how certain resource identifiers such as table references
   655  // are formatted.
   656  type IdentifierFormat string
   657  
   658  var (
   659  	// StandardSQLID returns an identifier suitable for use with Standard SQL.
   660  	StandardSQLID IdentifierFormat = "SQL"
   661  
   662  	// LegacySQLID returns an identifier suitable for use with Legacy SQL.
   663  	LegacySQLID IdentifierFormat = "LEGACY_SQL"
   664  
   665  	// StorageAPIResourceID returns an identifier suitable for use with the Storage API.  Namely, it's for formatting
   666  	// a table resource for invoking read and write functionality.
   667  	StorageAPIResourceID IdentifierFormat = "STORAGE_API_RESOURCE"
   668  
   669  	// ErrUnknownIdentifierFormat is indicative of requesting an identifier in a format that is
   670  	// not supported.
   671  	ErrUnknownIdentifierFormat = errors.New("unknown identifier format")
   672  )
   673  
   674  // Identifier returns the ID of the table in the requested format.
   675  func (t *Table) Identifier(f IdentifierFormat) (string, error) {
   676  	switch f {
   677  	case LegacySQLID:
   678  		return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID), nil
   679  	case StorageAPIResourceID:
   680  		return fmt.Sprintf("projects/%s/datasets/%s/tables/%s", t.ProjectID, t.DatasetID, t.TableID), nil
   681  	case StandardSQLID:
   682  		// Note we don't need to quote the project ID here, as StandardSQL has special rules to allow
   683  		// dash identifiers for projects without issue in table identifiers.
   684  		return fmt.Sprintf("%s.%s.%s", t.ProjectID, t.DatasetID, t.TableID), nil
   685  	default:
   686  		return "", ErrUnknownIdentifierFormat
   687  	}
   688  }
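
A sketch of using Identifier to splice a table reference into query text (the query itself is a placeholder; the Standard SQL format is the one usable inside a query):

	// exampleQueryTable is a hypothetical helper.
	func exampleQueryTable(ctx context.Context, client *Client, t *Table) (*RowIterator, error) {
		id, err := t.Identifier(StandardSQLID)
		if err != nil {
			return nil, err
		}
		q := client.Query(fmt.Sprintf("SELECT COUNT(*) FROM `%s`", id))
		return q.Read(ctx)
	}
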
   689  
   690  // FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
   691  func (t *Table) FullyQualifiedName() string {
   692  	s, _ := t.Identifier(LegacySQLID)
   693  	return s
   694  }
   695  
   696  // implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
   697  func (t *Table) implicitTable() bool {
   698  	return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
   699  }
   700  
   701  // Create creates a table in the BigQuery service.
   702  // Pass in a TableMetadata value to configure the table.
   703  // If tm.View.Query is non-empty, the created table will be of type VIEW.
   704  // If no ExpirationTime is specified, the table will never expire.
   705  // After table creation, a view can be modified only if its table was initially created
   706  // with a view.
   707  func (t *Table) Create(ctx context.Context, tm *TableMetadata) (err error) {
   708  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Create")
   709  	defer func() { trace.EndSpan(ctx, err) }()
   710  
   711  	table, err := tm.toBQ()
   712  	if err != nil {
   713  		return err
   714  	}
   715  	table.TableReference = &bq.TableReference{
   716  		ProjectId: t.ProjectID,
   717  		DatasetId: t.DatasetID,
   718  		TableId:   t.TableID,
   719  	}
   720  
   721  	req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
   722  	setClientHeader(req.Header())
   723  	return runWithRetry(ctx, func() (err error) {
   724  		ctx = trace.StartSpan(ctx, "bigquery.tables.insert")
   725  		_, err = req.Do()
   726  		trace.EndSpan(ctx, err)
   727  		return err
   728  	})
   729  }
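
A sketch of creating a time-partitioned, clustered table (dataset, field names, and the 90-day expiration are illustrative assumptions):

	// exampleCreatePartitionedTable is illustrative only.
	func exampleCreatePartitionedTable(ctx context.Context, client *Client) error {
		tbl := client.Dataset("my_dataset").Table("events")
		return tbl.Create(ctx, &TableMetadata{
			Schema: Schema{
				{Name: "event_time", Type: TimestampFieldType, Required: true},
				{Name: "origin", Type: StringFieldType},
				{Name: "payload", Type: StringFieldType},
			},
			TimePartitioning: &TimePartitioning{
				Type:       DayPartitioningType,
				Field:      "event_time",
				Expiration: 90 * 24 * time.Hour,
			},
			Clustering: &Clustering{Fields: []string{"origin"}},
		})
	}
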
   730  
   731  func (tm *TableMetadata) toBQ() (*bq.Table, error) {
   732  	t := &bq.Table{}
   733  	if tm == nil {
   734  		return t, nil
   735  	}
   736  	if tm.Schema != nil && tm.ViewQuery != "" {
   737  		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
   738  	}
   739  	t.FriendlyName = tm.Name
   740  	t.Description = tm.Description
   741  	t.Labels = tm.Labels
   742  	if tm.Schema != nil {
   743  		t.Schema = tm.Schema.toBQ()
   744  	}
   745  	if tm.ViewQuery != "" {
   746  		if tm.UseStandardSQL && tm.UseLegacySQL {
   747  			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
   748  		}
   749  		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
   750  		if tm.UseLegacySQL {
   751  			t.View.UseLegacySql = true
   752  		} else {
   753  			t.View.UseLegacySql = false
   754  			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
   755  		}
   756  	} else if tm.UseLegacySQL || tm.UseStandardSQL {
   757  		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
   758  	}
   759  	t.MaterializedView = tm.MaterializedView.toBQ()
   760  	t.TimePartitioning = tm.TimePartitioning.toBQ()
   761  	t.RangePartitioning = tm.RangePartitioning.toBQ()
   762  	t.Clustering = tm.Clustering.toBQ()
   763  	t.RequirePartitionFilter = tm.RequirePartitionFilter
   764  	t.SnapshotDefinition = tm.SnapshotDefinition.toBQ()
   765  	t.CloneDefinition = tm.CloneDefinition.toBQ()
   766  
   767  	if !validExpiration(tm.ExpirationTime) {
   768  		return nil, fmt.Errorf("invalid expiration time: %v.\n"+
   769  			"Valid expiration times are after 1678 and before 2262", tm.ExpirationTime)
   770  	}
   771  	if !tm.ExpirationTime.IsZero() && tm.ExpirationTime != NeverExpire {
   772  		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
   773  	}
   774  	if tm.ExternalDataConfig != nil {
   775  		edc := tm.ExternalDataConfig.toBQ()
   776  		t.ExternalDataConfiguration = &edc
   777  	}
   778  	t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
   779  	if tm.FullID != "" {
   780  		return nil, errors.New("cannot set FullID on create")
   781  	}
   782  	if tm.Type != "" {
   783  		return nil, errors.New("cannot set Type on create")
   784  	}
   785  	if !tm.CreationTime.IsZero() {
   786  		return nil, errors.New("cannot set CreationTime on create")
   787  	}
   788  	if !tm.LastModifiedTime.IsZero() {
   789  		return nil, errors.New("cannot set LastModifiedTime on create")
   790  	}
   791  	if tm.NumBytes != 0 {
   792  		return nil, errors.New("cannot set NumBytes on create")
   793  	}
   794  	if tm.NumLongTermBytes != 0 {
   795  		return nil, errors.New("cannot set NumLongTermBytes on create")
   796  	}
   797  	if tm.NumRows != 0 {
   798  		return nil, errors.New("cannot set NumRows on create")
   799  	}
   800  	if tm.StreamingBuffer != nil {
   801  		return nil, errors.New("cannot set StreamingBuffer on create")
   802  	}
   803  	if tm.ETag != "" {
   804  		return nil, errors.New("cannot set ETag on create")
   805  	}
   806  	t.DefaultCollation = string(tm.DefaultCollation)
   807  
   808  	if tm.TableConstraints != nil {
   809  		t.TableConstraints = &bq.TableConstraints{}
   810  		if tm.TableConstraints.PrimaryKey != nil {
   811  			t.TableConstraints.PrimaryKey = tm.TableConstraints.PrimaryKey.toBQ()
   812  		}
   813  		if len(tm.TableConstraints.ForeignKeys) > 0 {
   814  			t.TableConstraints.ForeignKeys = make([]*bq.TableConstraintsForeignKeys, len(tm.TableConstraints.ForeignKeys))
   815  			for i, fk := range tm.TableConstraints.ForeignKeys {
   816  				t.TableConstraints.ForeignKeys[i] = fk.toBQ()
   817  			}
   818  		}
   819  	}
   820  	if tm.ResourceTags != nil {
   821  		t.ResourceTags = make(map[string]string)
   822  		for k, v := range tm.ResourceTags {
   823  			t.ResourceTags[k] = v
   824  		}
   825  	}
   826  	return t, nil
   827  }
   828  
   829  // We use this for the option pattern rather than exposing the underlying
   830  // discovery type directly.
   831  type tableGetCall struct {
   832  	call *bq.TablesGetCall
   833  }
   834  
   835  // TableMetadataOption allows callers to customize requests for table metadata.
   836  type TableMetadataOption func(*tableGetCall)
   837  
   838  // TableMetadataView specifies which details about a table are desired.
   839  type TableMetadataView string
   840  
   841  const (
   842  	// BasicMetadataView populates basic table information including schema and partitioning,
   843  	// but does not contain storage statistics like number of rows or bytes.  This is a more
   844  	// efficient view to use for large tables or higher metadata query rates.
   845  	BasicMetadataView TableMetadataView = "BASIC"
   846  
   847  	// FullMetadataView returns all table information, including storage statistics.  It currently
   848  	// returns the same information as StorageStatsMetadataView, but may include additional information
   849  	// in the future.
   850  	FullMetadataView TableMetadataView = "FULL"
   851  
   852  	// StorageStatsMetadataView includes all information from the basic view, and also includes storage statistics.
   853  	StorageStatsMetadataView TableMetadataView = "STORAGE_STATS"
   854  )
   855  
   856  // WithMetadataView is used to customize what details are returned when interrogating a
   857  // table via the Metadata() call.  Generally this is used to limit data returned for performance
   858  // reasons (such as large tables that take time computing storage statistics).
   859  func WithMetadataView(tmv TableMetadataView) TableMetadataOption {
   860  	return func(tgc *tableGetCall) {
   861  		tgc.call.View(string(tmv))
   862  	}
   863  }
   864  
   865  // Metadata fetches the metadata for the table.
   866  func (t *Table) Metadata(ctx context.Context, opts ...TableMetadataOption) (md *TableMetadata, err error) {
   867  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Metadata")
   868  	defer func() { trace.EndSpan(ctx, err) }()
   869  
   870  	tgc := &tableGetCall{
   871  		call: t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx),
   872  	}
   873  
   874  	for _, o := range opts {
   875  		o(tgc)
   876  	}
   877  
   878  	setClientHeader(tgc.call.Header())
   879  	var res *bq.Table
   880  	if err := runWithRetry(ctx, func() (err error) {
   881  		sCtx := trace.StartSpan(ctx, "bigquery.tables.get")
   882  		res, err = tgc.call.Do()
   883  		trace.EndSpan(sCtx, err)
   884  		return err
   885  	}); err != nil {
   886  		return nil, err
   887  	}
   888  	return bqToTableMetadata(res, t.c)
   889  }
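
A sketch of fetching lightweight metadata for a large table, skipping storage statistics when only the schema is needed:

	// exampleBasicMetadata is a hypothetical helper.
	func exampleBasicMetadata(ctx context.Context, tbl *Table) (Schema, error) {
		md, err := tbl.Metadata(ctx, WithMetadataView(BasicMetadataView))
		if err != nil {
			return nil, err
		}
		return md.Schema, nil
	}
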
   890  
   891  func bqToTableMetadata(t *bq.Table, c *Client) (*TableMetadata, error) {
   892  	md := &TableMetadata{
   893  		Description:            t.Description,
   894  		Name:                   t.FriendlyName,
   895  		Location:               t.Location,
   896  		Type:                   TableType(t.Type),
   897  		FullID:                 t.Id,
   898  		Labels:                 t.Labels,
   899  		NumBytes:               t.NumBytes,
   900  		NumLongTermBytes:       t.NumLongTermBytes,
   901  		NumRows:                t.NumRows,
   902  		ExpirationTime:         unixMillisToTime(t.ExpirationTime),
   903  		CreationTime:           unixMillisToTime(t.CreationTime),
   904  		LastModifiedTime:       unixMillisToTime(int64(t.LastModifiedTime)),
   905  		ETag:                   t.Etag,
   906  		DefaultCollation:       t.DefaultCollation,
   907  		EncryptionConfig:       bqToEncryptionConfig(t.EncryptionConfiguration),
   908  		RequirePartitionFilter: t.RequirePartitionFilter,
   909  		SnapshotDefinition:     bqToSnapshotDefinition(t.SnapshotDefinition, c),
   910  		CloneDefinition:        bqToCloneDefinition(t.CloneDefinition, c),
   911  	}
   912  	if t.MaterializedView != nil {
   913  		md.MaterializedView = bqToMaterializedViewDefinition(t.MaterializedView)
   914  	}
   915  	if t.Schema != nil {
   916  		md.Schema = bqToSchema(t.Schema)
   917  	}
   918  	if t.View != nil {
   919  		md.ViewQuery = t.View.Query
   920  		md.UseLegacySQL = t.View.UseLegacySql
   921  	}
   922  	md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
   923  	md.RangePartitioning = bqToRangePartitioning(t.RangePartitioning)
   924  	md.Clustering = bqToClustering(t.Clustering)
   925  	if t.StreamingBuffer != nil {
   926  		md.StreamingBuffer = &StreamingBuffer{
   927  			EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
   928  			EstimatedRows:   t.StreamingBuffer.EstimatedRows,
   929  			OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
   930  		}
   931  	}
   932  	if t.ExternalDataConfiguration != nil {
   933  		edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
   934  		if err != nil {
   935  			return nil, err
   936  		}
   937  		md.ExternalDataConfig = edc
   938  	}
   939  	if t.TableConstraints != nil {
   940  		md.TableConstraints = &TableConstraints{
   941  			PrimaryKey:  bqToPrimaryKey(t.TableConstraints),
   942  			ForeignKeys: bqToForeignKeys(t.TableConstraints, c),
   943  		}
   944  	}
   945  	if t.ResourceTags != nil {
   946  		md.ResourceTags = make(map[string]string)
   947  		for k, v := range t.ResourceTags {
   948  			md.ResourceTags[k] = v
   949  		}
   950  	}
   951  	return md, nil
   952  }
   953  
   954  // Delete deletes the table.
   955  func (t *Table) Delete(ctx context.Context) (err error) {
   956  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Delete")
   957  	defer func() { trace.EndSpan(ctx, err) }()
   958  
   959  	call := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
   960  	setClientHeader(call.Header())
   961  
   962  	return runWithRetry(ctx, func() (err error) {
   963  		ctx = trace.StartSpan(ctx, "bigquery.tables.delete")
   964  		err = call.Do()
   965  		trace.EndSpan(ctx, err)
   966  		return err
   967  	})
   968  }
   969  
   970  // Read fetches the contents of the table.
   971  func (t *Table) Read(ctx context.Context) *RowIterator {
   972  	return t.read(ctx, fetchPage)
   973  }
   974  
   975  func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
   976  	if t.c.isStorageReadAvailable() {
   977  		it, err := newStorageRowIteratorFromTable(ctx, t, false)
   978  		if err == nil {
   979  			return it
   980  		}
   981  	}
   982  	return newRowIterator(ctx, &rowSource{t: t}, pf)
   983  }
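
A sketch of iterating over a table's rows; it assumes the google.golang.org/api/iterator package is imported for the Done sentinel:

	// exampleReadTable is illustrative only.
	func exampleReadTable(ctx context.Context, tbl *Table) error {
		it := tbl.Read(ctx)
		for {
			var row []Value
			err := it.Next(&row)
			if err == iterator.Done { // requires google.golang.org/api/iterator
				return nil
			}
			if err != nil {
				return err
			}
			fmt.Println(row)
		}
	}
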
   984  
   985  // NeverExpire is a sentinel value used to remove a table's expiration time.
   986  var NeverExpire = time.Time{}.Add(-1)
   987  
   988  // We use this for the option pattern rather than exposing the underlying
   989  // discovery type directly.
   990  type tablePatchCall struct {
   991  	call *bq.TablesPatchCall
   992  }
   993  
   994  // TableUpdateOption allows callers to customize requests that update table metadata.
   995  type TableUpdateOption func(*tablePatchCall)
   996  
   997  // WithAutoDetectSchema governs whether schema autodetection occurs as part of the table update.
   998  // This is relevant in cases like external tables where schema is detected from the source data.
   999  func WithAutoDetectSchema(b bool) TableUpdateOption {
  1000  	return func(tpc *tablePatchCall) {
  1001  		tpc.call.AutodetectSchema(b)
  1002  	}
  1003  }
  1004  
  1005  // Update modifies specific Table metadata fields.
  1006  func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string, opts ...TableUpdateOption) (md *TableMetadata, err error) {
  1007  	ctx = trace.StartSpan(ctx, "cloud.google.com/go/bigquery.Table.Update")
  1008  	defer func() { trace.EndSpan(ctx, err) }()
  1009  
  1010  	bqt, err := tm.toBQ()
  1011  	if err != nil {
  1012  		return nil, err
  1013  	}
  1014  
  1015  	tpc := &tablePatchCall{
  1016  		call: t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx),
  1017  	}
  1018  
  1019  	for _, o := range opts {
  1020  		o(tpc)
  1021  	}
  1022  
  1023  	setClientHeader(tpc.call.Header())
  1024  	if etag != "" {
  1025  		tpc.call.Header().Set("If-Match", etag)
  1026  	}
  1027  	var res *bq.Table
  1028  	if err := runWithRetry(ctx, func() (err error) {
  1029  		ctx = trace.StartSpan(ctx, "bigquery.tables.patch")
  1030  		res, err = tpc.call.Do()
  1031  		trace.EndSpan(ctx, err)
  1032  		return err
  1033  	}); err != nil {
  1034  		return nil, err
  1035  	}
  1036  	return bqToTableMetadata(res, t.c)
  1037  }
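
A sketch of a read-modify-write update that passes the previously read ETag for optimistic concurrency and clears the table's expiration via NeverExpire (the new description is a placeholder):

	// exampleUpdateTable is illustrative only.
	func exampleUpdateTable(ctx context.Context, tbl *Table) (*TableMetadata, error) {
		md, err := tbl.Metadata(ctx)
		if err != nil {
			return nil, err
		}
		return tbl.Update(ctx, TableMetadataToUpdate{
			Description:    "Updated description",
			ExpirationTime: NeverExpire,
		}, md.ETag)
	}
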
  1038  
  1039  func (tm *TableMetadataToUpdate) toBQ() (*bq.Table, error) {
  1040  	t := &bq.Table{}
  1041  	forceSend := func(field string) {
  1042  		t.ForceSendFields = append(t.ForceSendFields, field)
  1043  	}
  1044  
  1045  	if tm.Description != nil {
  1046  		t.Description = optional.ToString(tm.Description)
  1047  		forceSend("Description")
  1048  	}
  1049  	if tm.Name != nil {
  1050  		t.FriendlyName = optional.ToString(tm.Name)
  1051  		forceSend("FriendlyName")
  1052  	}
  1053  	if tm.MaterializedView != nil {
  1054  		t.MaterializedView = tm.MaterializedView.toBQ()
  1055  		forceSend("MaterializedView")
  1056  	}
  1057  	if tm.Schema != nil {
  1058  		t.Schema = tm.Schema.toBQ()
  1059  		forceSend("Schema")
  1060  	}
  1061  	if tm.EncryptionConfig != nil {
  1062  		t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
  1063  	}
  1064  	if tm.ExternalDataConfig != nil {
  1065  		cfg := tm.ExternalDataConfig.toBQ()
  1066  		t.ExternalDataConfiguration = &cfg
  1067  	}
  1068  
  1069  	if tm.Clustering != nil {
  1070  		t.Clustering = tm.Clustering.toBQ()
  1071  	}
  1072  
  1073  	if !validExpiration(tm.ExpirationTime) {
  1074  		return nil, invalidTimeError(tm.ExpirationTime)
  1075  	}
  1076  	if tm.ExpirationTime == NeverExpire {
  1077  		t.NullFields = append(t.NullFields, "ExpirationTime")
  1078  	} else if !tm.ExpirationTime.IsZero() {
  1079  		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
  1080  		forceSend("ExpirationTime")
  1081  	}
  1082  	if tm.TimePartitioning != nil {
  1083  		t.TimePartitioning = tm.TimePartitioning.toBQ()
  1084  		t.TimePartitioning.ForceSendFields = []string{"RequirePartitionFilter"}
  1085  		if tm.TimePartitioning.Expiration == 0 {
  1086  			t.TimePartitioning.NullFields = []string{"ExpirationMs"}
  1087  		}
  1088  	}
  1089  	if tm.RequirePartitionFilter != nil {
  1090  		t.RequirePartitionFilter = optional.ToBool(tm.RequirePartitionFilter)
  1091  		forceSend("RequirePartitionFilter")
  1092  	}
  1093  	if tm.ViewQuery != nil {
  1094  		t.View = &bq.ViewDefinition{
  1095  			Query:           optional.ToString(tm.ViewQuery),
  1096  			ForceSendFields: []string{"Query"},
  1097  		}
  1098  	}
  1099  	if tm.UseLegacySQL != nil {
  1100  		if t.View == nil {
  1101  			t.View = &bq.ViewDefinition{}
  1102  		}
  1103  		t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
  1104  		t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
  1105  	}
  1106  	if tm.DefaultCollation != nil {
  1107  		t.DefaultCollation = optional.ToString(tm.DefaultCollation)
  1108  		forceSend("DefaultCollation")
  1109  	}
  1110  	if tm.TableConstraints != nil {
  1111  		t.TableConstraints = &bq.TableConstraints{}
  1112  		if tm.TableConstraints.PrimaryKey != nil {
  1113  			t.TableConstraints.PrimaryKey = tm.TableConstraints.PrimaryKey.toBQ()
  1114  			t.TableConstraints.PrimaryKey.ForceSendFields = append(t.TableConstraints.PrimaryKey.ForceSendFields, "Columns")
  1115  			t.TableConstraints.ForceSendFields = append(t.TableConstraints.ForceSendFields, "PrimaryKey")
  1116  		}
  1117  		if tm.TableConstraints.ForeignKeys != nil {
  1118  			t.TableConstraints.ForeignKeys = make([]*bq.TableConstraintsForeignKeys, len(tm.TableConstraints.ForeignKeys))
  1119  			for i, fk := range tm.TableConstraints.ForeignKeys {
  1120  				t.TableConstraints.ForeignKeys[i] = fk.toBQ()
  1121  			}
  1122  			t.TableConstraints.ForceSendFields = append(t.TableConstraints.ForceSendFields, "ForeignKeys")
  1123  		}
  1124  	}
  1125  	if tm.ResourceTags != nil {
  1126  		t.ResourceTags = make(map[string]string)
  1127  		for k, v := range tm.ResourceTags {
  1128  			t.ResourceTags[k] = v
  1129  		}
  1130  		forceSend("ResourceTags")
  1131  	}
  1132  	labels, forces, nulls := tm.update()
  1133  	t.Labels = labels
  1134  	t.ForceSendFields = append(t.ForceSendFields, forces...)
  1135  	t.NullFields = append(t.NullFields, nulls...)
  1136  	return t, nil
  1137  }
  1138  
  1139  // validExpiration ensures a specified time is either the sentinel NeverExpire,
  1140  // the zero value, or within the defined range of UnixNano. Internal
  1141  // representations of expiration times are based upon Time.UnixNano. Any time
  1142  // before 1678 or after 2262 cannot be represented by an int64 and is therefore
  1143  // undefined and invalid. See https://godoc.org/time#Time.UnixNano.
  1144  func validExpiration(t time.Time) bool {
  1145  	return t == NeverExpire || t.IsZero() || time.Unix(0, t.UnixNano()).Equal(t)
  1146  }
  1147  
  1148  // invalidTimeError emits a consistent error message for failures of the
  1149  // validExpiration function.
  1150  func invalidTimeError(t time.Time) error {
  1151  	return fmt.Errorf("invalid expiration time %v. "+
  1152  		"Valid expiration times are after 1678 and before 2262", t)
  1153  }
  1154  
  1155  // TableMetadataToUpdate is used when updating a table's metadata.
  1156  // Only non-nil fields will be updated.
  1157  type TableMetadataToUpdate struct {
  1158  	// The user-friendly description of this table.
  1159  	Description optional.String
  1160  
  1161  	// The user-friendly name for this table.
  1162  	Name optional.String
  1163  
  1164  	// The table's schema.
  1165  	// When updating a schema, you can add columns but not remove them.
  1166  	Schema Schema
  1167  
  1168  	// The table's clustering configuration.
  1169  	// For more information on how modifying clustering affects the table, see:
  1170  	// https://cloud.google.com/bigquery/docs/creating-clustered-tables#modifying-cluster-spec
  1171  	Clustering *Clustering
  1172  
  1173  	// The table's encryption configuration.
  1174  	EncryptionConfig *EncryptionConfig
  1175  
  1176  	// The time when this table expires. To remove a table's expiration,
  1177  	// set ExpirationTime to NeverExpire. The zero value is ignored.
  1178  	ExpirationTime time.Time
  1179  
  1180  	// ExternalDataConfig controls the definition of a table defined against
  1181  	// an external source, such as one based on files in Google Cloud Storage.
  1182  	ExternalDataConfig *ExternalDataConfig
  1183  
  1184  	// The query to use for a view.
  1185  	ViewQuery optional.String
  1186  
  1187  	// Use Legacy SQL for the view query.
  1188  	UseLegacySQL optional.Bool
  1189  
  1190  	// MaterializedView allows changes to the underlying materialized view
  1191  	// definition. When calling Update, ensure that all mutable fields of
  1192  	// MaterializedViewDefinition are populated.
  1193  	MaterializedView *MaterializedViewDefinition
  1194  
  1195  	// TimePartitioning allows modification of certain aspects of partition
  1196  	// configuration such as partition expiration and whether partition
  1197  	// filtration is required at query time.  When calling Update, ensure
  1198  	// that all mutable fields of TimePartitioning are populated.
  1199  	TimePartitioning *TimePartitioning
  1200  
  1201  	// RequirePartitionFilter governs whether the table enforces partition
  1202  	// elimination when referenced in a query.
  1203  	RequirePartitionFilter optional.Bool
  1204  
  1205  	// Defines the default collation specification of new STRING fields
  1206  	// in the table.
  1207  	DefaultCollation optional.String
  1208  
  1209  	// TableConstraints allows modification of table constraints
  1210  	// such as primary and foreign keys.
  1211  	TableConstraints *TableConstraints
  1212  
  1213  	// The tags associated with this table. Tag
  1214  	// keys are globally unique. See additional information on tags
  1215  	// (https://cloud.google.com/iam/docs/tags-access-control#definitions).
  1216  	// An object containing a list of "key": value pairs. The key is the
  1217  	// namespaced friendly name of the tag key, e.g. "12345/environment"
  1218  	// where 12345 is parent id. The value is the friendly short name of the
  1219  	// tag value, e.g. "production".
  1220  	ResourceTags map[string]string
  1221  
  1222  	labelUpdater
  1223  }
  1224  
  1225  // labelUpdater contains common code for updating labels.
  1226  type labelUpdater struct {
  1227  	setLabels    map[string]string
  1228  	deleteLabels map[string]bool
  1229  }
  1230  
  1231  // SetLabel causes a label to be added or modified on a call to Update.
  1232  func (u *labelUpdater) SetLabel(name, value string) {
  1233  	if u.setLabels == nil {
  1234  		u.setLabels = map[string]string{}
  1235  	}
  1236  	u.setLabels[name] = value
  1237  }
  1238  
  1239  // DeleteLabel causes a label to be deleted on a call to Update.
  1240  func (u *labelUpdater) DeleteLabel(name string) {
  1241  	if u.deleteLabels == nil {
  1242  		u.deleteLabels = map[string]bool{}
  1243  	}
  1244  	u.deleteLabels[name] = true
  1245  }
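
A sketch of adding and removing labels through the embedded labelUpdater (label names are placeholders; an empty etag skips the If-Match check):

	// exampleUpdateLabels is a hypothetical helper.
	func exampleUpdateLabels(ctx context.Context, tbl *Table) error {
		var tm TableMetadataToUpdate
		tm.SetLabel("cost-center", "research")
		tm.DeleteLabel("deprecated")
		_, err := tbl.Update(ctx, tm, "")
		return err
	}
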
  1246  
  1247  func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
  1248  	if u.setLabels == nil && u.deleteLabels == nil {
  1249  		return nil, nil, nil
  1250  	}
  1251  	labels = map[string]string{}
  1252  	for k, v := range u.setLabels {
  1253  		labels[k] = v
  1254  	}
  1255  	if len(labels) == 0 && len(u.deleteLabels) > 0 {
  1256  		forces = []string{"Labels"}
  1257  	}
  1258  	for l := range u.deleteLabels {
  1259  		nulls = append(nulls, "Labels."+l)
  1260  	}
  1261  	return labels, forces, nulls
  1262  }
  1263  
