...

Source file src/cloud.google.com/go/bigquery/external.go

Documentation: cloud.google.com/go/bigquery

     1  // Copyright 2017 Google LLC
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //      http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  package bigquery
    16  
    17  import (
    18  	"encoding/base64"
    19  	"unicode/utf8"
    20  
    21  	bq "google.golang.org/api/bigquery/v2"
    22  )
    23  
// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
	CSV             DataFormat = "CSV"
	Avro            DataFormat = "AVRO"
	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
	GoogleSheets    DataFormat = "GOOGLE_SHEETS"
	Bigtable        DataFormat = "BIGTABLE"
	Parquet         DataFormat = "PARQUET"
	ORC             DataFormat = "ORC"
	// TFSavedModel is the TensorFlow SavedModel format, used for BQ ML models.
	TFSavedModel DataFormat = "ML_TF_SAVED_MODEL"
	// XGBoostBooster is the XGBoost Booster format, used for BQ ML models.
	XGBoostBooster DataFormat = "ML_XGBOOST_BOOSTER"
	// Iceberg is the Apache Iceberg open table format.
	Iceberg DataFormat = "ICEBERG"
)
    43  
// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
	// toBQ returns the BigQuery API representation of the external data source.
	toBQ() bq.ExternalDataConfiguration
}
    50  
// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
	// The format of the data. Required.
	SourceFormat DataFormat

	// The fully-qualified URIs that point to your
	// data in Google Cloud. Required.
	//
	// For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
	// and it must come after the 'bucket' name. Size limits related to load jobs
	// apply to external data sources.
	//
	// For Google Cloud Bigtable URIs, exactly one URI can be specified and it has be
	// a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
	//
	// For Google Cloud Datastore backups, exactly one URI can be specified. Also,
	// the '*' wildcard character is not allowed.
	SourceURIs []string

	// The schema of the data. Required for CSV and JSON; disallowed for the
	// other formats.
	Schema Schema

	// Try to detect schema and format options automatically.
	// Any option specified explicitly will be honored.
	AutoDetect bool

	// The compression type of the data.
	Compression Compression

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// Additional options for CSV, GoogleSheets, Bigtable, and Parquet formats.
	Options ExternalDataConfigOptions

	// HivePartitioningOptions allows use of Hive partitioning based on the
	// layout of objects in Google Cloud Storage.
	HivePartitioningOptions *HivePartitioningOptions

	// DecimalTargetTypes allows selection of how decimal values are converted when
	// processed in bigquery, subject to the value type having sufficient precision/scale
	// to support the values.  In the order of NUMERIC, BIGNUMERIC, and STRING, a type is
	// selected if it is present in the list and if it supports the necessary precision and scale.
	//
	// StringTargetType supports all precision and scale values.
	DecimalTargetTypes []DecimalTargetType

	// ConnectionID associates an external data configuration with a connection ID.
	// Connections are managed through the BigQuery Connection API:
	// https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1
	ConnectionID string

	// When creating an external table, the user can provide a reference file with the table schema.
	// This is enabled for the following formats: AVRO, PARQUET, ORC.
	ReferenceFileSchemaURI string
}
   118  
   119  func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
   120  	q := bq.ExternalDataConfiguration{
   121  		SourceFormat:            string(e.SourceFormat),
   122  		SourceUris:              e.SourceURIs,
   123  		Autodetect:              e.AutoDetect,
   124  		Compression:             string(e.Compression),
   125  		IgnoreUnknownValues:     e.IgnoreUnknownValues,
   126  		MaxBadRecords:           e.MaxBadRecords,
   127  		HivePartitioningOptions: e.HivePartitioningOptions.toBQ(),
   128  		ConnectionId:            e.ConnectionID,
   129  		ReferenceFileSchemaUri:  e.ReferenceFileSchemaURI,
   130  	}
   131  	if e.Schema != nil {
   132  		q.Schema = e.Schema.toBQ()
   133  	}
   134  	if e.Options != nil {
   135  		e.Options.populateExternalDataConfig(&q)
   136  	}
   137  	for _, v := range e.DecimalTargetTypes {
   138  		q.DecimalTargetTypes = append(q.DecimalTargetTypes, string(v))
   139  	}
   140  	return q
   141  }
   142  
   143  func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
   144  	e := &ExternalDataConfig{
   145  		SourceFormat:            DataFormat(q.SourceFormat),
   146  		SourceURIs:              q.SourceUris,
   147  		AutoDetect:              q.Autodetect,
   148  		Compression:             Compression(q.Compression),
   149  		IgnoreUnknownValues:     q.IgnoreUnknownValues,
   150  		MaxBadRecords:           q.MaxBadRecords,
   151  		Schema:                  bqToSchema(q.Schema),
   152  		HivePartitioningOptions: bqToHivePartitioningOptions(q.HivePartitioningOptions),
   153  		ConnectionID:            q.ConnectionId,
   154  		ReferenceFileSchemaURI:  q.ReferenceFileSchemaUri,
   155  	}
   156  	for _, v := range q.DecimalTargetTypes {
   157  		e.DecimalTargetTypes = append(e.DecimalTargetTypes, DecimalTargetType(v))
   158  	}
   159  	switch {
   160  	case q.AvroOptions != nil:
   161  		e.Options = bqToAvroOptions(q.AvroOptions)
   162  	case q.CsvOptions != nil:
   163  		e.Options = bqToCSVOptions(q.CsvOptions)
   164  	case q.GoogleSheetsOptions != nil:
   165  		e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
   166  	case q.BigtableOptions != nil:
   167  		var err error
   168  		e.Options, err = bqToBigtableOptions(q.BigtableOptions)
   169  		if err != nil {
   170  			return nil, err
   171  		}
   172  	case q.ParquetOptions != nil:
   173  		e.Options = bqToParquetOptions(q.ParquetOptions)
   174  	}
   175  	return e, nil
   176  }
   177  
// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by AvroOptions, CSVOptions, GoogleSheetsOptions,
// BigtableOptions and ParquetOptions.
type ExternalDataConfigOptions interface {
	// populateExternalDataConfig writes the format-specific options into the
	// supplied BigQuery API configuration.
	populateExternalDataConfig(*bq.ExternalDataConfiguration)
}
   183  
// AvroOptions are additional options for Avro external data sources.
type AvroOptions struct {
	// UseAvroLogicalTypes indicates whether to interpret logical types as the
	// corresponding BigQuery data type (for example, TIMESTAMP), instead of using
	// the raw type (for example, INTEGER).
	UseAvroLogicalTypes bool
}
   191  
   192  func (o *AvroOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
   193  	c.AvroOptions = &bq.AvroOptions{
   194  		UseAvroLogicalTypes: o.UseAvroLogicalTypes,
   195  	}
   196  }
   197  
   198  func bqToAvroOptions(q *bq.AvroOptions) *AvroOptions {
   199  	if q == nil {
   200  		return nil
   201  	}
   202  	return &AvroOptions{
   203  		UseAvroLogicalTypes: q.UseAvroLogicalTypes,
   204  	}
   205  }
   206  
// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
	// AllowJaggedRows causes missing trailing optional columns to be tolerated
	// when reading CSV data. Missing values are treated as nulls.
	AllowJaggedRows bool

	// AllowQuotedNewlines sets whether quoted data sections containing
	// newlines are allowed when reading CSV data.
	AllowQuotedNewlines bool

	// Encoding is the character encoding of data to be read.
	Encoding Encoding

	// FieldDelimiter is the separator for fields in a CSV file, used when
	// reading or exporting data. The default is ",".
	FieldDelimiter string

	// Quote is the value used to quote data sections in a CSV file. The
	// default quotation character is the double quote ("), which is used if
	// both Quote and ForceZeroQuote are unset.
	// To specify that no character should be interpreted as a quotation
	// character, set ForceZeroQuote to true.
	// Only used when reading data.
	Quote          string
	ForceZeroQuote bool

	// The number of rows at the top of a CSV file that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64

	// An optional custom string that will represent a NULL
	// value in CSV import data.
	NullMarker string

	// Preserves the embedded ASCII control characters (the first 32 characters in the ASCII-table,
	// from '\x00' to '\x1F') when loading from CSV. Only applicable to CSV, ignored for other formats.
	PreserveASCIIControlCharacters bool
}
   245  
   246  func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
   247  	c.CsvOptions = &bq.CsvOptions{
   248  		AllowJaggedRows:                o.AllowJaggedRows,
   249  		AllowQuotedNewlines:            o.AllowQuotedNewlines,
   250  		Encoding:                       string(o.Encoding),
   251  		FieldDelimiter:                 o.FieldDelimiter,
   252  		Quote:                          o.quote(),
   253  		SkipLeadingRows:                o.SkipLeadingRows,
   254  		NullMarker:                     o.NullMarker,
   255  		PreserveAsciiControlCharacters: o.PreserveASCIIControlCharacters,
   256  	}
   257  }
   258  
   259  // quote returns the CSV quote character, or nil if unset.
   260  func (o *CSVOptions) quote() *string {
   261  	if o.ForceZeroQuote {
   262  		quote := ""
   263  		return &quote
   264  	}
   265  	if o.Quote == "" {
   266  		return nil
   267  	}
   268  	return &o.Quote
   269  }
   270  
   271  func (o *CSVOptions) setQuote(ps *string) {
   272  	if ps != nil {
   273  		o.Quote = *ps
   274  		if o.Quote == "" {
   275  			o.ForceZeroQuote = true
   276  		}
   277  	}
   278  }
   279  
   280  func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
   281  	o := &CSVOptions{
   282  		AllowJaggedRows:                q.AllowJaggedRows,
   283  		AllowQuotedNewlines:            q.AllowQuotedNewlines,
   284  		Encoding:                       Encoding(q.Encoding),
   285  		FieldDelimiter:                 q.FieldDelimiter,
   286  		SkipLeadingRows:                q.SkipLeadingRows,
   287  		NullMarker:                     q.NullMarker,
   288  		PreserveASCIIControlCharacters: q.PreserveAsciiControlCharacters,
   289  	}
   290  	o.setQuote(q.Quote)
   291  	return o
   292  }
   293  
// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
	// The number of rows at the top of a sheet that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64
	// Optionally specifies a more specific range of cells to include.
	// Typical format: sheet_name!top_left_cell_id:bottom_right_cell_id
	//
	// Example: sheet1!A1:B20
	Range string
}
   305  
   306  func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
   307  	c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
   308  		SkipLeadingRows: o.SkipLeadingRows,
   309  		Range:           o.Range,
   310  	}
   311  }
   312  
   313  func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
   314  	return &GoogleSheetsOptions{
   315  		SkipLeadingRows: q.SkipLeadingRows,
   316  		Range:           q.Range,
   317  	}
   318  }
   319  
// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
	// A list of column families to expose in the table schema along with their
	// types. If omitted, all column families are present in the table schema and
	// their values are read as BYTES.
	ColumnFamilies []*BigtableColumnFamily

	// If true, then the column families that are not specified in columnFamilies
	// list are not exposed in the table schema. Otherwise, they are read with BYTES
	// type values. The default is false.
	IgnoreUnspecifiedColumnFamilies bool

	// If true, then the rowkey column families will be read and converted to string.
	// Otherwise they are read with BYTES type values and users need to manually cast
	// them with CAST if necessary. The default is false.
	ReadRowkeyAsString bool
}
   337  
   338  func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
   339  	q := &bq.BigtableOptions{
   340  		IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
   341  		ReadRowkeyAsString:              o.ReadRowkeyAsString,
   342  	}
   343  	for _, f := range o.ColumnFamilies {
   344  		q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
   345  	}
   346  	c.BigtableOptions = q
   347  }
   348  
   349  func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
   350  	b := &BigtableOptions{
   351  		IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
   352  		ReadRowkeyAsString:              q.ReadRowkeyAsString,
   353  	}
   354  	for _, f := range q.ColumnFamilies {
   355  		f2, err := bqToBigtableColumnFamily(f)
   356  		if err != nil {
   357  			return nil, err
   358  		}
   359  		b.ColumnFamilies = append(b.ColumnFamilies, f2)
   360  	}
   361  	return b, nil
   362  }
   363  
// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
	// Identifier of the column family.
	FamilyID string

	// Lists of columns that should be exposed as individual fields as opposed to a
	// list of (column name, value) pairs. All columns whose qualifier matches a
	// qualifier in this list can be accessed as individual fields; other columns
	// can be accessed as a list through the Column field.
	Columns []*BigtableColumn

	// The encoding of the values when the type is not STRING. Acceptable encoding values are:
	// - TEXT - indicates values are alphanumeric text strings.
	// - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
	// This can be overridden for a specific column by listing that column in 'columns' and
	// specifying an encoding for it.
	Encoding string

	// If true, only the latest version of values are exposed for all columns in this
	// column family. This can be overridden for a specific column by listing that
	// column in 'columns' and specifying a different setting for that column.
	OnlyReadLatest bool

	// The type to convert the value in cells of this
	// column family. The values are expected to be encoded using HBase
	// Bytes.toBytes function when using the BINARY encoding value.
	// Following BigQuery types are allowed (case-sensitive):
	// BYTES STRING INTEGER FLOAT BOOLEAN.
	// The default type is BYTES. This can be overridden for a specific column by
	// listing that column in 'columns' and specifying a type for it.
	Type string
}
   396  
   397  func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
   398  	q := &bq.BigtableColumnFamily{
   399  		FamilyId:       b.FamilyID,
   400  		Encoding:       b.Encoding,
   401  		OnlyReadLatest: b.OnlyReadLatest,
   402  		Type:           b.Type,
   403  	}
   404  	for _, col := range b.Columns {
   405  		q.Columns = append(q.Columns, col.toBQ())
   406  	}
   407  	return q
   408  }
   409  
   410  func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
   411  	b := &BigtableColumnFamily{
   412  		FamilyID:       q.FamilyId,
   413  		Encoding:       q.Encoding,
   414  		OnlyReadLatest: q.OnlyReadLatest,
   415  		Type:           q.Type,
   416  	}
   417  	for _, col := range q.Columns {
   418  		c, err := bqToBigtableColumn(col)
   419  		if err != nil {
   420  			return nil, err
   421  		}
   422  		b.Columns = append(b.Columns, c)
   423  	}
   424  	return b, nil
   425  }
   426  
// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
	// Qualifier of the column. Columns in the parent column family that have this
	// exact qualifier are exposed as an individual field whose name is the
	// column qualifier.
	Qualifier string

	// If the qualifier is not a valid BigQuery field identifier i.e. does not match
	// [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
	// name and is used as field name in queries.
	FieldName string

	// If true, only the latest version of values are exposed for this column.
	// See BigtableColumnFamily.OnlyReadLatest.
	OnlyReadLatest bool

	// The encoding of the values when the type is not STRING.
	// See BigtableColumnFamily.Encoding
	Encoding string

	// The type to convert the value in cells of this column.
	// See BigtableColumnFamily.Type
	Type string
}
   451  
   452  func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
   453  	q := &bq.BigtableColumn{
   454  		FieldName:      b.FieldName,
   455  		OnlyReadLatest: b.OnlyReadLatest,
   456  		Encoding:       b.Encoding,
   457  		Type:           b.Type,
   458  	}
   459  	if utf8.ValidString(b.Qualifier) {
   460  		q.QualifierString = b.Qualifier
   461  	} else {
   462  		q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
   463  	}
   464  	return q
   465  }
   466  
   467  func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
   468  	b := &BigtableColumn{
   469  		FieldName:      q.FieldName,
   470  		OnlyReadLatest: q.OnlyReadLatest,
   471  		Encoding:       q.Encoding,
   472  		Type:           q.Type,
   473  	}
   474  	if q.QualifierString != "" {
   475  		b.Qualifier = q.QualifierString
   476  	} else {
   477  		bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
   478  		if err != nil {
   479  			return nil, err
   480  		}
   481  		b.Qualifier = string(bytes)
   482  	}
   483  	return b, nil
   484  }
   485  
// ParquetOptions are additional options for Parquet external data sources.
type ParquetOptions struct {
	// EnumAsString indicates whether to infer Parquet ENUM logical type as
	// STRING instead of BYTES by default.
	EnumAsString bool

	// EnableListInference indicates whether to use schema inference
	// specifically for Parquet LIST logical type.
	EnableListInference bool
}
   496  
   497  func (o *ParquetOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
   498  	if o != nil {
   499  		c.ParquetOptions = &bq.ParquetOptions{
   500  			EnumAsString:        o.EnumAsString,
   501  			EnableListInference: o.EnableListInference,
   502  		}
   503  	}
   504  }
   505  
   506  func bqToParquetOptions(q *bq.ParquetOptions) *ParquetOptions {
   507  	if q == nil {
   508  		return nil
   509  	}
   510  	return &ParquetOptions{
   511  		EnumAsString:        q.EnumAsString,
   512  		EnableListInference: q.EnableListInference,
   513  	}
   514  }
   515  
// HivePartitioningMode is used in conjunction with HivePartitioningOptions.
type HivePartitioningMode string

const (
	// AutoHivePartitioningMode automatically infers partitioning key and types.
	AutoHivePartitioningMode HivePartitioningMode = "AUTO"
	// StringHivePartitioningMode automatically infers partitioning keys and treats values as string.
	StringHivePartitioningMode HivePartitioningMode = "STRINGS"
	// CustomHivePartitioningMode allows custom definition of the external partitioning.
	CustomHivePartitioningMode HivePartitioningMode = "CUSTOM"
)
   527  
// HivePartitioningOptions defines the behavior of Hive partitioning
// when working with external data.
type HivePartitioningOptions struct {

	// Mode defines which hive partitioning mode to use when reading data.
	Mode HivePartitioningMode

	// When hive partition detection is requested, a common prefix for
	// all source uris should be supplied.  The prefix must end immediately
	// before the partition key encoding begins.
	//
	// For example, consider files following this data layout.
	//   gs://bucket/path_to_table/dt=2019-01-01/country=BR/id=7/file.avro
	//   gs://bucket/path_to_table/dt=2018-12-31/country=CA/id=3/file.avro
	//
	// When hive partitioning is requested with either AUTO or STRINGS
	// detection, the common prefix can be either of
	// gs://bucket/path_to_table or gs://bucket/path_to_table/ (trailing
	// slash does not matter).
	SourceURIPrefix string

	// If set to true, queries against this external table require
	// a partition filter to be present that can perform partition
	// elimination.  Hive-partitioned load jobs with this field
	// set to true will fail.
	RequirePartitionFilter bool
}
   555  
   556  func (o *HivePartitioningOptions) toBQ() *bq.HivePartitioningOptions {
   557  	if o == nil {
   558  		return nil
   559  	}
   560  	return &bq.HivePartitioningOptions{
   561  		Mode:                   string(o.Mode),
   562  		SourceUriPrefix:        o.SourceURIPrefix,
   563  		RequirePartitionFilter: o.RequirePartitionFilter,
   564  	}
   565  }
   566  
   567  func bqToHivePartitioningOptions(q *bq.HivePartitioningOptions) *HivePartitioningOptions {
   568  	if q == nil {
   569  		return nil
   570  	}
   571  	return &HivePartitioningOptions{
   572  		Mode:                   HivePartitioningMode(q.Mode),
   573  		SourceURIPrefix:        q.SourceUriPrefix,
   574  		RequirePartitionFilter: q.RequirePartitionFilter,
   575  	}
   576  }
   577  

View as plain text