...

Source file src/google.golang.org/genproto/googleapis/cloud/automl/v1beta1/alias.go

Documentation: google.golang.org/genproto/googleapis/cloud/automl/v1beta1

     1  // Copyright 2022 Google LLC
     2  //
     3  // Licensed under the Apache License, Version 2.0 (the "License");
     4  // you may not use this file except in compliance with the License.
     5  // You may obtain a copy of the License at
     6  //
     7  //     http://www.apache.org/licenses/LICENSE-2.0
     8  //
     9  // Unless required by applicable law or agreed to in writing, software
    10  // distributed under the License is distributed on an "AS IS" BASIS,
    11  // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    12  // See the License for the specific language governing permissions and
    13  // limitations under the License.
    14  
    15  // Code generated by aliasgen. DO NOT EDIT.
    16  
    17  // Package automl aliases all exported identifiers in package
    18  // "cloud.google.com/go/automl/apiv1beta1/automlpb".
    19  //
    20  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb.
    21  // Please read https://github.com/googleapis/google-cloud-go/blob/main/migration.md
    22  // for more details.
    23  package automl
    24  
    25  import (
    26  	src "cloud.google.com/go/automl/apiv1beta1/automlpb"
    27  	grpc "google.golang.org/grpc"
    28  )
    29  
    30  // Deprecated: Please use consts in: cloud.google.com/go/automl/apiv1beta1/automlpb
    31  const (
    32  	ClassificationType_CLASSIFICATION_TYPE_UNSPECIFIED     = src.ClassificationType_CLASSIFICATION_TYPE_UNSPECIFIED
    33  	ClassificationType_MULTICLASS                          = src.ClassificationType_MULTICLASS
    34  	ClassificationType_MULTILABEL                          = src.ClassificationType_MULTILABEL
    35  	DocumentDimensions_CENTIMETER                          = src.DocumentDimensions_CENTIMETER
    36  	DocumentDimensions_DOCUMENT_DIMENSION_UNIT_UNSPECIFIED = src.DocumentDimensions_DOCUMENT_DIMENSION_UNIT_UNSPECIFIED
    37  	DocumentDimensions_INCH                                = src.DocumentDimensions_INCH
    38  	DocumentDimensions_POINT                               = src.DocumentDimensions_POINT
    39  	Document_Layout_FORM_FIELD                             = src.Document_Layout_FORM_FIELD
    40  	Document_Layout_FORM_FIELD_CONTENTS                    = src.Document_Layout_FORM_FIELD_CONTENTS
    41  	Document_Layout_FORM_FIELD_NAME                        = src.Document_Layout_FORM_FIELD_NAME
    42  	Document_Layout_PARAGRAPH                              = src.Document_Layout_PARAGRAPH
    43  	Document_Layout_TABLE                                  = src.Document_Layout_TABLE
    44  	Document_Layout_TABLE_CELL                             = src.Document_Layout_TABLE_CELL
    45  	Document_Layout_TABLE_HEADER                           = src.Document_Layout_TABLE_HEADER
    46  	Document_Layout_TABLE_ROW                              = src.Document_Layout_TABLE_ROW
    47  	Document_Layout_TEXT_SEGMENT_TYPE_UNSPECIFIED          = src.Document_Layout_TEXT_SEGMENT_TYPE_UNSPECIFIED
    48  	Document_Layout_TOKEN                                  = src.Document_Layout_TOKEN
    49  	Model_DEPLOYED                                         = src.Model_DEPLOYED
    50  	Model_DEPLOYMENT_STATE_UNSPECIFIED                     = src.Model_DEPLOYMENT_STATE_UNSPECIFIED
    51  	Model_UNDEPLOYED                                       = src.Model_UNDEPLOYED
    52  	TypeCode_ARRAY                                         = src.TypeCode_ARRAY
    53  	TypeCode_CATEGORY                                      = src.TypeCode_CATEGORY
    54  	TypeCode_FLOAT64                                       = src.TypeCode_FLOAT64
    55  	TypeCode_STRING                                        = src.TypeCode_STRING
    56  	TypeCode_STRUCT                                        = src.TypeCode_STRUCT
    57  	TypeCode_TIMESTAMP                                     = src.TypeCode_TIMESTAMP
    58  	TypeCode_TYPE_CODE_UNSPECIFIED                         = src.TypeCode_TYPE_CODE_UNSPECIFIED
    59  )
    60  
    61  // Deprecated: Please use vars in: cloud.google.com/go/automl/apiv1beta1/automlpb
    62  var (
    63  	ClassificationType_name                                   = src.ClassificationType_name
    64  	ClassificationType_value                                  = src.ClassificationType_value
    65  	DocumentDimensions_DocumentDimensionUnit_name             = src.DocumentDimensions_DocumentDimensionUnit_name
    66  	DocumentDimensions_DocumentDimensionUnit_value            = src.DocumentDimensions_DocumentDimensionUnit_value
    67  	Document_Layout_TextSegmentType_name                      = src.Document_Layout_TextSegmentType_name
    68  	Document_Layout_TextSegmentType_value                     = src.Document_Layout_TextSegmentType_value
    69  	File_google_cloud_automl_v1beta1_annotation_payload_proto = src.File_google_cloud_automl_v1beta1_annotation_payload_proto
    70  	File_google_cloud_automl_v1beta1_annotation_spec_proto    = src.File_google_cloud_automl_v1beta1_annotation_spec_proto
    71  	File_google_cloud_automl_v1beta1_classification_proto     = src.File_google_cloud_automl_v1beta1_classification_proto
    72  	File_google_cloud_automl_v1beta1_column_spec_proto        = src.File_google_cloud_automl_v1beta1_column_spec_proto
    73  	File_google_cloud_automl_v1beta1_data_items_proto         = src.File_google_cloud_automl_v1beta1_data_items_proto
    74  	File_google_cloud_automl_v1beta1_data_stats_proto         = src.File_google_cloud_automl_v1beta1_data_stats_proto
    75  	File_google_cloud_automl_v1beta1_data_types_proto         = src.File_google_cloud_automl_v1beta1_data_types_proto
    76  	File_google_cloud_automl_v1beta1_dataset_proto            = src.File_google_cloud_automl_v1beta1_dataset_proto
    77  	File_google_cloud_automl_v1beta1_detection_proto          = src.File_google_cloud_automl_v1beta1_detection_proto
    78  	File_google_cloud_automl_v1beta1_geometry_proto           = src.File_google_cloud_automl_v1beta1_geometry_proto
    79  	File_google_cloud_automl_v1beta1_image_proto              = src.File_google_cloud_automl_v1beta1_image_proto
    80  	File_google_cloud_automl_v1beta1_io_proto                 = src.File_google_cloud_automl_v1beta1_io_proto
    81  	File_google_cloud_automl_v1beta1_model_evaluation_proto   = src.File_google_cloud_automl_v1beta1_model_evaluation_proto
    82  	File_google_cloud_automl_v1beta1_model_proto              = src.File_google_cloud_automl_v1beta1_model_proto
    83  	File_google_cloud_automl_v1beta1_operations_proto         = src.File_google_cloud_automl_v1beta1_operations_proto
    84  	File_google_cloud_automl_v1beta1_prediction_service_proto = src.File_google_cloud_automl_v1beta1_prediction_service_proto
    85  	File_google_cloud_automl_v1beta1_ranges_proto             = src.File_google_cloud_automl_v1beta1_ranges_proto
    86  	File_google_cloud_automl_v1beta1_regression_proto         = src.File_google_cloud_automl_v1beta1_regression_proto
    87  	File_google_cloud_automl_v1beta1_service_proto            = src.File_google_cloud_automl_v1beta1_service_proto
    88  	File_google_cloud_automl_v1beta1_table_spec_proto         = src.File_google_cloud_automl_v1beta1_table_spec_proto
    89  	File_google_cloud_automl_v1beta1_tables_proto             = src.File_google_cloud_automl_v1beta1_tables_proto
    90  	File_google_cloud_automl_v1beta1_temporal_proto           = src.File_google_cloud_automl_v1beta1_temporal_proto
    91  	File_google_cloud_automl_v1beta1_text_extraction_proto    = src.File_google_cloud_automl_v1beta1_text_extraction_proto
    92  	File_google_cloud_automl_v1beta1_text_proto               = src.File_google_cloud_automl_v1beta1_text_proto
    93  	File_google_cloud_automl_v1beta1_text_segment_proto       = src.File_google_cloud_automl_v1beta1_text_segment_proto
    94  	File_google_cloud_automl_v1beta1_text_sentiment_proto     = src.File_google_cloud_automl_v1beta1_text_sentiment_proto
    95  	File_google_cloud_automl_v1beta1_translation_proto        = src.File_google_cloud_automl_v1beta1_translation_proto
    96  	File_google_cloud_automl_v1beta1_video_proto              = src.File_google_cloud_automl_v1beta1_video_proto
    97  	Model_DeploymentState_name                                = src.Model_DeploymentState_name
    98  	Model_DeploymentState_value                               = src.Model_DeploymentState_value
    99  	TypeCode_name                                             = src.TypeCode_name
   100  	TypeCode_value                                            = src.TypeCode_value
   101  )
   102  
   103  // Contains annotation information that is relevant to AutoML.
   104  //
   105  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   106  type AnnotationPayload = src.AnnotationPayload
   107  type AnnotationPayload_Classification = src.AnnotationPayload_Classification
   108  type AnnotationPayload_ImageObjectDetection = src.AnnotationPayload_ImageObjectDetection
   109  type AnnotationPayload_Tables = src.AnnotationPayload_Tables
   110  type AnnotationPayload_TextExtraction = src.AnnotationPayload_TextExtraction
   111  type AnnotationPayload_TextSentiment = src.AnnotationPayload_TextSentiment
   112  type AnnotationPayload_Translation = src.AnnotationPayload_Translation
   113  type AnnotationPayload_VideoClassification = src.AnnotationPayload_VideoClassification
   114  type AnnotationPayload_VideoObjectTracking = src.AnnotationPayload_VideoObjectTracking
   115  
   116  // A definition of an annotation spec.
   117  //
   118  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   119  type AnnotationSpec = src.AnnotationSpec
   120  
   121  // The data statistics of a series of ARRAY values.
   122  //
   123  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   124  type ArrayStats = src.ArrayStats
   125  
   126  // AutoMlClient is the client API for AutoMl service. For semantics around ctx
   127  // use and closing/ending streaming RPCs, please refer to
   128  // https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
   129  //
   130  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   131  type AutoMlClient = src.AutoMlClient
   132  
   133  // AutoMlServer is the server API for AutoMl service.
   134  //
   135  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   136  type AutoMlServer = src.AutoMlServer
   137  
   138  // Input configuration for BatchPredict Action. The format of input depends on
   139  // the ML problem of the model used for prediction. As input source the
   140  // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is
   141  // expected, unless specified otherwise. The formats are represented in EBNF
   142  // with commas being literal and with non-terminal symbols defined near the end
   143  // of this comment. The formats are: - For Image Classification: CSV file(s)
   144  // with each line having just a single column: GCS_FILE_PATH which leads to
   145  // image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. This
   146  // path is treated as the ID in the Batch predict output. Three sample rows:
   147  // gs://folder/image1.jpeg gs://folder/image2.gif gs://folder/image3.png - For
   148  // Image Object Detection: CSV file(s) with each line having just a single
   149  // column: GCS_FILE_PATH which leads to image of up to 30MB in size. Supported
   150  // extensions: .JPEG, .GIF, .PNG. This path is treated as the ID in the Batch
   151  // predict output. Three sample rows: gs://folder/image1.jpeg
   152  // gs://folder/image2.gif gs://folder/image3.png - For Video Classification:
   153  // CSV file(s) with each line in format:
   154  // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END GCS_FILE_PATH leads to
   155  // video of up to 50GB in size and up to 3h duration. Supported extensions:
   156  // .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and TIME_SEGMENT_END must be
   157  // within the length of the video, and end has to be after the start. Three
   158  // sample rows: gs://folder/video1.mp4,10,40 gs://folder/video1.mp4,20,60
   159  // gs://folder/vid2.mov,0,inf - For Video Object Tracking: CSV file(s) with
   160  // each line in format: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END
   161  // GCS_FILE_PATH leads to video of up to 50GB in size and up to 3h duration.
   162  // Supported extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and
   163  // TIME_SEGMENT_END must be within the length of the video, and end has to be
   164  // after the start. Three sample rows: gs://folder/video1.mp4,10,240
   165  // gs://folder/video1.mp4,300,360 gs://folder/vid2.mov,0,inf - For Text
   166  // Classification: CSV file(s) with each line having just a single column:
   167  // GCS_FILE_PATH | TEXT_SNIPPET Any given text file can have size upto 128kB.
   168  // Any given text snippet content must have 60,000 characters or less. Three
   169  // sample rows: gs://folder/text1.txt "Some text content to predict"
   170  // gs://folder/text3.pdf Supported file extensions: .txt, .pdf - For Text
   171  // Sentiment: CSV file(s) with each line having just a single column:
   172  // GCS_FILE_PATH | TEXT_SNIPPET Any given text file can have size upto 128kB.
   173  // Any given text snippet content must have 500 characters or less. Three
   174  // sample rows: gs://folder/text1.txt "Some text content to predict"
   175  // gs://folder/text3.pdf Supported file extensions: .txt, .pdf - For Text
   176  // Extraction .JSONL (i.e. JSON Lines) file(s) which either provide text
   177  // in-line or as documents (for a single BatchPredict call only one of the
   178  // these formats may be used). The in-line .JSONL file(s) contain per line a
   179  // proto that wraps a temporary user-assigned TextSnippet ID (string up to 2000
   180  // characters long) called "id", a TextSnippet proto (in json representation)
   181  // and zero or more TextFeature protos. Any given text snippet content must
   182  // have 30,000 characters or less, and also be UTF-8 NFC encoded (ASCII already
   183  // is). The IDs provided should be unique. The document .JSONL file(s) contain,
   184  // per line, a proto that wraps a Document proto with input_config set. Only
   185  // PDF documents are supported now, and each document must be up to 2MB large.
   186  // Any given .JSONL file must be 100MB or smaller, and no more than 20 files
   187  // may be given. Sample in-line JSON Lines file (presented here with artificial
   188  // line breaks, but the only actual line break is denoted by \n): { "id":
   189  // "my_first_id", "text_snippet": { "content": "dog car cat"}, "text_features":
   190  // [ { "text_segment": {"start_offset": 4, "end_offset": 6}, "structural_type":
   191  // PARAGRAPH, "bounding_poly": { "normalized_vertices": [ {"x": 0.1, "y": 0.1},
   192  // {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y": 0.1}, ] }, } ],
   193  // }\n { "id": "2", "text_snippet": { "content": "An elaborate content",
   194  // "mime_type": "text/plain" } } Sample document JSON Lines file (presented
   195  // here with artificial line breaks, but the only actual line break is denoted
   196  // by \n).: { "document": { "input_config": { "gcs_source": { "input_uris": [
   197  // "gs://folder/document1.pdf" ] } } } }\n { "document": { "input_config": {
   198  // "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] } } } } - For
   199  // Tables: Either
   200  // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
   201  // [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source].
   202  // GCS case: CSV file(s), each by itself 10GB or smaller and total size must be
   203  // 100GB or smaller, where first file must have a header containing column
   204  // names. If the first row of a subsequent file is the same as the header, then
   205  // it is also treated as a header. All other rows contain values for the
   206  // corresponding columns. The column names must contain the model's
   207  // [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
   208  // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] (order
   209  // doesn't matter). The columns corresponding to the model's input feature
   210  // column specs must contain values compatible with the column spec's data
   211  // types. Prediction on all the rows, i.e. the CSV lines, will be attempted.
   212  // For FORECASTING
   213  // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
   214  // all columns having
   215  // [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
   216  // type will be ignored. First three sample rows of a CSV file: "First
   217  // Name","Last Name","Dob","Addresses"
   218  // "John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
   219  // "Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
   220  // BigQuery case: An URI of a BigQuery table. The user data size of the
   221  // BigQuery table must be 100GB or smaller. The column names must contain the
   222  // model's
   223  // [input_feature_column_specs'][google.cloud.automl.v1beta1.TablesModelMetadata.input_feature_column_specs]
   224  // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] (order
   225  // doesn't matter). The columns corresponding to the model's input feature
   226  // column specs must contain values compatible with the column spec's data
   227  // types. Prediction on all the rows of the table will be attempted. For
   228  // FORECASTING
   229  // [prediction_type][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
   230  // all columns having
   231  // [TIME_SERIES_AVAILABLE_PAST_ONLY][google.cloud.automl.v1beta1.ColumnSpec.ForecastingMetadata.ColumnType]
   232  // type will be ignored. Definitions: GCS_FILE_PATH = A path to file on GCS,
   233  // e.g. "gs://folder/video.avi". TEXT_SNIPPET = A content of a text snippet,
   234  // UTF-8 encoded, enclosed within double quotes ("") TIME_SEGMENT_START =
   235  // TIME_OFFSET Expresses a beginning, inclusive, of a time segment within an
   236  // example that has a time dimension (e.g. video). TIME_SEGMENT_END =
   237  // TIME_OFFSET Expresses an end, exclusive, of a time segment within an example
   238  // that has a time dimension (e.g. video). TIME_OFFSET = A number of seconds as
   239  // measured from the start of an example (e.g. video). Fractions are allowed,
   240  // up to a microsecond precision. "inf" is allowed and it means the end of the
   241  // example. Errors: If any of the provided CSV files can't be parsed or if more
   242  // than certain percent of CSV rows cannot be processed then the operation
   243  // fails and prediction does not happen. Regardless of overall success or
   244  // failure the per-row failures, up to a certain count cap, will be listed in
   245  // Operation.metadata.partial_failures.
   246  //
   247  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   248  type BatchPredictInputConfig = src.BatchPredictInputConfig
   249  type BatchPredictInputConfig_BigquerySource = src.BatchPredictInputConfig_BigquerySource
   250  type BatchPredictInputConfig_GcsSource = src.BatchPredictInputConfig_GcsSource
   251  
   252  // Details of BatchPredict operation.
   253  //
   254  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   255  type BatchPredictOperationMetadata = src.BatchPredictOperationMetadata
   256  
   257  // Further describes this batch predict's output. Supplements
   258  // [BatchPredictOutputConfig][google.cloud.automl.v1beta1.BatchPredictOutputConfig].
   259  //
   260  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   261  type BatchPredictOperationMetadata_BatchPredictOutputInfo = src.BatchPredictOperationMetadata_BatchPredictOutputInfo
   262  type BatchPredictOperationMetadata_BatchPredictOutputInfo_BigqueryOutputDataset = src.BatchPredictOperationMetadata_BatchPredictOutputInfo_BigqueryOutputDataset
   263  type BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory = src.BatchPredictOperationMetadata_BatchPredictOutputInfo_GcsOutputDirectory
   264  
   265  // Output configuration for BatchPredict Action. # As destination the
   266  // [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination]
   267  // must be set unless specified otherwise for a domain. If gcs_destination is
   268  // set then in the given directory a new directory is created. Its name will be
   269  // "prediction-<model-display-name>-<timestamp-of-prediction-call>", where
   270  // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. The contents of it
   271  // depends on the ML problem the predictions are made for. - For Image
   272  // Classification: In the created directory files
   273  // `image_classification_1.jsonl`,
   274  // `image_classification_2.jsonl`,...,`image_classification_N.jsonl` will be
   275  // created, where N may be 1, and depends on the total number of the
   276  // successfully predicted images and annotations. A single image will be listed
   277  // only once with all its annotations, and its annotations will never be split
   278  // across files. Each .JSONL file will contain, per line, a JSON representation
   279  // of a proto that wraps image's "ID" : "<id_value>" followed by a list of zero
   280  // or more AnnotationPayload protos (called annotations), which have
   281  // classification detail populated. If prediction for any image failed
   282  // (partially or completely), then an additional `errors_1.jsonl`,
   283  // `errors_2.jsonl`,..., `errors_N.jsonl` files will be created (N depends on
   284  // total number of failed predictions). These files will have a JSON
   285  // representation of a proto that wraps the same "ID" : "<id_value>" but here
   286  // followed by exactly one [`google.rpc.Status`](https:
   287  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   288  // containing only `code` and `message`fields. * For Image Object Detection: In
   289  // the created directory files `image_object_detection_1.jsonl`,
   290  // `image_object_detection_2.jsonl`,...,`image_object_detection_N.jsonl` will
   291  // be created, where N may be 1, and depends on the total number of the
   292  // successfully predicted images and annotations. Each .JSONL file will
   293  // contain, per line, a JSON representation of a proto that wraps image's "ID"
   294  // : "<id_value>" followed by a list of zero or more AnnotationPayload protos
   295  // (called annotations), which have image_object_detection detail populated. A
   296  // single image will be listed only once with all its annotations, and its
   297  // annotations will never be split across files. If prediction for any image
   298  // failed (partially or completely), then additional `errors_1.jsonl`,
   299  // `errors_2.jsonl`,..., `errors_N.jsonl` files will be created (N depends on
   300  // total number of failed predictions). These files will have a JSON
   301  // representation of a proto that wraps the same "ID" : "<id_value>" but here
   302  // followed by exactly one [`google.rpc.Status`](https:
   303  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   304  // containing only `code` and `message`fields. * For Video Classification: In
   305  // the created directory a video_classification.csv file, and a .JSON file per
   306  // each video classification requested in the input (i.e. each line in given
   307  // CSV(s)), will be created. The format of video_classification.csv is:
   308  // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
   309  // where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
   310  // the prediction input lines (i.e. video_classification.csv has precisely the
   311  // same number of lines as the prediction input had.) JSON_FILE_NAME = Name of
   312  // .JSON file in the output directory, which contains prediction responses for
   313  // the video time segment. STATUS = "OK" if prediction completed successfully,
   314  // or an error code with message otherwise. If STATUS is not "OK" then the
   315  // .JSON file for that line may not exist or be empty. Each .JSON file,
   316  // assuming STATUS is "OK", will contain a list of AnnotationPayload protos in
   317  // JSON format, which are the predictions for the video time segment the file
   318  // is assigned to in the video_classification.csv. All AnnotationPayload protos
   319  // will have video_classification field set, and will be sorted by
   320  // video_classification.type field (note that the returned types are governed
   321  // by `classifaction_types` parameter in
   322  // [PredictService.BatchPredictRequest.params][]). * For Video Object Tracking:
   323  // In the created directory a video_object_tracking.csv file will be created,
   324  // and multiple files video_object_trackinng_1.json,
   325  // video_object_trackinng_2.json,..., video_object_trackinng_N.json, where N is
   326  // the number of requests in the input (i.e. the number of lines in given
   327  // CSV(s)). The format of video_object_tracking.csv is:
   328  // GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END,JSON_FILE_NAME,STATUS
   329  // where: GCS_FILE_PATH,TIME_SEGMENT_START,TIME_SEGMENT_END = matches 1 to 1
   330  // the prediction input lines (i.e. video_object_tracking.csv has precisely the
   331  // same number of lines as the prediction input had.) JSON_FILE_NAME = Name of
   332  // .JSON file in the output directory, which contains prediction responses for
   333  // the video time segment. STATUS = "OK" if prediction completed successfully,
   334  // or an error code with message otherwise. If STATUS is not "OK" then the
   335  // .JSON file for that line may not exist or be empty. Each .JSON file,
   336  // assuming STATUS is "OK", will contain a list of AnnotationPayload protos in
   337  // JSON format, which are the predictions for each frame of the video time
   338  // segment the file is assigned to in video_object_tracking.csv. All
   339  // AnnotationPayload protos will have video_object_tracking field set. * For
   340  // Text Classification: In the created directory files
   341  // `text_classification_1.jsonl`,
   342  // `text_classification_2.jsonl`,...,`text_classification_N.jsonl` will be
   343  // created, where N may be 1, and depends on the total number of inputs and
   344  // annotations found. Each .JSONL file will contain, per line, a JSON
   345  // representation of a proto that wraps input text snippet or input text file
   346  // and a list of zero or more AnnotationPayload protos (called annotations),
   347  // which have classification detail populated. A single text snippet or file
   348  // will be listed only once with all its annotations, and its annotations will
   349  // never be split across files. If prediction for any text snippet or file
   350  // failed (partially or completely), then additional `errors_1.jsonl`,
   351  // `errors_2.jsonl`,..., `errors_N.jsonl` files will be created (N depends on
   352  // total number of failed predictions). These files will have a JSON
   353  // representation of a proto that wraps input text snippet or input text file
   354  // followed by exactly one [`google.rpc.Status`](https:
   355  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   356  // containing only `code` and `message`. * For Text Sentiment: In the created
   357  // directory files `text_sentiment_1.jsonl`,
   358  // `text_sentiment_2.jsonl`,...,`text_sentiment_N.jsonl` will be created, where
   359  // N may be 1, and depends on the total number of inputs and annotations found.
   360  // Each .JSONL file will contain, per line, a JSON representation of a proto
   361  // that wraps input text snippet or input text file and a list of zero or more
   362  // AnnotationPayload protos (called annotations), which have text_sentiment
   363  // detail populated. A single text snippet or file will be listed only once
   364  // with all its annotations, and its annotations will never be split across
   365  // files. If prediction for any text snippet or file failed (partially or
   366  // completely), then additional `errors_1.jsonl`, `errors_2.jsonl`,...,
   367  // `errors_N.jsonl` files will be created (N depends on total number of failed
   368  // predictions). These files will have a JSON representation of a proto that
   369  // wraps input text snippet or input text file followed by exactly one
   370  // [`google.rpc.Status`](https:
   371  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   372  // containing only `code` and `message`. * For Text Extraction: In the created
   373  // directory files `text_extraction_1.jsonl`,
   374  // `text_extraction_2.jsonl`,...,`text_extraction_N.jsonl` will be created,
   375  // where N may be 1, and depends on the total number of inputs and annotations
   376  // found. The contents of these .JSONL file(s) depend on whether the input used
   377  // inline text, or documents. If input was inline, then each .JSONL file will
   378  // contain, per line, a JSON representation of a proto that wraps given in
   379  // request text snippet's "id" (if specified), followed by input text snippet,
   380  // and a list of zero or more AnnotationPayload protos (called annotations),
   381  // which have text_extraction detail populated. A single text snippet will be
   382  // listed only once with all its annotations, and its annotations will never be
   383  // split across files. If input used documents, then each .JSONL file will
   384  // contain, per line, a JSON representation of a proto that wraps given in
   385  // request document proto, followed by its OCR-ed representation in the form of
   386  // a text snippet, finally followed by a list of zero or more AnnotationPayload
   387  // protos (called annotations), which have text_extraction detail populated and
   388  // refer, via their indices, to the OCR-ed text snippet. A single document (and
   389  // its text snippet) will be listed only once with all its annotations, and its
   390  // annotations will never be split across files. If prediction for any text
   391  // snippet failed (partially or completely), then additional `errors_1.jsonl`,
   392  // `errors_2.jsonl`,..., `errors_N.jsonl` files will be created (N depends on
   393  // total number of failed predictions). These files will have a JSON
   394  // representation of a proto that wraps either the "id" : "<id_value>" (in case
   395  // of inline) or the document proto (in case of document) but here followed by
   396  // exactly one [`google.rpc.Status`](https:
   397  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   398  // containing only `code` and `message`. * For Tables: Output depends on
   399  // whether
   400  // [gcs_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.gcs_destination]
   401  // or
   402  // [bigquery_destination][google.cloud.automl.v1beta1.BatchPredictOutputConfig.bigquery_destination]
   403  // is set (either is allowed). GCS case: In the created directory files
   404  // `tables_1.csv`, `tables_2.csv`,..., `tables_N.csv` will be created, where N
   405  // may be 1, and depends on the total number of the successfully predicted
   406  // rows. For all CLASSIFICATION
   407  // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
   408  // Each .csv file will contain a header, listing all columns'
   409  // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] given
   410  // on input followed by M target column names in the format of
   411  // "<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
   412  // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>_<target
   413  // value>_score" where M is the number of distinct target values, i.e. number
   414  // of distinct values in the target column of the table used to train the
   415  // model. Subsequent lines will contain the respective values of successfully
   416  // predicted rows, with the last, i.e. the target, columns having the
   417  // corresponding prediction
   418  // [scores][google.cloud.automl.v1beta1.TablesAnnotation.score]. For REGRESSION
   419  // and FORECASTING
   420  // [prediction_type-s][google.cloud.automl.v1beta1.TablesModelMetadata.prediction_type]:
   421  // Each .csv file will contain a header, listing all columns'
// [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name] given on input
   423  // followed by the predicted target column with name in the format of
   424  // "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
   425  // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>"
   426  // Subsequent lines will contain the respective values of successfully
   427  // predicted rows, with the last, i.e. the target, column having the predicted
   428  // target value. If prediction for any rows failed, then an additional
   429  // `errors_1.csv`, `errors_2.csv`,..., `errors_N.csv` will be created (N
   430  // depends on total number of failed rows). These files will have analogous
   431  // format as `tables_*.csv`, but always with a single target column having
   432  // [`google.rpc.Status`](https:
   433  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   434  // represented as a JSON string, and containing only `code` and `message`.
   435  // BigQuery case:
   436  // [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
   437  // pointing to a BigQuery project must be set. In the given project a new
   438  // dataset will be created with name
   439  // `prediction_<model-display-name>_<timestamp-of-prediction-call>` where
   440  // <model-display-name> will be made BigQuery-dataset-name compatible (e.g.
   441  // most special characters will become underscores), and timestamp will be in
   442  // YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two
   443  // tables will be created, `predictions`, and `errors`. The `predictions`
   444  // table's column names will be the input columns'
   445  // [display_name-s][google.cloud.automl.v1beta1.ColumnSpec.display_name]
   446  // followed by the target column with name in the format of
   447  // "predicted_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
   448  // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>" The
   449  // input feature columns will contain the respective values of successfully
   450  // predicted rows, with the target column having an ARRAY of
   451  // [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
   452  // represented as STRUCT-s, containing
   453  // [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation]. The
   454  // `errors` table contains rows for which the prediction has failed, it has
   455  // analogous input columns while the target column name is in the format of
   456  // "errors_<[target_column_specs][google.cloud.automl.v1beta1.TablesModelMetadata.target_column_spec]
   457  // [display_name][google.cloud.automl.v1beta1.ColumnSpec.display_name]>", and
   458  // as a value has [`google.rpc.Status`](https:
   459  // //github.com/googleapis/googleapis/blob/master/google/rpc/status.proto)
   460  // represented as a STRUCT, and containing only `code` and `message`.
   461  //
   462  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   463  type BatchPredictOutputConfig = src.BatchPredictOutputConfig
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BatchPredictOutputConfig_BigqueryDestination = src.BatchPredictOutputConfig_BigqueryDestination

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BatchPredictOutputConfig_GcsDestination = src.BatchPredictOutputConfig_GcsDestination
   466  
// Request message for
// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BatchPredictRequest = src.BatchPredictRequest

// Result of the Batch Predict. This message is returned in
// [response][google.longrunning.Operation.response] of the operation returned
// by the
// [PredictionService.BatchPredict][google.cloud.automl.v1beta1.PredictionService.BatchPredict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BatchPredictResult = src.BatchPredictResult

// The BigQuery location for the output content.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BigQueryDestination = src.BigQueryDestination

// The BigQuery location for the input content.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BigQuerySource = src.BigQuerySource

// Bounding box matching model metrics for a single intersection-over-union
// threshold and multiple label match confidence thresholds.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BoundingBoxMetricsEntry = src.BoundingBoxMetricsEntry

// Metrics for a single confidence threshold.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BoundingBoxMetricsEntry_ConfidenceMetricsEntry = src.BoundingBoxMetricsEntry_ConfidenceMetricsEntry

// A bounding polygon of a detected object on a plane. On output both vertices
// and normalized_vertices are provided. The polygon is formed by connecting
// vertices in the order they are listed.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type BoundingPoly = src.BoundingPoly

// The data statistics of a series of CATEGORY values.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CategoryStats = src.CategoryStats

// The statistics of a single CATEGORY value.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CategoryStats_SingleCategoryStats = src.CategoryStats_SingleCategoryStats

// Contains annotation details specific to classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationAnnotation = src.ClassificationAnnotation

// Model evaluation metrics for classification problems. Note: For Video
// Classification these metrics only describe quality of the Video
// Classification predictions of "segment_classification" type.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationEvaluationMetrics = src.ClassificationEvaluationMetrics

// Metrics for a single confidence threshold.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationEvaluationMetrics_ConfidenceMetricsEntry = src.ClassificationEvaluationMetrics_ConfidenceMetricsEntry

// Confusion matrix of the model running the classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationEvaluationMetrics_ConfusionMatrix = src.ClassificationEvaluationMetrics_ConfusionMatrix

// Output only. A row in the confusion matrix.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationEvaluationMetrics_ConfusionMatrix_Row = src.ClassificationEvaluationMetrics_ConfusionMatrix_Row

// Type of the classification problem.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ClassificationType = src.ClassificationType

// A representation of a column in a relational table. When listing them,
// column specs are returned in the same order in which they were given on
// import. Used by: - Tables
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ColumnSpec = src.ColumnSpec
   557  
// Identifies the table's column, and its correlation with the column this
// ColumnSpec describes.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ColumnSpec_CorrelatedColumn = src.ColumnSpec_CorrelatedColumn

// A correlation statistics between two series of DataType values. The series
// may have differing DataType-s, but within a single series the DataType must
// be the same.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CorrelationStats = src.CorrelationStats

// Request message for
// [AutoMl.CreateDataset][google.cloud.automl.v1beta1.AutoMl.CreateDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CreateDatasetRequest = src.CreateDatasetRequest

// Details of CreateModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CreateModelOperationMetadata = src.CreateModelOperationMetadata

// Request message for
// [AutoMl.CreateModel][google.cloud.automl.v1beta1.AutoMl.CreateModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type CreateModelRequest = src.CreateModelRequest

// The data statistics of a series of values that share the same DataType.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats = src.DataStats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_ArrayStats = src.DataStats_ArrayStats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_CategoryStats = src.DataStats_CategoryStats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_Float64Stats = src.DataStats_Float64Stats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_StringStats = src.DataStats_StringStats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_StructStats = src.DataStats_StructStats

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataStats_TimestampStats = src.DataStats_TimestampStats
   598  
// Indicates the type of data that can be stored in a structured data entity
// (e.g. a table).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataType = src.DataType

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataType_ListElementType = src.DataType_ListElementType

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataType_StructType = src.DataType_StructType

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DataType_TimeFormat = src.DataType_TimeFormat

// A workspace for solving a single, particular machine learning (ML) problem.
// A workspace contains examples that may be annotated.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset = src.Dataset

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_ImageClassificationDatasetMetadata = src.Dataset_ImageClassificationDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_ImageObjectDetectionDatasetMetadata = src.Dataset_ImageObjectDetectionDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_TablesDatasetMetadata = src.Dataset_TablesDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_TextClassificationDatasetMetadata = src.Dataset_TextClassificationDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_TextExtractionDatasetMetadata = src.Dataset_TextExtractionDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_TextSentimentDatasetMetadata = src.Dataset_TextSentimentDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_TranslationDatasetMetadata = src.Dataset_TranslationDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_VideoClassificationDatasetMetadata = src.Dataset_VideoClassificationDatasetMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Dataset_VideoObjectTrackingDatasetMetadata = src.Dataset_VideoObjectTrackingDatasetMetadata

// Request message for
// [AutoMl.DeleteDataset][google.cloud.automl.v1beta1.AutoMl.DeleteDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeleteDatasetRequest = src.DeleteDatasetRequest

// Request message for
// [AutoMl.DeleteModel][google.cloud.automl.v1beta1.AutoMl.DeleteModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeleteModelRequest = src.DeleteModelRequest

// Details of operations that perform deletes of any entities.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeleteOperationMetadata = src.DeleteOperationMetadata

// Details of DeployModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeployModelOperationMetadata = src.DeployModelOperationMetadata

// Request message for
// [AutoMl.DeployModel][google.cloud.automl.v1beta1.AutoMl.DeployModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeployModelRequest = src.DeployModelRequest

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeployModelRequest_ImageClassificationModelDeploymentMetadata = src.DeployModelRequest_ImageClassificationModelDeploymentMetadata

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DeployModelRequest_ImageObjectDetectionModelDeploymentMetadata = src.DeployModelRequest_ImageObjectDetectionModelDeploymentMetadata
   652  
// A structured text document, e.g. a PDF.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Document = src.Document

// Message that describes dimension of a document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DocumentDimensions = src.DocumentDimensions

// Unit of the document dimension.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DocumentDimensions_DocumentDimensionUnit = src.DocumentDimensions_DocumentDimensionUnit

// Input configuration of a [Document][google.cloud.automl.v1beta1.Document].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DocumentInputConfig = src.DocumentInputConfig

// Describes the layout information of a
// [text_segment][google.cloud.automl.v1beta1.Document.Layout.text_segment] in
// the document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Document_Layout = src.Document_Layout

// The type of TextSegment in the context of the original document.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Document_Layout_TextSegmentType = src.Document_Layout_TextSegmentType

// A range between two double numbers.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type DoubleRange = src.DoubleRange

// Example data used for training or prediction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExamplePayload = src.ExamplePayload

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExamplePayload_Document = src.ExamplePayload_Document

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExamplePayload_Image = src.ExamplePayload_Image

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExamplePayload_Row = src.ExamplePayload_Row

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExamplePayload_TextSnippet = src.ExamplePayload_TextSnippet
   698  
// Details of ExportData operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportDataOperationMetadata = src.ExportDataOperationMetadata

// Further describes this export data's output. Supplements
// [OutputConfig][google.cloud.automl.v1beta1.OutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportDataOperationMetadata_ExportDataOutputInfo = src.ExportDataOperationMetadata_ExportDataOutputInfo

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportDataOperationMetadata_ExportDataOutputInfo_BigqueryOutputDataset = src.ExportDataOperationMetadata_ExportDataOutputInfo_BigqueryOutputDataset

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportDataOperationMetadata_ExportDataOutputInfo_GcsOutputDirectory = src.ExportDataOperationMetadata_ExportDataOutputInfo_GcsOutputDirectory

// Request message for
// [AutoMl.ExportData][google.cloud.automl.v1beta1.AutoMl.ExportData].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportDataRequest = src.ExportDataRequest

// Details of ExportEvaluatedExamples operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportEvaluatedExamplesOperationMetadata = src.ExportEvaluatedExamplesOperationMetadata

// Further describes the output of the evaluated examples export. Supplements
// [ExportEvaluatedExamplesOutputConfig][google.cloud.automl.v1beta1.ExportEvaluatedExamplesOutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportEvaluatedExamplesOperationMetadata_ExportEvaluatedExamplesOutputInfo = src.ExportEvaluatedExamplesOperationMetadata_ExportEvaluatedExamplesOutputInfo

// Output configuration for ExportEvaluatedExamples Action. Note that this
// call is available only for 30 days since the moment the model was evaluated.
// The output depends on the domain, as follows (note that only examples from
// the TEST set are exported): - For Tables:
// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
// pointing to a BigQuery project must be set. In the given project a new
// dataset will be created with name
// `export_evaluated_examples_<model-display-name>_<timestamp-of-export-call>`
// where <model-display-name> will be made BigQuery-dataset-name compatible
// (e.g. most special characters will become underscores), and timestamp will
// be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset an
// `evaluated_examples` table will be created. It will have all the same
// columns as the
// [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_spec_id]
// of the [dataset][google.cloud.automl.v1beta1.Model.dataset_id] from which
// the model was created, as they were at the moment of model's evaluation
// (this includes the target column with its ground truth), followed by a
// column called "predicted_<target_column>". That last column will contain the
// model's prediction result for each respective row, given as ARRAY of
// [AnnotationPayloads][google.cloud.automl.v1beta1.AnnotationPayload],
// represented as STRUCT-s, containing
// [TablesAnnotation][google.cloud.automl.v1beta1.TablesAnnotation].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportEvaluatedExamplesOutputConfig = src.ExportEvaluatedExamplesOutputConfig

// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportEvaluatedExamplesOutputConfig_BigqueryDestination = src.ExportEvaluatedExamplesOutputConfig_BigqueryDestination
   755  
// Request message for
// [AutoMl.ExportEvaluatedExamples][google.cloud.automl.v1beta1.AutoMl.ExportEvaluatedExamples].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportEvaluatedExamplesRequest = src.ExportEvaluatedExamplesRequest

// Details of ExportModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportModelOperationMetadata = src.ExportModelOperationMetadata

// Further describes the output of model export. Supplements
// [ModelExportOutputConfig][google.cloud.automl.v1beta1.ModelExportOutputConfig].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportModelOperationMetadata_ExportModelOutputInfo = src.ExportModelOperationMetadata_ExportModelOutputInfo

// Request message for
// [AutoMl.ExportModel][google.cloud.automl.v1beta1.AutoMl.ExportModel]. Models
// need to be enabled for exporting, otherwise an error code will be returned.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ExportModelRequest = src.ExportModelRequest

// The data statistics of a series of FLOAT64 values.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Float64Stats = src.Float64Stats

// A bucket of a histogram.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Float64Stats_HistogramBucket = src.Float64Stats_HistogramBucket

// The Google Container Registry (GCR) location where the image must be pushed
// to.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type GcrDestination = src.GcrDestination

// The Google Cloud Storage location where the output is to be written to.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type GcsDestination = src.GcsDestination

// The Google Cloud Storage location for the input content.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type GcsSource = src.GcsSource
   799  
   800  // The Google Cloud Storage location for the input content.
   801  //
   802  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   803  type GcsSource = src.GcsSource
   804  
   805  // Request message for
   806  // [AutoMl.GetAnnotationSpec][google.cloud.automl.v1beta1.AutoMl.GetAnnotationSpec].
   807  //
   808  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   809  type GetAnnotationSpecRequest = src.GetAnnotationSpecRequest
   810  
   811  // Request message for
   812  // [AutoMl.GetColumnSpec][google.cloud.automl.v1beta1.AutoMl.GetColumnSpec].
   813  //
   814  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   815  type GetColumnSpecRequest = src.GetColumnSpecRequest
   816  
   817  // Request message for
   818  // [AutoMl.GetDataset][google.cloud.automl.v1beta1.AutoMl.GetDataset].
   819  //
   820  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   821  type GetDatasetRequest = src.GetDatasetRequest
   822  
   823  // Request message for
   824  // [AutoMl.GetModelEvaluation][google.cloud.automl.v1beta1.AutoMl.GetModelEvaluation].
   825  //
   826  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   827  type GetModelEvaluationRequest = src.GetModelEvaluationRequest
   828  
   829  // Request message for
   830  // [AutoMl.GetModel][google.cloud.automl.v1beta1.AutoMl.GetModel].
   831  //
   832  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   833  type GetModelRequest = src.GetModelRequest
   834  
   835  // Request message for
   836  // [AutoMl.GetTableSpec][google.cloud.automl.v1beta1.AutoMl.GetTableSpec].
   837  //
   838  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   839  type GetTableSpecRequest = src.GetTableSpecRequest
   840  
   841  // A representation of an image. Only images up to 30MB in size are supported.
   842  //
   843  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   844  type Image = src.Image
   845  
   846  // Dataset metadata that is specific to image classification.
   847  //
   848  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   849  type ImageClassificationDatasetMetadata = src.ImageClassificationDatasetMetadata
   850  
   851  // Model deployment metadata specific to Image Classification.
   852  //
   853  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   854  type ImageClassificationModelDeploymentMetadata = src.ImageClassificationModelDeploymentMetadata
   855  
   856  // Model metadata for image classification.
   857  //
   858  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   859  type ImageClassificationModelMetadata = src.ImageClassificationModelMetadata
   860  
   861  // Annotation details for image object detection.
   862  //
   863  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   864  type ImageObjectDetectionAnnotation = src.ImageObjectDetectionAnnotation
   865  
   866  // Dataset metadata specific to image object detection.
   867  //
   868  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   869  type ImageObjectDetectionDatasetMetadata = src.ImageObjectDetectionDatasetMetadata
   870  
   871  // Model evaluation metrics for image object detection problems. Evaluates
   872  // prediction quality of labeled bounding boxes.
   873  //
   874  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   875  type ImageObjectDetectionEvaluationMetrics = src.ImageObjectDetectionEvaluationMetrics
   876  
   877  // Model deployment metadata specific to Image Object Detection.
   878  //
   879  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   880  type ImageObjectDetectionModelDeploymentMetadata = src.ImageObjectDetectionModelDeploymentMetadata
   881  
   882  // Model metadata specific to image object detection.
   883  //
   884  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   885  type ImageObjectDetectionModelMetadata = src.ImageObjectDetectionModelMetadata
   886  type Image_ImageBytes = src.Image_ImageBytes
   887  type Image_InputConfig = src.Image_InputConfig
   888  
   889  // Details of ImportData operation.
   890  //
   891  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   892  type ImportDataOperationMetadata = src.ImportDataOperationMetadata
   893  
   894  // Request message for
   895  // [AutoMl.ImportData][google.cloud.automl.v1beta1.AutoMl.ImportData].
   896  //
   897  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
   898  type ImportDataRequest = src.ImportDataRequest
   899  
   900  // Input configuration for ImportData Action. The format of input depends on
   901  // dataset_metadata the Dataset into which the import is happening has. As
   902  // input source the
   903  // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] is
   904  // expected, unless specified otherwise. Additionally any input .CSV file by
   905  // itself must be 100MB or smaller, unless specified otherwise. If an "example"
   906  // file (that is, image, video etc.) with identical content (even if it had
   907  // different GCS_FILE_PATH) is mentioned multiple times, then its label,
   908  // bounding boxes etc. are appended. The same file should be always provided
   909  // with the same ML_USE and GCS_FILE_PATH, if it is not, then these values are
   910  // nondeterministically selected from the given ones. The formats are
   911  // represented in EBNF with commas being literal and with non-terminal symbols
   912  // defined near the end of this comment. The formats are: - For Image
   913  // Classification: CSV file(s) with each line in format:
   914  // ML_USE,GCS_FILE_PATH,LABEL,LABEL,... GCS_FILE_PATH leads to image of up to
   915  // 30MB in size. Supported extensions: .JPEG, .GIF, .PNG, .WEBP, .BMP, .TIFF,
   916  // .ICO For MULTICLASS classification type, at most one LABEL is allowed per
   917  // image. If an image has not yet been labeled, then it should be mentioned
   918  // just once with no LABEL. Some sample rows:
   919  // TRAIN,gs://folder/image1.jpg,daisy
   920  // TEST,gs://folder/image2.jpg,dandelion,tulip,rose
   921  // UNASSIGNED,gs://folder/image3.jpg,daisy UNASSIGNED,gs://folder/image4.jpg -
   922  // For Image Object Detection: CSV file(s) with each line in format:
   923  // ML_USE,GCS_FILE_PATH,(LABEL,BOUNDING_BOX | ,,,,,,,) GCS_FILE_PATH leads to
   924  // image of up to 30MB in size. Supported extensions: .JPEG, .GIF, .PNG. Each
   925  // image is assumed to be exhaustively labeled. The minimum allowed
   926  // BOUNDING_BOX edge length is 0.01, and no more than 500 BOUNDING_BOX-es per
   927  // image are allowed (one BOUNDING_BOX is defined per line). If an image has
   928  // not yet been labeled, then it should be mentioned just once with no LABEL
   929  // and the ",,,,,,," in place of the BOUNDING_BOX. For images which are known
// to not contain any bounding boxes, they should be labelled explicitly as
   931  // "NEGATIVE_IMAGE", followed by ",,,,,,," in place of the BOUNDING_BOX. Sample
   932  // rows: TRAIN,gs://folder/image1.png,car,0.1,0.1,,,0.3,0.3,,
   933  // TRAIN,gs://folder/image1.png,bike,.7,.6,,,.8,.9,,
   934  // UNASSIGNED,gs://folder/im2.png,car,0.1,0.1,0.2,0.1,0.2,0.3,0.1,0.3
   935  // TEST,gs://folder/im3.png,,,,,,,,,
   936  // TRAIN,gs://folder/im4.png,NEGATIVE_IMAGE,,,,,,,,, - For Video
   937  // Classification: CSV file(s) with each line in format: ML_USE,GCS_FILE_PATH
   938  // where ML_USE VALIDATE value should not be used. The GCS_FILE_PATH should
   939  // lead to another .csv file which describes examples that have given ML_USE,
   940  // using the following row format:
   941  // GCS_FILE_PATH,(LABEL,TIME_SEGMENT_START,TIME_SEGMENT_END | ,,) Here
   942  // GCS_FILE_PATH leads to a video of up to 50GB in size and up to 3h duration.
   943  // Supported extensions: .MOV, .MPEG4, .MP4, .AVI. TIME_SEGMENT_START and
   944  // TIME_SEGMENT_END must be within the length of the video, and end has to be
   945  // after the start. Any segment of a video which has one or more labels on it,
   946  // is considered a hard negative for all other labels. Any segment with no
   947  // labels on it is considered to be unknown. If a whole video is unknown, then
   948  // it shuold be mentioned just once with ",," in place of LABEL,
   949  // TIME_SEGMENT_START,TIME_SEGMENT_END. Sample top level CSV file:
   950  // TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv
   951  // UNASSIGNED,gs://folder/other_videos.csv Sample rows of a CSV file for a
   952  // particular ML_USE: gs://folder/video1.avi,car,120,180.000021
   953  // gs://folder/video1.avi,bike,150,180.000021 gs://folder/vid2.avi,car,0,60.5
   954  // gs://folder/vid3.avi,,, - For Video Object Tracking: CSV file(s) with each
   955  // line in format: ML_USE,GCS_FILE_PATH where ML_USE VALIDATE value should not
   956  // be used. The GCS_FILE_PATH should lead to another .csv file which describes
   957  // examples that have given ML_USE, using one of the following row format:
   958  // GCS_FILE_PATH,LABEL,[INSTANCE_ID],TIMESTAMP,BOUNDING_BOX or
   959  // GCS_FILE_PATH,,,,,,,,,, Here GCS_FILE_PATH leads to a video of up to 50GB in
   960  // size and up to 3h duration. Supported extensions: .MOV, .MPEG4, .MP4, .AVI.
   961  // Providing INSTANCE_IDs can help to obtain a better model. When a specific
   962  // labeled entity leaves the video frame, and shows up afterwards it is not
   963  // required, albeit preferable, that the same INSTANCE_ID is given to it.
   964  // TIMESTAMP must be within the length of the video, the BOUNDING_BOX is
   965  // assumed to be drawn on the closest video's frame to the TIMESTAMP. Any
   966  // mentioned by the TIMESTAMP frame is expected to be exhaustively labeled and
   967  // no more than 500 BOUNDING_BOX-es per frame are allowed. If a whole video is
   968  // unknown, then it should be mentioned just once with ",,,,,,,,,," in place of
   969  // LABEL, [INSTANCE_ID],TIMESTAMP,BOUNDING_BOX. Sample top level CSV file:
   970  // TRAIN,gs://folder/train_videos.csv TEST,gs://folder/test_videos.csv
   971  // UNASSIGNED,gs://folder/other_videos.csv Seven sample rows of a CSV file for
   972  // a particular ML_USE:
   973  // gs://folder/video1.avi,car,1,12.10,0.8,0.8,0.9,0.8,0.9,0.9,0.8,0.9
   974  // gs://folder/video1.avi,car,1,12.90,0.4,0.8,0.5,0.8,0.5,0.9,0.4,0.9
   975  // gs://folder/video1.avi,car,2,12.10,.4,.2,.5,.2,.5,.3,.4,.3
   976  // gs://folder/video1.avi,car,2,12.90,.8,.2,,,.9,.3,,
   977  // gs://folder/video1.avi,bike,,12.50,.45,.45,,,.55,.55,,
   978  // gs://folder/video2.avi,car,1,0,.1,.9,,,.9,.1,,
   979  // gs://folder/video2.avi,,,,,,,,,,, - For Text Extraction: CSV file(s) with
   980  // each line in format: ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .JSONL
   981  // (that is, JSON Lines) file which either imports text in-line or as
   982  // documents. Any given .JSONL file must be 100MB or smaller. The in-line
   983  // .JSONL file contains, per line, a proto that wraps a TextSnippet proto (in
   984  // json representation) followed by one or more AnnotationPayload protos
   985  // (called annotations), which have display_name and text_extraction detail
   986  // populated. The given text is expected to be annotated exhaustively, for
   987  // example, if you look for animals and text contains "dolphin" that is not
   988  // labeled, then "dolphin" is assumed to not be an animal. Any given text
   989  // snippet content must be 10KB or smaller, and also be UTF-8 NFC encoded
   990  // (ASCII already is). The document .JSONL file contains, per line, a proto
   991  // that wraps a Document proto. The Document proto must have either
   992  // document_text or input_config set. In document_text case, the Document proto
   993  // may also contain the spatial information of the document, including layout,
   994  // document dimension and page number. In input_config case, only PDF documents
   995  // are supported now, and each document may be up to 2MB large. Currently,
   996  // annotations on documents cannot be specified at import. Three sample CSV
   997  // rows: TRAIN,gs://folder/file1.jsonl VALIDATE,gs://folder/file2.jsonl
   998  // TEST,gs://folder/file3.jsonl Sample in-line JSON Lines file for entity
   999  // extraction (presented here with artificial line breaks, but the only actual
  1000  // line break is denoted by \n).: { "document": { "document_text": {"content":
  1001  // "dog cat"} "layout": [ { "text_segment": { "start_offset": 0, "end_offset":
  1002  // 3, }, "page_number": 1, "bounding_poly": { "normalized_vertices": [ {"x":
  1003  // 0.1, "y": 0.1}, {"x": 0.1, "y": 0.3}, {"x": 0.3, "y": 0.3}, {"x": 0.3, "y":
  1004  // 0.1}, ], }, "text_segment_type": TOKEN, }, { "text_segment": {
  1005  // "start_offset": 4, "end_offset": 7, }, "page_number": 1, "bounding_poly": {
  1006  // "normalized_vertices": [ {"x": 0.4, "y": 0.1}, {"x": 0.4, "y": 0.3}, {"x":
  1007  // 0.8, "y": 0.3}, {"x": 0.8, "y": 0.1}, ], }, "text_segment_type": TOKEN, } ],
  1008  // "document_dimensions": { "width": 8.27, "height": 11.69, "unit": INCH, }
  1009  // "page_count": 1, }, "annotations": [ { "display_name": "animal",
  1010  // "text_extraction": {"text_segment": {"start_offset": 0, "end_offset": 3}} },
  1011  // { "display_name": "animal", "text_extraction": {"text_segment":
  1012  // {"start_offset": 4, "end_offset": 7}} } ], }\n { "text_snippet": {
  1013  // "content": "This dog is good." }, "annotations": [ { "display_name":
  1014  // "animal", "text_extraction": { "text_segment": {"start_offset": 5,
  1015  // "end_offset": 8} } } ] } Sample document JSON Lines file (presented here
  1016  // with artificial line breaks, but the only actual line break is denoted by
  1017  // \n).: { "document": { "input_config": { "gcs_source": { "input_uris": [
  1018  // "gs://folder/document1.pdf" ] } } } }\n { "document": { "input_config": {
  1019  // "gcs_source": { "input_uris": [ "gs://folder/document2.pdf" ] } } } } - For
  1020  // Text Classification: CSV file(s) with each line in format:
  1021  // ML_USE,(TEXT_SNIPPET | GCS_FILE_PATH),LABEL,LABEL,... TEXT_SNIPPET and
  1022  // GCS_FILE_PATH are distinguished by a pattern. If the column content is a
  1023  // valid gcs file path, i.e. prefixed by "gs://", it will be treated as a
  1024  // GCS_FILE_PATH, else if the content is enclosed within double quotes (""), it
  1025  // is treated as a TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead
  1026  // to a .txt file with UTF-8 encoding, for example, "gs://folder/content.txt",
  1027  // and the content in it is extracted as a text snippet. In TEXT_SNIPPET case,
  1028  // the column content excluding quotes is treated as to be imported text
  1029  // snippet. In both cases, the text snippet/file size must be within 128kB.
  1030  // Maximum 100 unique labels are allowed per CSV row. Sample rows: TRAIN,"They
  1031  // have bad food and very rude",RudeService,BadFood
  1032  // TRAIN,gs://folder/content.txt,SlowService TEST,"Typically always bad service
  1033  // there.",RudeService VALIDATE,"Stomach ache to go.",BadFood - For Text
  1034  // Sentiment: CSV file(s) with each line in format: ML_USE,(TEXT_SNIPPET |
  1035  // GCS_FILE_PATH),SENTIMENT TEXT_SNIPPET and GCS_FILE_PATH are distinguished by
  1036  // a pattern. If the column content is a valid gcs file path, that is, prefixed
  1037  // by "gs://", it is treated as a GCS_FILE_PATH, otherwise it is treated as a
  1038  // TEXT_SNIPPET. In the GCS_FILE_PATH case, the path must lead to a .txt file
  1039  // with UTF-8 encoding, for example, "gs://folder/content.txt", and the content
  1040  // in it is extracted as a text snippet. In TEXT_SNIPPET case, the column
  1041  // content itself is treated as to be imported text snippet. In both cases, the
  1042  // text snippet must be up to 500 characters long. Sample rows:
  1043  // TRAIN,"@freewrytin this is way too good for your product",2 TRAIN,"I need
  1044  // this product so bad",3 TEST,"Thank you for this product.",4
  1045  // VALIDATE,gs://folder/content.txt,2 - For Tables: Either
  1046  // [gcs_source][google.cloud.automl.v1beta1.InputConfig.gcs_source] or
  1047  // [bigquery_source][google.cloud.automl.v1beta1.InputConfig.bigquery_source]
  1048  // can be used. All inputs is concatenated into a single
  1049  // [primary_table][google.cloud.automl.v1beta1.TablesDatasetMetadata.primary_table_name]
  1050  // For gcs_source: CSV file(s), where the first row of the first file is the
  1051  // header, containing unique column names. If the first row of a subsequent
  1052  // file is the same as the header, then it is also treated as a header. All
  1053  // other rows contain values for the corresponding columns. Each .CSV file by
  1054  // itself must be 10GB or smaller, and their total size must be 100GB or
  1055  // smaller. First three sample rows of a CSV file: "Id","First Name","Last
  1056  // Name","Dob","Addresses"
  1057  // "1","John","Doe","1968-01-22","[{"status":"current","address":"123_First_Avenue","city":"Seattle","state":"WA","zip":"11111","numberOfYears":"1"},{"status":"previous","address":"456_Main_Street","city":"Portland","state":"OR","zip":"22222","numberOfYears":"5"}]"
  1058  // "2","Jane","Doe","1980-10-16","[{"status":"current","address":"789_Any_Avenue","city":"Albany","state":"NY","zip":"33333","numberOfYears":"2"},{"status":"previous","address":"321_Main_Street","city":"Hoboken","state":"NJ","zip":"44444","numberOfYears":"3"}]}
  1059  // For bigquery_source: An URI of a BigQuery table. The user data size of the
  1060  // BigQuery table must be 100GB or smaller. An imported table must have between
  1061  // 2 and 1,000 columns, inclusive, and between 1000 and 100,000,000 rows,
  1062  // inclusive. There are at most 5 import data running in parallel. Definitions:
  1063  // ML_USE = "TRAIN" | "VALIDATE" | "TEST" | "UNASSIGNED" Describes how the
  1064  // given example (file) should be used for model training. "UNASSIGNED" can be
  1065  // used when user has no preference. GCS_FILE_PATH = A path to file on GCS,
  1066  // e.g. "gs://folder/image1.png". LABEL = A display name of an object on an
  1067  // image, video etc., e.g. "dog". Must be up to 32 characters long and can
  1068  // consist only of ASCII Latin letters A-Z and a-z, underscores(_), and ASCII
  1069  // digits 0-9. For each label an AnnotationSpec is created which display_name
  1070  // becomes the label; AnnotationSpecs are given back in predictions.
  1071  // INSTANCE_ID = A positive integer that identifies a specific instance of a
  1072  // labeled entity on an example. Used e.g. to track two cars on a video while
  1073  // being able to tell apart which one is which. BOUNDING_BOX =
  1074  // VERTEX,VERTEX,VERTEX,VERTEX | VERTEX,,,VERTEX,, A rectangle parallel to the
  1075  // frame of the example (image, video). If 4 vertices are given they are
  1076  // connected by edges in the order provided, if 2 are given they are recognized
  1077  // as diagonally opposite vertices of the rectangle. VERTEX =
  1078  // COORDINATE,COORDINATE First coordinate is horizontal (x), the second is
  1079  // vertical (y). COORDINATE = A float in 0 to 1 range, relative to total length
  1080  // of image or video in given dimension. For fractions the leading non-decimal
  1081  // 0 can be omitted (i.e. 0.3 = .3). Point 0,0 is in top left.
  1082  // TIME_SEGMENT_START = TIME_OFFSET Expresses a beginning, inclusive, of a time
  1083  // segment within an example that has a time dimension (e.g. video).
  1084  // TIME_SEGMENT_END = TIME_OFFSET Expresses an end, exclusive, of a time
  1085  // segment within an example that has a time dimension (e.g. video).
  1086  // TIME_OFFSET = A number of seconds as measured from the start of an example
  1087  // (e.g. video). Fractions are allowed, up to a microsecond precision. "inf" is
  1088  // allowed, and it means the end of the example. TEXT_SNIPPET = A content of a
  1089  // text snippet, UTF-8 encoded, enclosed within double quotes (""). SENTIMENT =
  1090  // An integer between 0 and
  1091  // Dataset.text_sentiment_dataset_metadata.sentiment_max (inclusive). Describes
  1092  // the ordinal of the sentiment - higher value means a more positive sentiment.
  1093  // All the values are completely relative, i.e. neither 0 needs to mean a
  1094  // negative or neutral sentiment nor sentiment_max needs to mean a positive one
  1095  // - it is just required that 0 is the least positive sentiment in the data,
  1096  // and sentiment_max is the most positive one. The SENTIMENT shouldn't be
  1097  // confused with "score" or "magnitude" from the previous Natural Language
  1098  // Sentiment Analysis API. All SENTIMENT values between 0 and sentiment_max
  1099  // must be represented in the imported data. On prediction the same 0 to
  1100  // sentiment_max range will be used. The difference between neighboring
  1101  // sentiment values needs not to be uniform, e.g. 1 and 2 may be similar
  1102  // whereas the difference between 2 and 3 may be huge. Errors: If any of the
  1103  // provided CSV files can't be parsed or if more than certain percent of CSV
  1104  // rows cannot be processed then the operation fails and nothing is imported.
  1105  // Regardless of overall success or failure the per-row failures, up to a
  1106  // certain count cap, is listed in Operation.metadata.partial_failures.
  1107  //
  1108  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1109  type InputConfig = src.InputConfig
  1110  type InputConfig_BigquerySource = src.InputConfig_BigquerySource
  1111  type InputConfig_GcsSource = src.InputConfig_GcsSource
  1112  
// Request/response message aliases for the AutoMl List* RPCs.

// Request message for
// [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListColumnSpecsRequest = src.ListColumnSpecsRequest

// Response message for
// [AutoMl.ListColumnSpecs][google.cloud.automl.v1beta1.AutoMl.ListColumnSpecs].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListColumnSpecsResponse = src.ListColumnSpecsResponse

// Request message for
// [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListDatasetsRequest = src.ListDatasetsRequest

// Response message for
// [AutoMl.ListDatasets][google.cloud.automl.v1beta1.AutoMl.ListDatasets].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListDatasetsResponse = src.ListDatasetsResponse

// Request message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListModelEvaluationsRequest = src.ListModelEvaluationsRequest

// Response message for
// [AutoMl.ListModelEvaluations][google.cloud.automl.v1beta1.AutoMl.ListModelEvaluations].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListModelEvaluationsResponse = src.ListModelEvaluationsResponse

// Request message for
// [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListModelsRequest = src.ListModelsRequest

// Response message for
// [AutoMl.ListModels][google.cloud.automl.v1beta1.AutoMl.ListModels].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListModelsResponse = src.ListModelsResponse

// Request message for
// [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListTableSpecsRequest = src.ListTableSpecsRequest

// Response message for
// [AutoMl.ListTableSpecs][google.cloud.automl.v1beta1.AutoMl.ListTableSpecs].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ListTableSpecsResponse = src.ListTableSpecsResponse
  1172  
// API proto representing a trained machine learning model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Model = src.Model

// Evaluation results of a model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ModelEvaluation = src.ModelEvaluation

// Aliases for the associated ModelEvaluation_* metrics wrapper types
// generated in automlpb (one per kind of evaluation metrics).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ModelEvaluation_ClassificationEvaluationMetrics = src.ModelEvaluation_ClassificationEvaluationMetrics
type ModelEvaluation_ImageObjectDetectionEvaluationMetrics = src.ModelEvaluation_ImageObjectDetectionEvaluationMetrics
type ModelEvaluation_RegressionEvaluationMetrics = src.ModelEvaluation_RegressionEvaluationMetrics
type ModelEvaluation_TextExtractionEvaluationMetrics = src.ModelEvaluation_TextExtractionEvaluationMetrics
type ModelEvaluation_TextSentimentEvaluationMetrics = src.ModelEvaluation_TextSentimentEvaluationMetrics
type ModelEvaluation_TranslationEvaluationMetrics = src.ModelEvaluation_TranslationEvaluationMetrics
type ModelEvaluation_VideoObjectTrackingEvaluationMetrics = src.ModelEvaluation_VideoObjectTrackingEvaluationMetrics
  1189  
// Output configuration for ModelExport Action.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ModelExportOutputConfig = src.ModelExportOutputConfig

// Aliases for the ModelExportOutputConfig_* destination wrapper types
// generated in automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type ModelExportOutputConfig_GcrDestination = src.ModelExportOutputConfig_GcrDestination
type ModelExportOutputConfig_GcsDestination = src.ModelExportOutputConfig_GcsDestination

// Deployment state of the model.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Model_DeploymentState = src.Model_DeploymentState

// Aliases for the per-problem-type Model_*ModelMetadata types generated in
// automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Model_ImageClassificationModelMetadata = src.Model_ImageClassificationModelMetadata
type Model_ImageObjectDetectionModelMetadata = src.Model_ImageObjectDetectionModelMetadata
type Model_TablesModelMetadata = src.Model_TablesModelMetadata
type Model_TextClassificationModelMetadata = src.Model_TextClassificationModelMetadata
type Model_TextExtractionModelMetadata = src.Model_TextExtractionModelMetadata
type Model_TextSentimentModelMetadata = src.Model_TextSentimentModelMetadata
type Model_TranslationModelMetadata = src.Model_TranslationModelMetadata
type Model_VideoClassificationModelMetadata = src.Model_VideoClassificationModelMetadata
type Model_VideoObjectTrackingModelMetadata = src.Model_VideoObjectTrackingModelMetadata
  1210  
// A vertex represents a 2D point in the image. The normalized vertex
// coordinates are between 0 to 1 fractions relative to the original plane
// (image, video). E.g. if the plane (e.g. whole image) would have size 10 x 20
// then a point with normalized coordinates (0.1, 0.3) would be at the position
// (1, 6) on that plane.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type NormalizedVertex = src.NormalizedVertex

// Metadata used across all long running operations returned by AutoML API.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type OperationMetadata = src.OperationMetadata

// Aliases for the per-operation OperationMetadata_*Details types generated
// in automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type OperationMetadata_BatchPredictDetails = src.OperationMetadata_BatchPredictDetails
type OperationMetadata_CreateModelDetails = src.OperationMetadata_CreateModelDetails
type OperationMetadata_DeleteDetails = src.OperationMetadata_DeleteDetails
type OperationMetadata_DeployModelDetails = src.OperationMetadata_DeployModelDetails
type OperationMetadata_ExportDataDetails = src.OperationMetadata_ExportDataDetails
type OperationMetadata_ExportEvaluatedExamplesDetails = src.OperationMetadata_ExportEvaluatedExamplesDetails
type OperationMetadata_ExportModelDetails = src.OperationMetadata_ExportModelDetails
type OperationMetadata_ImportDataDetails = src.OperationMetadata_ImportDataDetails
type OperationMetadata_UndeployModelDetails = src.OperationMetadata_UndeployModelDetails
  1233  
// - For Translation: CSV file `translation.csv`, with each line in format:
// ML_USE,GCS_FILE_PATH GCS_FILE_PATH leads to a .TSV file which describes
// examples that have given ML_USE, using the following row format per line:
// TEXT_SNIPPET (in source language) \t TEXT_SNIPPET (in target language) - For
// Tables: Output depends on whether the dataset was imported from GCS or
// BigQuery. GCS case:
// [gcs_destination][google.cloud.automl.v1beta1.OutputConfig.gcs_destination]
// must be set. Exported are CSV file(s) `tables_1.csv`,
// `tables_2.csv`,...,`tables_N.csv` with each having as header line the
// table's column names, and all other lines contain values for the header
// columns. BigQuery case:
// [bigquery_destination][google.cloud.automl.v1beta1.OutputConfig.bigquery_destination]
// pointing to a BigQuery project must be set. In the given project a new
// dataset will be created with name
// `export_data_<automl-dataset-display-name>_<timestamp-of-export-call>` where
// <automl-dataset-display-name> will be made BigQuery-dataset-name compatible
// (e.g. most special characters will become underscores), and timestamp will
// be in YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In that dataset a
// new table called `primary_table` will be created, and filled with precisely
// the same data as this obtained on import.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type OutputConfig = src.OutputConfig

// Aliases for the OutputConfig_* destination wrapper types generated in
// automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type OutputConfig_BigqueryDestination = src.OutputConfig_BigqueryDestination
type OutputConfig_GcsDestination = src.OutputConfig_GcsDestination
  1259  
// Request message for
// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type PredictRequest = src.PredictRequest

// Response message for
// [PredictionService.Predict][google.cloud.automl.v1beta1.PredictionService.Predict].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type PredictResponse = src.PredictResponse

// PredictionServiceClient is the client API for PredictionService service
// (alias of the generated gRPC client in automlpb). For semantics around ctx
// use and closing/ending streaming RPCs, please refer to
// https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type PredictionServiceClient = src.PredictionServiceClient

// PredictionServiceServer is the server API for PredictionService service
// (alias of the generated gRPC server in automlpb).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type PredictionServiceServer = src.PredictionServiceServer
  1283  
// Metrics for regression problems.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type RegressionEvaluationMetrics = src.RegressionEvaluationMetrics

// A representation of a row in a relational table.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type Row = src.Row

// The data statistics of a series of STRING values.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type StringStats = src.StringStats

// The statistics of a unigram (nested under StringStats).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type StringStats_UnigramStats = src.StringStats_UnigramStats

// The data statistics of a series of STRUCT values.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type StructStats = src.StructStats

// `StructType` defines the DataType-s of a
// [STRUCT][google.cloud.automl.v1beta1.TypeCode.STRUCT] type.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type StructType = src.StructType
  1314  
// A specification of a relational table. The table's schema is represented
// via its child column specs. It is pre-populated as part of ImportData by
// schema inference algorithm, the version of which is a required parameter of
// ImportData InputConfig. Note: While working with a table, at times the
// schema may be inconsistent with the data in the table (e.g. string in a
// FLOAT64 column). The consistency validation is done upon creation of a
// model. Used by: - Tables
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TableSpec = src.TableSpec

// Contains annotation details specific to Tables.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TablesAnnotation = src.TablesAnnotation

// Metadata for a dataset used for AutoML Tables.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TablesDatasetMetadata = src.TablesDatasetMetadata

// An information specific to given column and Tables Model, in context of the
// Model and the predictions created by it.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TablesModelColumnInfo = src.TablesModelColumnInfo

// Model metadata specific to AutoML Tables.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TablesModelMetadata = src.TablesModelMetadata

// Aliases for the TablesModelMetadata_* optimization-objective value wrapper
// types generated in automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TablesModelMetadata_OptimizationObjectivePrecisionValue = src.TablesModelMetadata_OptimizationObjectivePrecisionValue
type TablesModelMetadata_OptimizationObjectiveRecallValue = src.TablesModelMetadata_OptimizationObjectiveRecallValue
  1348  
// Dataset metadata for classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextClassificationDatasetMetadata = src.TextClassificationDatasetMetadata

// Model metadata that is specific to text classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextClassificationModelMetadata = src.TextClassificationModelMetadata

// Annotation for identifying spans of text.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionAnnotation = src.TextExtractionAnnotation

// Alias for the TextExtractionAnnotation_TextSegment wrapper type generated
// in automlpb.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionAnnotation_TextSegment = src.TextExtractionAnnotation_TextSegment

// Dataset metadata that is specific to text extraction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionDatasetMetadata = src.TextExtractionDatasetMetadata

// Model evaluation metrics for text extraction problems.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionEvaluationMetrics = src.TextExtractionEvaluationMetrics

// Metrics for a single confidence threshold (nested under
// TextExtractionEvaluationMetrics).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionEvaluationMetrics_ConfidenceMetricsEntry = src.TextExtractionEvaluationMetrics_ConfidenceMetricsEntry

// Model metadata that is specific to text extraction.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextExtractionModelMetadata = src.TextExtractionModelMetadata
  1384  
// A contiguous part of a text (string), assuming it has an UTF-8 NFC
// encoding.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSegment = src.TextSegment

// Contains annotation details specific to text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSentimentAnnotation = src.TextSentimentAnnotation

// Dataset metadata for text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSentimentDatasetMetadata = src.TextSentimentDatasetMetadata

// Model evaluation metrics for text sentiment problems.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSentimentEvaluationMetrics = src.TextSentimentEvaluationMetrics

// Model metadata that is specific to text sentiment.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSentimentModelMetadata = src.TextSentimentModelMetadata

// A representation of a text snippet.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TextSnippet = src.TextSnippet
  1415  
// A time period inside of an example that has a time dimension (e.g. video).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TimeSegment = src.TimeSegment

// The data statistics of a series of TIMESTAMP values.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TimestampStats = src.TimestampStats

// Stats split by a granularity that is defined in context (nested under
// TimestampStats).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TimestampStats_GranularStats = src.TimestampStats_GranularStats

// Annotation details specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TranslationAnnotation = src.TranslationAnnotation

// Dataset metadata that is specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TranslationDatasetMetadata = src.TranslationDatasetMetadata

// Evaluation metrics for the dataset.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TranslationEvaluationMetrics = src.TranslationEvaluationMetrics

// Model metadata that is specific to translation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type TranslationModelMetadata = src.TranslationModelMetadata
  1450  
  1451  // `TypeCode` is used as a part of
  1452  // [DataType][google.cloud.automl.v1beta1.DataType].
  1453  //
  1454  // Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1455  type TypeCode = src.TypeCode
  1456  
// UndeployModelOperationMetadata holds details of the UndeployModel operation.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UndeployModelOperationMetadata = src.UndeployModelOperationMetadata

// UndeployModelRequest is the request message for
// [AutoMl.UndeployModel][google.cloud.automl.v1beta1.AutoMl.UndeployModel].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UndeployModelRequest = src.UndeployModelRequest

// UnimplementedAutoMlServer can be embedded to have forward compatible
// implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UnimplementedAutoMlServer = src.UnimplementedAutoMlServer

// UnimplementedPredictionServiceServer can be embedded to have forward
// compatible implementations.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UnimplementedPredictionServiceServer = src.UnimplementedPredictionServiceServer

// UpdateColumnSpecRequest is the request message for
// [AutoMl.UpdateColumnSpec][google.cloud.automl.v1beta1.AutoMl.UpdateColumnSpec].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UpdateColumnSpecRequest = src.UpdateColumnSpecRequest

// UpdateDatasetRequest is the request message for
// [AutoMl.UpdateDataset][google.cloud.automl.v1beta1.AutoMl.UpdateDataset].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UpdateDatasetRequest = src.UpdateDatasetRequest

// UpdateTableSpecRequest is the request message for
// [AutoMl.UpdateTableSpec][google.cloud.automl.v1beta1.AutoMl.UpdateTableSpec].
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type UpdateTableSpecRequest = src.UpdateTableSpecRequest
  1497  
// VideoClassificationAnnotation contains annotation details specific to video
// classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoClassificationAnnotation = src.VideoClassificationAnnotation

// VideoClassificationDatasetMetadata is dataset metadata specific to video
// classification. All Video Classification datasets are treated as multi
// label.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoClassificationDatasetMetadata = src.VideoClassificationDatasetMetadata

// VideoClassificationModelMetadata is model metadata specific to video
// classification.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoClassificationModelMetadata = src.VideoClassificationModelMetadata

// VideoObjectTrackingAnnotation holds annotation details for video object
// tracking.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoObjectTrackingAnnotation = src.VideoObjectTrackingAnnotation

// VideoObjectTrackingDatasetMetadata is dataset metadata specific to video
// object tracking.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoObjectTrackingDatasetMetadata = src.VideoObjectTrackingDatasetMetadata

// VideoObjectTrackingEvaluationMetrics holds model evaluation metrics for
// video object tracking problems. Evaluates prediction quality of both
// labeled bounding boxes and labeled tracks (i.e. series of bounding boxes
// sharing same label and instance ID).
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoObjectTrackingEvaluationMetrics = src.VideoObjectTrackingEvaluationMetrics

// VideoObjectTrackingModelMetadata is model metadata specific to video object
// tracking.
//
// Deprecated: Please use types in: cloud.google.com/go/automl/apiv1beta1/automlpb
type VideoObjectTrackingModelMetadata = src.VideoObjectTrackingModelMetadata
  1535  
  1536  // Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1537  func NewAutoMlClient(cc grpc.ClientConnInterface) AutoMlClient { return src.NewAutoMlClient(cc) }
  1538  
  1539  // Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1540  func NewPredictionServiceClient(cc grpc.ClientConnInterface) PredictionServiceClient {
  1541  	return src.NewPredictionServiceClient(cc)
  1542  }
  1543  
  1544  // Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1545  func RegisterAutoMlServer(s *grpc.Server, srv AutoMlServer) { src.RegisterAutoMlServer(s, srv) }
  1546  
  1547  // Deprecated: Please use funcs in: cloud.google.com/go/automl/apiv1beta1/automlpb
  1548  func RegisterPredictionServiceServer(s *grpc.Server, srv PredictionServiceServer) {
  1549  	src.RegisterPredictionServiceServer(s, srv)
  1550  }
  1551  
